# Copyright 2011 OpenStack Foundation # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import mock
from oslo_serialization import jsonutils
from requests import exceptions as request_exceptions
from cinder.compute import nova
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common.scheduler import filters
from cinder import test
from cinder.tests.scheduler import fakes
from cinder.tests import utils
class HostFiltersTestCase(test.TestCase):
"""Test case for host filters."""
def setUp(self):
super(HostFiltersTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
        # This has a side effect of testing 'get_all_classes'
        # when specifying a namespace (in this case, our standard filters)
filter_handler = filters.HostFilterHandler('cinder.scheduler.filters')
classes = filter_handler.get_all_classes()
self.class_map = {}
for cls in classes:
self.class_map[cls.__name__] = cls
class CapacityFilterTestCase(HostFiltersTestCase):
def setUp(self):
super(CapacityFilterTestCase, self).setUp()
self.json_query = jsonutils.dumps(
['and',
['>=', '$free_capacity_gb', 1024],
['>=', '$total_capacity_gb', 10 * 1024]])
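    # Rough sketch of the capacity math these tests exercise, inferred from
    # the expected pass/fail results below (the real CapacityFilter may
    # differ in detail):
    #
    #   reserved = floor(total_capacity_gb * reserved_percentage / 100)
    #   free = free_capacity_gb - reserved
    #   if thin_provisioning_support and max_over_subscription_ratio >= 1:
    #       # Over-subscription guard: provisioned capacity plus the new
    #       # volume must stay within the over-subscription ratio.
    #       if (provisioned_capacity_gb + size) / total_capacity_gb > \
    #               max_over_subscription_ratio:
    #           return False
    #   return free >= size
    #
    # A free capacity of 'infinite' or 'unknown' always passes.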
@mock.patch('cinder.utils.service_is_up')
def test_filter_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 200,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_current_host_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100, 'vol_exists_on': 'host1'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 100,
'free_capacity_gb': 10,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 200,
'free_capacity_gb': 120,
'reserved_percentage': 20,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_passes_infinite(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 'infinite',
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_passes_unknown(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 'unknown',
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_thin_true_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> False'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 200,
'provisioned_capacity_gb': 500,
'max_over_subscription_ratio': 2.0,
'reserved_percentage': 5,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_thin_false_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> False',
'capabilities:thick_provisioning_support':
'<is> True'}
service = {'disabled': False}
# If "thin_provisioning_support" is False,
# "max_over_subscription_ratio" will be ignored.
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 200,
'provisioned_capacity_gb': 300,
'max_over_subscription_ratio': 1.0,
'reserved_percentage': 5,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_over_subscription_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> False'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 200,
'provisioned_capacity_gb': 500,
'max_over_subscription_ratio': 1.0,
'reserved_percentage': 5,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_over_subscription_fails2(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 30,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> False'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 30,
'provisioned_capacity_gb': 500,
'max_over_subscription_ratio': 1.0,
'reserved_percentage': 0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_reserved_thin_true_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> False'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 100,
'provisioned_capacity_gb': 500,
'max_over_subscription_ratio': 2.0,
'reserved_percentage': 5,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_reserved_thin_false_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> False',
'capabilities:thick_provisioning_support':
'<is> True'}
service = {'disabled': False}
# If "thin_provisioning_support" is False,
# "max_over_subscription_ratio" will be ignored.
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 100,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 1.0,
'reserved_percentage': 5,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_reserved_thin_thick_true_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> True'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 100,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'reserved_percentage': 5,
'thin_provisioning_support': True,
'thick_provisioning_support': True,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_reserved_thin_thick_true_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> True'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 125,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'reserved_percentage': 5,
'thin_provisioning_support': True,
'thick_provisioning_support': True,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_reserved_thin_thick_true_fails2(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> True'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 99,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'reserved_percentage': 5,
'thin_provisioning_support': True,
'thick_provisioning_support': True,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_reserved_thin_thick_true_passes2(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> True'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 100,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'reserved_percentage': 0,
'thin_provisioning_support': True,
'thick_provisioning_support': True,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
class AffinityFilterTestCase(HostFiltersTestCase):
@mock.patch('cinder.utils.service_is_up')
def test_different_filter_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['DifferentBackendFilter']()
service = {'disabled': False}
host = fakes.FakeHostState('host1:pool0',
{'free_capacity_gb': '1000',
'updated_at': None,
'service': service})
volume = utils.create_volume(self.context, host='host1:pool1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [vol_id], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_different_filter_legacy_volume_hint_passes(
self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['DifferentBackendFilter']()
service = {'disabled': False}
host = fakes.FakeHostState('host1:pool0',
{'free_capacity_gb': '1000',
'updated_at': None,
'service': service})
volume = utils.create_volume(self.context, host='host1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [vol_id], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_different_filter_non_list_fails(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host2', {})
volume = utils.create_volume(self.context, host='host2')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': vol_id}}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_different_filter_fails(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [vol_id], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_different_filter_handles_none(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_different_filter_handles_deleted_instance(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host1')
vol_id = volume.id
db.volume_destroy(utils.get_test_admin_context(), vol_id)
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [vol_id], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_different_filter_fail_nonuuid_hint(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': "NOT-a-valid-UUID", }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_different_filter_handles_multiple_uuids(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume1 = utils.create_volume(self.context, host='host1:pool1')
vol_id1 = volume1.id
volume2 = utils.create_volume(self.context, host='host1:pool3')
vol_id2 = volume2.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [vol_id1, vol_id2], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_different_filter_handles_invalid_uuids(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host2')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [vol_id, "NOT-a-valid-UUID"], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_same_filter_no_list_passes(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': vol_id}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_same_filter_passes(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume = utils.create_volume(self.context, host='host1#pool0')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [vol_id], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_same_filter_legacy_vol_fails(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume = utils.create_volume(self.context, host='host1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [vol_id], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_same_filter_fails(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume = utils.create_volume(self.context, host='host1#pool1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [vol_id], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_same_filter_vol_list_pass(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume1 = utils.create_volume(self.context, host='host1')
vol_id1 = volume1.id
volume2 = utils.create_volume(self.context, host='host2')
vol_id2 = volume2.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [vol_id1, vol_id2], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_same_filter_handles_none(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_same_filter_handles_deleted_instance(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host2')
vol_id = volume.id
db.volume_destroy(utils.get_test_admin_context(), vol_id)
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [vol_id], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_same_filter_fail_nonuuid_hint(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': "NOT-a-valid-UUID", }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
class DriverFilterTestCase(HostFiltersTestCase):
def test_passing_function(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': '1 == 1',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_failing_function(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': '1 == 2',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
def test_no_filter_function(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': None,
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_not_implemented(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_no_volume_extra_specs(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': '1 == 1',
}
})
filter_properties = {'volume_type': {}}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_volume_backend_name_different(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': '1 == 1',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake2',
}
}
}
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
def test_function_extra_spec_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': 'extra.var == 1',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
'var': 1,
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_function_stats_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'total_capacity_gb': 100,
'capabilities': {
'filter_function': 'stats.total_capacity_gb < 200',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_function_volume_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': 'volume.size < 5',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
},
'request_spec': {
'volume_properties': {
'size': 1
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_function_qos_spec_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': 'qos.var == 1',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
},
'qos_specs': {
'var': 1
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_function_exception_caught(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': '1 / 0 == 0',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
def test_function_empty_qos(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': 'qos.maxiops == 1',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
},
'qos_specs': None
}
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
def test_capabilities(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'foo': 10,
'filter_function': 'capabilities.foo == 10',
},
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
class InstanceLocalityFilterTestCase(HostFiltersTestCase):
def setUp(self):
super(InstanceLocalityFilterTestCase, self).setUp()
self.override_config('nova_endpoint_template',
'http://novahost:8774/v2/%(project_id)s')
self.context.service_catalog = \
[{'type': 'compute', 'name': 'nova', 'endpoints':
[{'publicURL': 'http://novahost:8774/v2/e3f0833dc08b4cea'}]},
{'type': 'identity', 'name': 'keystone', 'endpoints':
[{'publicURL': 'http://keystonehost:5000/v2.0'}]}]
@mock.patch('cinder.compute.nova.novaclient')
def test_same_host(self, _mock_novaclient):
_mock_novaclient.return_value = fakes.FakeNovaClient()
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
uuid = nova.novaclient().servers.create('host1')
filter_properties = {'context': self.context,
'scheduler_hints': {'local_to_instance': uuid}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.compute.nova.novaclient')
def test_different_host(self, _mock_novaclient):
_mock_novaclient.return_value = fakes.FakeNovaClient()
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
uuid = nova.novaclient().servers.create('host2')
filter_properties = {'context': self.context,
'scheduler_hints': {'local_to_instance': uuid}}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_handles_none(self):
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context,
'scheduler_hints': None}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_invalid_uuid(self):
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context,
'scheduler_hints':
{'local_to_instance': 'e29b11d4-not-valid-a716'}}
self.assertRaises(exception.InvalidUUID,
filt_cls.host_passes, host, filter_properties)
@mock.patch('cinder.compute.nova.novaclient')
def test_nova_no_extended_server_attributes(self, _mock_novaclient):
_mock_novaclient.return_value = fakes.FakeNovaClient(
ext_srv_attr=False)
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
uuid = nova.novaclient().servers.create('host1')
filter_properties = {'context': self.context,
'scheduler_hints': {'local_to_instance': uuid}}
self.assertRaises(exception.CinderException,
filt_cls.host_passes, host, filter_properties)
@mock.patch('cinder.compute.nova.novaclient')
def test_nova_down_does_not_alter_other_filters(self, _mock_novaclient):
# Simulate Nova API is not available
_mock_novaclient.side_effect = Exception
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context, 'size': 100}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('requests.request')
def test_nova_timeout(self, _mock_request):
# Simulate a HTTP timeout
_mock_request.side_effect = request_exceptions.Timeout
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = \
{'context': self.context, 'scheduler_hints':
{'local_to_instance': 'e29b11d4-15ef-34a9-a716-598a6f0b5467'}}
self.assertRaises(exception.APITimeout,
filt_cls.host_passes, host, filter_properties)
So some food for thought: it's not just about getting back on the horse so much as it is about making the effort to not fall off again. We are human, and some days life is not perfect and we slip…for myself, I was having a terrible headache (die-off symptom no. 1) and I had some bread. Now this is not the worst thing I could have done; however, afterwards I felt flushed and uncomfortable, both emotionally and physically. I felt bad, as I had let my body down, but I know better than to dwell on the past, so I picked myself back up and trudged along on my candida diet.
This week was considerably better. After eliminating for the last two weeks, I finally began to ditch my headache and the nausea that went along with it, and being on the diet became a tad more natural. In my day-to-day life I am refined-sugar free and mostly dairy free, but cutting out ALL of it was something my body noticed almost immediately. My gut punished me for depriving it of the things it craves the most: sugar, dairy, and corn.
The past two weeks I have been taking a probiotic and a magnesium supplement, as well as an active B vitamin complex, to help support my systems while they are adapting and healing. This week I got the results back from my food sensitivity testing and I am also cutting out eggs…my sensitivity was moderate to high, and this elimination will last for around 90 days. I will also continue to live dairy free for at least the next two months. However, despite their dominance in my current diet, it will soon become second nature (as going gluten-free did) to not eat, or even want, these things, and eliminating them will help the bloating, gas, and nausea that I frequently feel.
My energy was so-so last week, but I am beginning to notice an improvement…my body is in a state of healing and is still under siege, so I am not entirely surprised that I am still tired: my resources are going to war in my gut, where a lot of my energy and happy hormones are produced.
I have set goals with my nutritionist to stay motivated and remember why I am doing this: to be the best possible version of myself, to not just feel good but to feel amazing, to be able to get off my anti-depressants and feel human again. These will be at the forefront of our sessions; motivation is key.
We also discussed things I would like to celebrate, and I chose the accomplishment of staying away from sugars and dairy, as well as alcohol…these are not easy when your favourite restaurant serves both. This week I will begin the kill phase and will be taking a series of natural anti-fungals (garlic, cinnamon, etc.), and we shall see how my body tolerates these. I began taking them today; this will help to kill off the candida in my gut and promote healthy gut flora. I am taking two probiotics now, which will hopefully begin to help outnumber the candida.
I will be sure to update you on my life sans eggs…this one will be more difficult than dairy, as all my pancakes etc. require them. It is time for me to play around with substitutes. Remember, if you have any questions regarding my candida cleanse or who to consult here in Calgary, feel free to give me a shout.
#!/usr/bin/env python
# coding=utf-8
import flask
from flask import request, redirect, url_for, flash, abort
from flask_login import login_required
from rpress.runtimes.template import render_template
from rpress.runtimes.auth import user_login, user_logout
from rpress.forms import LoginForm
auth = flask.Blueprint('auth', __name__)
@auth.route('/login', methods=['GET', 'POST'])
def login():
# Here we use a class of some kind to represent and validate our
# client-side form data. For example, WTForms is a library that will
# handle this for us, and we use a custom LoginForm to validate.
form = LoginForm()
if form.validate_on_submit():
# Login and validate the user.
# user should be an instance of your `User` class
if not user_login(form.username.data, form.password.data):
            flash('Login failed.')
abort(401)
# return redirect(url_for('.index'))
flash('Logged in successfully.')
next_location = request.args.get('next')
# next_is_valid should check if the user has valid
# permission to access the `next` url
# if not next_is_valid(next):
# return flaskabort(400)
return redirect(next_location or url_for('rpadmin_dashboard.dashboard'))
return render_template('/common/login.html', form=form)
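# A minimal sketch of the `next` validation hinted at in login() above
# (hypothetical helper, not part of rpress; assumes Python 3 for
# urllib.parse): only follow redirect targets that stay on our own host,
# to avoid open-redirect issues.
from urllib.parse import urlparse, urljoin

def is_safe_url(target):
    # Resolve the target relative to our host and compare network locations.
    ref_url = urlparse(request.host_url)
    test_url = urlparse(urljoin(request.host_url, target))
    return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc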
@auth.route("/logout")
@login_required
def logout():
user_logout()
return redirect(url_for('post_page.paginate_with_all'))
Which baby boy's name starting with "A" do you prefer?
Which pic of my niece, Jasmynn, is the cutest?
When girls mimic their baby's noises, do you think it's inconsiderate to the baby?
Are babies with hair cuter than bald babies?
Why do people call pretty babies cute?
I thought this might be a good place to ask the mothers out there about their experiences, as I have an issue and think I may be pregnant?
On March 31, after dark, when my family and I were leaving the Superman vs. Batman showing, I saw a guy in the lobby who got tickets so his bald-ish baby could see Zootopia.
Today at Whole Foods there was a baby with blond hair in a baby seat, dancing until I said "Hi" to him. The Mom said to him "Say hi," and he didn't say anything, and then the Mom asked him to give me a high five; I put my hand up, and he didn't do anything, so the Mom put his fist on my hand.
Babies to me are the most adorable things in the world. When they come into this world they are a gift from God, a precious miracle. Nothing's better than taking care of a baby. It's a lot of work, but it's good experience. Babies are so delicate; they need all the love and support they can get. I love when I hear those stories about babies being born, the stories of happy babies. Babies should grow up safe, and they should feel safe. They should not feel unloved or unprotected. They need to know someone is there. Babies have feelings too. Babies are a big part of this world.
Just to let you know, this is NOT mine. I found it on another website.
You and (DH) have been married for well over a year when you decide the house is too big for just the two of you. (ROLL THE DICE) months later, you get pregnant with boy/girl twins. The boy's middle name honors DH's uncle and the girl's middle name honors your grandmother.
What are the babies' first names?
Where do babies come from?
Babies: Love Em or Hate Em?
How To Conceive A Baby Boy?
Is anyone here a parent?
This picture was taken 16 years ago.
#
# Proximate - Peer-to-peer social networking
#
# Copyright (c) 2008-2011 Nokia Corporation
#
# All rights reserved.
#
# This software is licensed under The Clear BSD license.
# See the LICENSE file for more details.
#
import os
from bencode import fmt_bdecode, bencode
from ioutils import get_flen, TCP_Queue, TCPQ_ERROR
from plugins import Plugin, get_plugin_by_type
from support import warning
from proximateprotocol import TP_SEND_FILE, valid_receive_name, \
PLUGIN_TYPE_COMMUNITY, PLUGIN_TYPE_SEND_FILE, \
TP_CONNECT_TIMEOUT, PLUGIN_TYPE_NOTIFICATION, \
PLUGIN_TYPE_FILE_TRANSFER, TP_MAX_TRANSFER
from utils import format_bytes
SEND_FILE_ACCEPT = 'mkay'
SEND_FILE_DENY = 'nothx'
community = None
notify = None
sendfile = None
ACCEPT_TIMEOUT = 300
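# Wire protocol, as implemented below: the sender connects over TCP and
# writes the raw prefix TP_SEND_FILE + '\n', followed by a bencoded dict
# {'uid': ..., 'flen': ..., 'name': ...}. The receiver replies with
# SEND_FILE_ACCEPT ('mkay') or SEND_FILE_DENY ('nothx'); on accept, the
# raw file bytes follow until 'flen' bytes have been transferred.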
class Send_File_Server:
""" Process incoming sendfile connection """
sendspec = {'uid': str,
'flen': lambda flen: (type(flen) == int or type(flen) == long) and flen >= 0,
'name': valid_receive_name,
}
def __init__(self, address, sock, data):
self.q = TCP_Queue(self.msghandler, closehandler=self.queue_closed)
# Close queue that is idle for a period of time
self.q.set_timeout(ACCEPT_TIMEOUT)
self.address = address
self.initstate = True
self.f = None
self.ui = None
self.pos = 0
self.user = None
self.name = None
self.flen = None
self.cb = None
self.ctx = None
self.q.append_input(data)
self.q.initialize(sock)
def queue_closed(self, q, parameter, msg):
if self.f != None:
self.f.close()
self.f = None
if self.ui != None:
self.ui.cleanup('End')
self.ui = None
if self.cb != None:
self.cb(self.pos == self.flen, self.ctx)
self.cb = None
if self.name != None and self.pos < self.flen:
notify('Unable to receive a file from %s: %s' % (self.user.get('nick'), self.name), True)
self.name = None
def msghandler(self, q, data, parameter):
if not self.initstate:
warning('send file server: protocol violation!\n')
return False
self.initstate = False
d = fmt_bdecode(self.sendspec, data)
if d == None:
warning('send file server: invalid msg: %s\n' % data)
return False
self.user = community.safe_get_user(d['uid'], self.address[0])
if self.user == None:
warning('send file server: invalid uid: %s\n' % d['uid'])
return False
self.name = d['name']
self.flen = d['flen']
notify('Got a file send request from %s: %s (%s)' % (self.user.get('nick'), self.name, format_bytes(self.flen)))
for cb in sendfile.receive_cb:
cb(self.accept_send, self.user, self.name)
return True
def abort_cb(self, ctx):
self.q.close(msg='Aborted')
def accept_send(self, accept, destname, cb, ctx=None):
""" callback(success, bytes, ctx) """
if self.name == None:
# Aborted
return
if not accept:
self.q.write(SEND_FILE_DENY)
self.q.close_after_send('File denied')
return
filetransfer = get_plugin_by_type(PLUGIN_TYPE_FILE_TRANSFER)
if filetransfer != None:
title = 'Receiving from %s: %s' % (self.user.get('nick'), self.name)
self.ui = filetransfer.add_transfer(title, self.flen, self.abort_cb)
self.q.set_timeout(TP_CONNECT_TIMEOUT)
self.cb = cb
self.ctx = ctx
self.q.write(SEND_FILE_ACCEPT)
try:
self.f = open(destname, 'w')
except IOError, (errno, strerror):
self.q.close(TCPQ_ERROR, msg=strerror)
return
self.q.set_recv_handler(self.receive)
def receive(self, data):
amount = min(len(data), self.flen - self.pos)
try:
self.f.write(data[0:amount])
except IOError, (errno, strerror):
self.q.close(TCPQ_ERROR, msg=strerror)
return None
self.pos += amount
if self.ui != None:
self.ui.update(amount)
if self.flen == self.pos:
            notify('Received a file from %s successfully: %s' % (self.user.get('nick'), self.name))
self.q.close(msg='Complete')
return None
return amount
class Send_File:
def __init__(self, user, fname):
self.q = TCP_Queue(self.msghandler, closehandler=self.queue_closed)
self.user = user
self.f = None
self.fname = fname
self.name = os.path.basename(fname)
self.ui = None
self.initstate = True
self.pos = 0
self.flen = None
def queue_closed(self, q, parameter, msg):
if self.f != None:
self.f.close()
self.f = None
if self.ui != None:
self.ui.cleanup('End')
self.ui = None
if self.flen != None and self.pos < self.flen:
notify('Unable to send a file to %s: %s' % (self.user.get('nick'), self.name), True)
self.flen = None
def begin(self):
try:
self.f = open(self.fname, 'r')
except IOError, (errno, strerror):
self.q.close(TCPQ_ERROR, msg=strerror)
return False
try:
self.f.seek(0, os.SEEK_END)
except IOError, (errno, strerror):
self.q.close(TCPQ_ERROR, msg=strerror)
return False
self.flen = self.f.tell()
self.f.seek(0)
notify('Sending a file to %s: %s (%s)' % (self.user.get('nick'), self.name, format_bytes(self.flen)))
filetransfer = get_plugin_by_type(PLUGIN_TYPE_FILE_TRANSFER)
if filetransfer != None:
title = 'Sending to %s: %s' % (self.user.get('nick'), self.name)
self.ui = filetransfer.add_transfer(title, self.flen, self.abort_cb)
return self.connect()
def abort_cb(self, ctx):
self.q.close(msg='Aborted')
def connect(self):
ip = self.user.get('ip')
port = self.user.get('port')
if ip == None or port == None or not self.q.connect((ip, port), TP_CONNECT_TIMEOUT):
return False
prefix = TP_SEND_FILE + '\n'
self.q.write(prefix, writelength = False)
myuid = community.get_myuid()
req = {'uid': myuid, 'flen': self.flen, 'name': self.name}
self.q.write(bencode(req))
# Close queue that is idle for a period of time
self.q.set_timeout(ACCEPT_TIMEOUT)
return True
def msghandler(self, q, data, parameter):
if not self.initstate:
warning('send file: protocol violation!\n')
return False
self.initstate = False
if data == SEND_FILE_ACCEPT:
self.q.set_timeout(TP_CONNECT_TIMEOUT)
self.q.set_send_handler(self.send)
return True
elif data == SEND_FILE_DENY:
return False
warning('send file: invalid message %s\n' % data)
return False
def send(self):
amount = min(TP_MAX_TRANSFER * 4, self.flen - self.pos)
try:
chunk = self.f.read(amount)
except IOError, (errno, strerror):
self.q.close(TCPQ_ERROR, msg=strerror)
return None
self.pos += amount
if self.ui != None:
self.ui.update(amount)
if self.pos == self.flen:
            notify('Sent a file to %s successfully: %s' % (self.user.get('nick'), self.name))
self.q.set_send_handler(None)
self.q.close_after_send('Complete')
return chunk
class Send_File_Plugin(Plugin):
def __init__(self):
global sendfile
self.register_plugin(PLUGIN_TYPE_SEND_FILE)
self.register_server(TP_SEND_FILE, Send_File_Server)
sendfile = self
self.receive_cb = []
def ready(self):
global community, notify
community = get_plugin_by_type(PLUGIN_TYPE_COMMUNITY)
notify = get_plugin_by_type(PLUGIN_TYPE_NOTIFICATION).notify
def send(self, user, fname):
s = Send_File(user, fname)
return s.begin()
def init(options):
Send_File_Plugin()
“I create lifestyles for a discerning clientele that value individuality and quality; quality of design, an appreciation of the art of sourcing, and a reverence for the best materials,” said Catherine.
Catherine Bailly Dunne, ASID, is an award-winning interior designer who has captivated audiences around the world with her bold and synergistic approach to interior design.
A winner of the prestigious American Society of Interior Designers (ASID) Design Competition, Catherine has appeared on countless national television shows, and as a featured guest on news, radio, and television programs all around the country. Catherine was honored as the National Spokesperson for the American Furniture Manufacturers Association.
Catherine has designed extraordinary homes for many of the most notable clientele in cities across America, while being equally comfortable designing historic hotels.
Catherine draws from a global palette, traveling from China, South America, Thailand, and Canada to every exceptional nook of Europe and all the unique destinations in the United States, to uncover ideas and treasures that express her clientele’s personalities and lifestyles.
As an author, Catherine’s “Interior Designing for All Five Senses” has become a definitive design book referenced by design schools around the world. “I love that!” Oprah Winfrey said on the Oprah Show, enthusiastically discussing Catherine’s book. “Catherine’s book will bring you closer to the peaceful haven you crave,” Oprah said.
Catherine Bailly Dunne was educated at Parsons School of Design in New York City, and continued her extensive studies in Los Angeles and Paris. Catherine teaches a master class in residential remodeling at UCLA.
import io, struct, os, sys, math
from binascii import crc32, unhexlify, hexlify
from bitcoin.main import *
from bitcoin.bci import *
from bitcoin.transaction import *
from bitcoin.pyspecials import hexify, unhexify, by  # hexify/unhexify/by are used below
def _mk_multisig_scriptpubkey(fo):
# make a single output's redeemScript
data = fo.read(65*3)
if not data:
return None
script_pubkeys = []
while data:
chunk = data[:65]
data = data[65:]
# pad right side with null bytes
if len(chunk) < 33:
chunk += by(bytearray(33-len(chunk)))
elif len(chunk) < 65:
chunk += by(bytearray(65-len(chunk)))
script_pubkeys.append(chunk)
pubz = list(map(hexify, script_pubkeys))
return mk_multisig_script(pubz, 1)
def _mk_txouts(fo, value=None):
value = 547 if not value else int(value)
hexval = hexify(struct.pack('<Q', value)) # make 8 byte LE value
txouts = []
while True:
scriptPubKey = _mk_multisig_scriptpubkey(fo)
if scriptPubKey is None: break
txouts.append( {'script': scriptPubKey, 'value': value} )
return txouts
#return ''.join([(hexval + str(wrap_script(x['script']))) for x in txouts])
#Encode file into the blockchain (with prepended file length, crc32) using multisig addresses
def _mk_binary_txouts(filename, value=None):
try: fileobj = open(filename, 'rb').read()
    except IOError: raise Exception("can't find file!")
data = struct.pack('<I', len(fileobj)) + \
struct.pack('<I', crc32(fileobj) & 0xffffffff) + fileobj
fd = io.BytesIO(data)
TXOUTS = _mk_txouts(fd, value)
return list(TXOUTS)
#return wrap_varint(TXOUTS)
def encode_file(filename, privkey, *args):
""""""
#filename, privkey, value, change_address, network, signtx
if len(args) == 0:
value, input_address, network, signtx = None, None, None, False
elif len(args) == 3:
value, input_address, network = args
signtx = False
elif len(args) == 4:
value, input_address, network, signtx = args
else:
raise SyntaxError("params = filename, privkey, value, change_address, network, signtx")
if not network:
network = 'testnet'
if input_address is None:
input_address = privtoaddr(privkey, 111) if network == 'testnet' else privtoaddr(privkey)
u = unspent(input_address, 'testnet', source='blockr') if network == 'testnet' else unspent(input_address)
value = 547 if value is None else int(value)
TXFEE = int(math.ceil(1.1 * (10000*os.path.getsize(filename)/1000)))
OUTS = _mk_binary_txouts(filename, value)
TOTALFEE = TXFEE + int(value)*len(OUTS)
INS = select(u, TOTALFEE)
rawtx = mksend(INS, OUTS, input_address, TXFEE)
if signtx:
signedtx = sign(rawtx, 0, privkey, 1)
return signedtx
return rawtx
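# Hypothetical end-to-end usage (key, value and broadcast step are
# illustrative only; pushtx is assumed to come from bitcoin.bci, and the
# input address must already be funded):
#   raw = encode_file('notes.txt', privkey, 600, None, 'testnet', True)
#   txid = pushtx(raw, 'testnet')
#   data = decode_file(txid, 'testnet')
#   open('notes_copy.txt', 'wb').write(data)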
def decode_file(txid, network='btc'):
"""Returns decoded blockchain binary file as bytes, ready to write to a file"""
# TODO: multiple TxIDs? verify encode_file output?
assert network in ('btc', 'testnet')
txh = fetchtx(txid, network, source='blockr')
txo = deserialize(txh)
outs1 = map(deserialize_script, multiaccess(txo['outs'], 'script'))
# get hex key data from multisig scripts
outs2 = filter(lambda l: l[-1] == 174, outs1) # TODO: check for _non-p2sh_ outputs
outs3 = map(lambda l: l[1:-2], outs2)
data = unhexify(''.join([item for sublist in outs3 for item in sublist])) # base 256 of encoded data
# TODO: are length & crc32 prepended?
length = struct.unpack('<I', data[0:4])[0] # TODO: check length == len(data)
checksum = struct.unpack('<I', data[4:8])[0]
data = data[8:8+length]
assert checksum == crc32(data) & 0xffffffff
return data # TODO: write return to file object?
# def decode_files(txids, network='btc'):
# if isinstance(txids, string_types):
# return decode_file(txids, network)
# elif isinstance(txids, list) and len(txids) == 1:
# return decode_file(txids[0], network)
# return ''.join([decode_file(x) for x in txids])
NOTE: If no paths are given, the default system database is used.
The utility continues to dump several dozen other shims. It’s still in beta for now, so the final output might not match, but it allows us to build a list of several interesting system shims, which I’ll enumerate below. Caveat: my criteria were a mix between usefulness, interesting security implications, and completely out-of-this-world, bizarre or uber-hack shims. The ones in bold are some of my favorites, but you should definitely read through them all. Once the tool is completed, you’ll be able to dump your own.
DESCRIPTION=”Changes COM Security Level from RPC_C_AUTHN_LEVEL_NONE to RPC_C_AUTHN_LEVEL_COMMON. This enables temporary elevation of the security context for an application.”
As you can see, the Shim Engine enables everything from the simplest of hacks (such as adding PEB flags) to complete ports of 9x APIs (such as the Heap Manager). Many other shims are simply extremely useful features that should be easier to access. The ability to deal with random setup application names is something I’ve had to code on my own in the past, and the VirtualRegistry shim in XP seems to be almost as powerful as the built-in Vista feature. Yet others, dealing with delay-loading DLLs, instant killing, and redirection, can be lifesavers during certain debugging scenarios.
For now, these shims have only been presented. Later articles in this series will deal with actually using these shims, but for now, we’ll have to continue exploring the system in the next article.
There are some really interestingly named shims enumerated by this tool that aren’t documented as far as I can tell, like “CreateDummyProcess”, “ShellExecuteNoZone”, and “ShellExecuteXP”.
#
# Open addresses ETL Common Library
#
# Bulk Insert Class
#
# Version 1.0 (Python) in progress
# Author John Murray
# Licence MIT
#
# Purpose Bulk insert items into a MySQL or MariaDB table
#
# Arguments: database cursor, table name, list of fields, max = maximum buffer (2000), ignore = ignore duplicate keys (false)
#
import MySQLdb
import string
class BulkInsert:
def __init__(self, cur,table,fields,max=2000,ignore=False): # Instantiation - pass database
self.max_rows = max
self.cursor = cur
self.fields = fields
self.table = table
if ignore:
self.type = "IGNORE "
else:
self.type = ""
self.nrecs = 0
self.bufrecs = 0
self.values = []
self.prefix = "INSERT "+self.type+"INTO `"+self.table+"` ("
self.prefix += string.join(["`" + field + "`" for field in fields],",")
self.prefix += ") VALUES "
def close(self):
if self.bufrecs > 0:
self.writeData()
def addRow(self,row):
self.values.append(row)
self.nrecs += 1
self.bufrecs += 1
if (self.nrecs % self.max_rows) == 0:
self.writeData()
def writeData(self):
query = self.prefix
for i in range(0,len(self.values)):
if i > 0:
query += ", "
query += "("
for j in range(0,len(self.fields)):
if j > 0:
query += ", "
if isinstance(self.values[i][j], (int, long, float, complex)): # Is numeric
query += "'" + str(self.values[i][j]) + "'"
elif self.values[i][j] == "NULL":
query += "NULL"
elif self.values[i][j][0:12] == "GeomFromText":
query += self.values[i][j]
else:
query += "'" + self.values[i][j].replace("'","\\'") + "'"
query += ")"
query += ";"
self.cursor.execute(query)
self.values = []
self.bufrecs = 0
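# Example usage (connection details are hypothetical):
#   db = MySQLdb.connect(host="localhost", user="etl", passwd="secret", db="addresses")
#   cur = db.cursor()
#   bulk = BulkInsert(cur, "address", ["street", "town", "postcode"], max=1000, ignore=True)
#   bulk.addRow(("1 High St", "Anytown", "AB1 2CD"))
#   bulk.close()      # flush any remaining buffered rows
#   db.commit()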
The Los Angeles purse from MODE. is fashioned from luxurious Saffiano leather with matching trim. Securing with a zip-around fastener, the LA purse has 8 card slots, a central compartment for your phone, and 2 slip pockets for notes and receipts. There is also a useful zipped pocket on the back. It is finished with gold hardware and a matching detachable wristlet.
# Copyright (c) 2015 Mitch Garnaat. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class Resource(object):
def __init__(self, response):
self.href = '/'
self.items = []
for key in response:
if key == '_links':
self._links = response['_links']
if 'self' in self._links:
self.href = self._links['self']['href']
self.url = self._links.get('self', '/')
elif key == '_embedded':
if isinstance(response['_embedded']['item'], dict):
self.items.append(Resource(response['_embedded']['item']))
else:
for item in response['_embedded']['item']:
self.items.append(Resource(item))
else:
setattr(self, key, response[key])
def __repr__(self):
return 'Resource: %s' % self.href
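# Example: wrapping a HAL-style response (payload is hypothetical):
#   r = Resource({
#       '_links': {'self': {'href': '/widgets/1'}},
#       'name': 'widget',
#       '_embedded': {'item': [{'_links': {'self': {'href': '/parts/2'}}}]},
#   })
#   r           # Resource: /widgets/1
#   r.name      # 'widget'
#   r.items[0]  # Resource: /parts/2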
My goal in visiting Liz Mierendorf, at Isthmus Wellness, was to alleviate an acute flare-up of ankylosing spondyloarthropathy, or inflammation in my spine. I have suffered with these types of flare-ups for about 7 years, usually several times a year. The pain is debilitating and often leaves me crawling from place to place, missing work, and taking far too many pain pills and muscle relaxers, because that is the only option that traditional medicine has to offer. I was referred to Liz by a co-worker, and from the moment I met her, I felt at ease. After the very first treatment I felt some relief, but more than anything, I felt hopeful because of Liz’s background, knowledge, and friendliness. I was seen several times over the next couple of weeks after my initial visit, and experienced the quickest recovery from any flare-up, ever. I still see Liz for maintenance and haven’t had a flare-up in a while. I will continue with monthly maintenance visits to maintain my initial goal, and can only look forward to the remainder of the year without any flare-ups. I’m thrilled to know Liz and her acupuncture and herbalist work, and will recommend her at every opportunity (and already have).” –Roni F.
Liz is My Miracle Worker!
# coding: utf-8
import json
import logging
import webapp2
from webapp2_extras import sessions
from google.appengine.api.taskqueue import TombstonedTaskError, TaskAlreadyExistsError, DuplicateTaskNameError
from domain.entity import User
import error
class BaseHandler(webapp2.RequestHandler):
def dispatch(self):
self.session_store = sessions.get_store(request=self.request)
user = self.session.get('user')
if user:
self.user = User.from_json(user)
else:
self.user = None
try:
return webapp2.RequestHandler.dispatch(self)
except webapp2.HTTPException as e:
self.response.set_status(e.code)
if e.message:
self.response.write(e.message)
finally:
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def session(self):
return self.session_store.get_session()
@property
def session_id(self):
cookie_name = self.session_store.config['cookie_name']
return self.request.cookies[cookie_name]
class JsonHandler(BaseHandler):
def dispatch(self):
j = super(JsonHandler, self).dispatch()
self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
if j is not None:
self.response.out.write(json.dumps(j))
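# Example (hypothetical handler): the dict returned from get() is
# serialized by JsonHandler.dispatch above.
#   class PingHandler(JsonHandler):
#       def get(self):
#           return {'ok': True}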
class TaskHandler(BaseHandler):
"""Handle unrecoverable errors."""
def dispatch(self):
try:
super(TaskHandler, self).dispatch()
# Unrecoverable Exceptions such as Invalid Parameter
except error.TaskUnrecoverableException as e:
logging.error(e)
except (TombstonedTaskError,
TaskAlreadyExistsError,
DuplicateTaskNameError) as e:
logging.error(e)
def signin_user_only(f):
"""Raise UnauthorizedException if session user is None
Examples:
class MyHandler(BaseHandler):
        @signin_user_only
def get(self):
# following code is executed only if user is signed in.
...
"""
def wrapper(self, *args, **keywords):
if not self.user:
raise error.UnauthorizedException('Need sign in')
else:
return f(self, *args, **keywords)
return wrapper
Yes, I see that those links are redirecting to a drug sales website. Have you downloaded the latest definition updates for my plugin and run another Complete Scan?
Can you send me the results of your last Complete Scan via email?
import string
import re
import os
def read_from_dir(path='../Data/singleFile/'):
pool = []
dirs = [i for i in os.listdir(path) if i.find('cpp') != -1]
for d in dirs:
with open(os.path.join(path, d)) as f:
pool.append(''.join(f.readlines()))
return pool
def clearFile(f, pattern=None):
if pattern is None:
pattern = r'[^a-z0-9\"\':;><,.\/\?~`!@#$%^&*()_\+\-=\|\{\}\[\]]'
return ''.join(re.split(pattern, f.lower()))
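# Winnowing (Schleimer, Wilkerson & Aiken, 2003): slide a window of
# window_size over the hash sequence and keep the minimum hash of each
# window (rightmost minimum on ties, via the negated index), skipping
# positions that were already selected. The surviving (hash, -position)
# pairs form the document fingerprint compared in sim().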
def winnowing(hv, window_size=4):
if window_size > len(hv):
        raise ValueError('window size must not exceed the length of the hash list')
forb = set()
fp = []
for index in range(len(hv) - window_size + 1):
t = [(hv[i], -i) for i in range(index, index + window_size)]
t = sorted(t)
if t[0][1] not in forb:
fp.append(tuple(t[0]))
forb.add(t[0][1])
else:
for j in t[1:]:
if j[0] != t[0][0]:
break
if j[1] not in forb:
forb.add(j[1])
fp.append(tuple(j))
return fp
def rolling_hash(s, gram_size=4, hash_table=None):
if hash_table is None:
hash_table = ' {}{}{}'.format(string.ascii_lowercase, '0123456789', '"\':;><,.\/?~`!@#$%^&*()_+-=|{}[]')
if len(s) < gram_size:
        raise ValueError('String length should be at least gram_size')
seq = []
hv = 0
n = 10 ** (gram_size - 1)
for i in s[:gram_size]:
h = hash_table.find(i)
if h == -1:
raise ValueError('Can not find in hash table')
hv *= 10
hv += h
head = 0
seq.append(hv)
for i in s[gram_size:]:
h = hash_table.find(i)
if h == -1:
raise ValueError('Can not find in hash table')
hv -= n * hash_table.find(s[head])
hv *= 10
hv += h
head += 1
seq.append(hv)
return seq
def get_hash_value(dat, hash_table=None, gram_size=4):
    # Variant of rolling_hash with a growable table: characters missing
    # from the table are appended to it instead of raising ValueError.
    if hash_table is None:
        hash_table = list(string.ascii_letters + ' ')
    def code(ch):
        try:
            return hash_table.index(ch)
        except ValueError:
            hash_table.append(ch)
            return len(hash_table) - 1
    fp = 0
    for t in dat[:gram_size]:
        fp = fp * 10 + code(t)
    rst = [fp]
    n = 10 ** (gram_size - 1)
    head = 0
    for t in dat[gram_size:]:
        # remove the head character's contribution, shift, add the new one
        fp = (fp - n * code(dat[head])) * 10 + code(t)
        head += 1
        rst.append(fp)
    return set(rst)
def sim(base, com):
base = set(base)
com = set(com)
inter = base.intersection(com)
score = (len(inter) / len(base), len(inter) / len(com))
dist = 1 - (score[0] ** 2 / 2 + score[1] ** 2 / 2) ** 0.5
return (score, dist)
def cal_sim_dup(fp_pool):
sim_pool = {}
for index, base in enumerate(fp_pool):
for cindex, com in enumerate(fp_pool[index:]):
if cindex == 0:
continue
sim_pool['{},{}'.format(index, index + cindex)] = sim(base, com)
return sim_pool
if __name__ == '__main__':
files = read_from_dir()
files = [clearFile(f) for f in files]
    hv = [rolling_hash(f) for f in files]  # files were already lowercased by clearFile
fingerprint = [winnowing(i) for i in hv]
One blog reader asked me how to customize error messages. It's a lucky day, as I am posting on that very topic. You can easily customize the database error messages in any language. In this post I will demonstrate how to customize the error messages; I am customizing the error messages of the primary key and unique constraints.
You can find the error messages code at the following links.
In my previous post I mentioned skipping validation on the page definition, and I have found a very useful blog post by Jobinesh.
af:carousel is a cool component in Oracle ADF which provides a gallery view of images (or other components) and enhances the user experience on the web.
# a compact re-implementation of the builtins (deliberately shadows min/max)
def _extreme(args, key, reverse):
    # a single iterable argument (including a single string) means:
    # compare its items rather than the argument itself
    if len(args) == 1 and (isinstance(args[0], str) or hasattr(args[0], '__iter__')):
        args = args[0]
    args = sorted(args, key=key, reverse=reverse)
    return args[0]

def min(*args, **kwargs):
    return _extreme(args, kwargs.get("key", None), reverse=False)

def max(*args, **kwargs):
    return _extreme(args, kwargs.get("key", None), reverse=True)
print(min([1,2,3]))
print(max((2,1)))
print(min(8,9))
print(max("yolo"))
print(min("j")) |
Mobile Phone service from Comcast near me in Brackenridge, PA.
Brackenridge, PA mobile phone fanatics: get Xfinity Mobile service today!
Good news, Brackenridge! Mobile service from Xfinity is a new kind of network designed to save you money. Conveniently included with your Xfinity Internet service.
Brackenridge, PA residents can get up to five lines of unlimited talk and text included with their Xfinity Internet at no extra cost, so all they pay for is data.
Brackenridge Mobile phone users can enjoy America’s largest and most reliable 4G LTE network combined with the most WiFi hotspots nationwide. |
"""
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
The parameter class.
"""
from datetime import date
from datetime import time as time_class
from restx.platform_specifics import PLATFORM, PLATFORM_JYTHON
from org.mulesoft.restx.exception import RestxException
#
# Types for resource parameters
#
PARAM_STRING_LIST = "string_list"
PARAM_STRING = "string"
PARAM_PASSWORD = "password"
PARAM_BOOL = "boolean"
PARAM_DATE = "date"
PARAM_TIME = "time"
PARAM_NUMBER_LIST = "number_list"
PARAM_NUMBER = "number"
PARAM_URI = "uri"
#
# Each entry in the following table has the format:
# ( storage_types, runtime_types, conversion_func )
#
# 'storage_types' defines a list of types that this value may
# have after being read in via JSON. For example, 'date'
# will not be recognized by JSON, it is stored and loaded
# as a string. So, 'str' is one of the valid types for date
# parameters.
#
# 'runtime_types' is a list of types that are acceptable after
# proper conversion, so that we can actually work with that
# type in our programming language. For example, we really
# want dates to be of class 'date', which is why for date
# parameters we specify that type.
#
# 'conversion_func' is a function that can be used to convert
# from a storage-type to a runtime-type. Calling this function
# also provides a proper sanity check, since those functions
# will throw errors if they fail.
#
# Note: Date is defined as YYYY-MM-DD
# Note: Time is defined as HH:MM:SS
#
def __numstr_to_num(x):
    if type(x) in [ int, float ]:
        return x
    elif type(x) in [ str, unicode ]:
        try:
            return int(x)
        except ValueError:
            return float(x)
    # Can't convert anything else
    return None
def __bool_convert(x):
if type(x) is bool:
return x
else:
if x.lower() in [ "y", "yes", "true", "t", "1" ]:
return True
else:
return False
#
# Type conversion turns out to be quite odd. The more languages
# we enable, the more 'similar' types we have to recognize and
# deal with. For example, a component may expect a number as
# attribute. For a Java component that might be a BigDecimal,
# for a Python component, it might just be int or float.
# So, considering Python as the base, we start by defining all
# the python types for a particular RESTx type. Then we add the
# types of the other languages if and when appropriate.
TYPES_DICT = {
"STRING_LIST_TYPES" : [ list ],
"STRING_TYPES" : [ unicode, str ],
"PASSWORD_TYPES" : [ unicode, str ],
"BOOL_TYPES" : [ bool ],
"DATE_TYPES" : [ unicode, str ],
"TIME_TYPES" : [ unicode, str ],
"NUMBER_LIST_TYPES" : [ list ],
"NUMBER_TYPES" : [ int, float ],
"URI_TYPES" : [ unicode, str ],
}
if PLATFORM == PLATFORM_JYTHON:
# Now selectively add some of the Java types
from java.math import BigDecimal
TYPES_DICT["NUMBER_TYPES"].append(BigDecimal)
def __list_to_strlist(x):
if type(x) is not list:
x = [ x ]
return [ str(e) for e in x ]
def __list_to_numlist(x):
    if type(x) is not list:
        x = [ x ]
    nlist = []
    for e in x:
        converted = __numstr_to_num(e)
        # test 'is None' rather than truthiness, so a legitimate 0 still passes
        if converted is None:
            return None
        nlist.append(converted)
    return nlist
TYPE_COMPATIBILITY = {
PARAM_STRING_LIST : (TYPES_DICT["STRING_LIST_TYPES"], [ list ], __list_to_strlist),
PARAM_STRING : (TYPES_DICT["STRING_TYPES"], [ str ], None),
PARAM_PASSWORD : (TYPES_DICT["PASSWORD_TYPES"], [ str ], None),
PARAM_BOOL : (TYPES_DICT["BOOL_TYPES"], [ bool ], __bool_convert),
PARAM_DATE : (TYPES_DICT["DATE_TYPES"], [ date ], lambda x : date(*[ int(elem) for elem in x.split("-")])),
PARAM_TIME : (TYPES_DICT["TIME_TYPES"], [ time_class ], lambda x : time_class(*[ int(elem) for elem in x.split(":")])),
PARAM_NUMBER_LIST : (TYPES_DICT["NUMBER_LIST_TYPES"], [ list ], __list_to_numlist),
PARAM_NUMBER : (TYPES_DICT["NUMBER_TYPES"], [ int, float ], __numstr_to_num),
PARAM_URI : (TYPES_DICT["URI_TYPES"], [ str ], None)
}
class ParameterDef(object):
"""
This class encapsulates a parameter definition.
Parameters are defined by each individual component.
Therefore, in its __init__() method each component
has to create its dictionary of ParameterDef classes
and make it available via the getParams() method.
By default, a parameter is 'required'. Note that
this parameter definition does not contain the
name of the parameter, since the name is merely
the key in the parameter definition dictionary,
which is maintained by each component.
"""
def __init__(self, ptype, desc="", required=True, default=None, choices=None):
"""
Define a new parameter.
A parameter is defined with the following attributes:
@param ptype: A type, such as PARAM_STRING, etc.
        @type ptype: string
@param desc: A short, one-line description in human readable form.
@type desc: string
@param required: A flag indicating whether this parameter needs to be
set by the resource creator, or whether a default
value can be used.
@type required: boolean
@param default: A default value for this parameter of a suitable type.
Only used if 'required == False'.
@type default: Whatever is needed as default value
@param choices: If the allowed input values should be restricted to a
number of choices, specify them here as a list of strings.
@type choices: list
"""
self.ptype = ptype
self.desc = desc
self.required = required
if not self.required and default is None:
raise RestxException("A default value is required for optional parameters")
if self.required and default:
raise RestxException("A default value cannot be provided for a required parameter")
self.default = default
self.choices = choices
if self.choices:
str_choices = [ str(c) for c in self.choices ]
if self.default and str(self.default) not in str_choices:
raise RestxException("Specified default value is not listed in 'choices'")
if self.ptype not in [ PARAM_STRING, PARAM_NUMBER, PARAM_STRING_LIST, PARAM_NUMBER_LIST ]:
raise RestxException("Choices are not supported for this type.")
if self.ptype in [ PARAM_STRING_LIST, PARAM_NUMBER_LIST ]:
self.is_list = True
else:
self.is_list = False
def isList(self):
"""
Return an indication whether this is a list type or not.
"""
return self.is_list
def getDefaultVal(self):
"""
Return default value.
Javaesque naming convention, because the method was first needed
on the Java side of things.
@return: The default value.
@rtype: object
"""
return self.default
def as_dict(self):
"""
Unwraps this single parameter definition into a plain dictionary.
Needed for browsing or accessing the component's meta info.
@return: Dictionary representation of the parameter.
@rtype: dict
"""
d = dict(type = self.ptype,
desc = self.desc,
required = self.required)
if not self.required:
d['default'] = self.default
if self.choices:
d['val_choices'] = self.choices
if self.is_list:
d['multi_choice'] = True
return d
def html_type(self, name, initial=None):
"""
Return the HTML form field type for a value of this type.
Needed when we display a resource creation form.
@return: A string containing "checkbox" or "text"
@rtype: string
"""
if self.ptype == PARAM_BOOL:
yes_value = "checked " if initial == "yes" else ""
no_value = "checked " if initial == "no" else ""
return '''<label for="%s_yes"><input %stype="radio" id="%s_yes" name="%s" value="yes" />yes</label><br>
<label for="%s_no"><input %stype="radio" id="%s_no" name="%s" value="no" />no</label>''' % (name, yes_value, name, name, name, no_value, name, name)
else:
if self.choices:
if type(initial) is not list:
initial = [ initial ]
buf = '<select '
if self.ptype in [ PARAM_STRING_LIST, PARAM_NUMBER_LIST ]:
buf += "multiple size=%d " % min(8, len(self.choices))
multiple = True
else:
multiple = False
buf += 'name="%s" id="%s">' % (name, name)
if self.default and not multiple:
buf += '<option value="">--- Accept default ---</option>'
                # 'initial' may arrive as a string, since that is all the type
                # information we have when converting the form input back into
                # a data structure
                buf += '%s</select>' % ''.join([ '<option value="%s"%s>%s</option>' % (c, ' selected="selected"' if initial and str(c) in initial else "", c) for c in self.choices ])
return buf
if initial:
init_val = 'value="%s" ' % initial
else:
init_val = ''
if self.ptype == PARAM_PASSWORD:
type_str = "password"
else:
type_str = "text"
return '<input type="%s" name="%s" id="%s" %s/>' % (type_str, name, name, init_val)
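#
# A minimal usage sketch (hypothetical component code; the names below are
# illustrative, not part of RESTx itself):
#
# params = {
#     "count" : ParameterDef(PARAM_NUMBER, "How many rows to return",
#                            required=False, default=10),
#     "token" : ParameterDef(PARAM_PASSWORD, "API token"),
# }
# params["count"].as_dict()
# # -> {'type': 'number', 'desc': 'How many rows to return',
# #     'required': False, 'default': 10}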
|
Amol Sharma reported Viacom is planning to launch an interactive children’s TV channel in the U.S. that will allow parents to personalize content. The new “My Nickelodeon Junior” channel will be available in coming months to customers of Verizon Communications Inc.’s FiOS TV service with plans to roll it out to other U.S. pay-TV operators later.
Viacom Inc. plans to launch a children’s TV channel in the U.S. that can be programmed according to parents’ tastes and provide access to hundreds of old episodes of shows like “Dora the Explorer,” in a novel TV-delivery approach that combines elements of streaming services Pandora and Netflix Inc.
The new “My Nickelodeon Junior” interactive channel will be available in coming months to customers of Verizon Communications Inc.’s FiOS TV service, and Viacom plans to roll it out to other U.S. pay-TV operators later, the companies say.
It will be adjacent in the TV menu to the main “Nick Jr.” channel that is home to “Dora,” “The Backyardigans,” “Bubble Guppies,” and other fare targeted at a preschool audience.
TV viewers have two basic options nowadays: They can watch a channel with programming scheduled by a TV network, or sift through on-demand services to find a show they like. The My Nick Jr. technology introduces a third way to deliver TV–giving each household a customized channel.
Based on those preferences, My Nick Jr. will choose content to air from hundreds of episodes in the Nick Jr. library.
Children can rate shows by clicking on smile or frown icons, and the service will tweak the programming lineup accordingly. Parents can get reports on what their children watch and can program the channel to shut off after a set period. There won’t be ads.
The new service highlights how media companies and pay-TV operators are trying to modernize TV viewing at a time when consumers want greater control over what they watch and services like Netflix and Amazon.com Inc.’s Prime Instant Video are offering compelling alternatives to cable television.
TV networks and pay-TV operators also are developing sophisticated mobile-TV apps and beefing up video on-demand offerings.
Netflix and Amazon have become particularly popular as an outlet for children’s programming.
Terry Denson, Verizon’s vice president of content strategy and acquisition, said My Nick Jr. will help the company compete against these streaming-video services. He said other media companies and distributors could launch their own interactive channels.
For Viacom, streaming outlets are customers–it licenses shows to them. But the company also wants to ensure that the pay-TV ecosystem–which provides the bulk of its revenue and profit through carriage fees and advertising–is healthy and technologically advanced.
Viacom hopes the new channel will help it negotiate higher carriage fees and give characters in its shows greater exposure, helping its consumer products licensing business.
One risk for the media company is that the new channel could draw viewers away from existing Nickelodeon channels. In the U.S., the flagship Nickelodeon network has a preschool programming block that averaged 570,000 viewers among children 2 to 5 years old in 2013.
Like Walt Disney Co.’s Disney Junior and Comcast Corp.’s Sprout, the Nick Jr. channel caters to a preschool audience and shows repeats of episodes aired in that block. Viacom said it gained confidence before the U.S. launch by testing the service in France, where it thought the stakes of failure would be lower.
The interactive channel was a good fit for CanalSat, a unit of Vivendi SA that was looking for ways to differentiate itself from the array of free over-the-air channels competing in France’s TV market. It fit nicely with CanalSat’s other efforts to personalize TV viewing, such as technology that recommends shows to viewers based on what they’ve previously viewed.
“The future of pay TV relies on personalization,” said Claire Basini, marketing director of CanalSat. “We were convinced of the potential” of Viacom’s service, she said.
Viacom could potentially extend the idea of a personalized channel to its other properties like MTV, executives said.
It is unclear how widely other companies will adopt the approach. One constraining factor: Many consumers still have set-top boxes hooked up to their TVs that can’t support interactive services like My Nick Jr.
Verizon and AT&T have the most advanced TV set-top boxes since they are the latest entrants in the market. Eventually, Verizon will add support for mobile devices, so consumers can launch My Nick Jr. from a tablet or smartphone–a feature that is available on CanalSat. |
# Copyright (c) 2009-2010 Aldo Cortesi
# Copyright (c) 2010 Lee McCuller
# Copyright (c) 2010 matt
# Copyright (c) 2010, 2014 dequis
# Copyright (c) 2010, 2012, 2014 roger
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Kenji_Takahashi
# Copyright (c) 2011 Paul Colomiets
# Copyright (c) 2011 Tzbob
# Copyright (c) 2012-2015 Tycho Andersen
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import contextlib
from typing import Dict, Set
from libqtile import utils
from libqtile.log_utils import logger
subscriptions = {} # type: Dict
SKIPLOG = set() # type: Set
def clear():
subscriptions.clear()
class Subscribe:
def __init__(self):
hooks = set([])
for i in dir(self):
if not i.startswith("_"):
hooks.add(i)
self.hooks = hooks
def _subscribe(self, event, func):
lst = subscriptions.setdefault(event, [])
if func not in lst:
lst.append(func)
return func
def startup_once(self, func):
"""Called when Qtile has started on first start
This hook is called exactly once per session (i.e. not on each
``lazy.restart()``).
**Arguments**
None
"""
return self._subscribe("startup_once", func)
def startup(self, func):
"""Called when qtile is started
**Arguments**
None
"""
return self._subscribe("startup", func)
def startup_complete(self, func):
"""Called when qtile is started after all resources initialized
**Arguments**
None
"""
return self._subscribe("startup_complete", func)
def shutdown(self, func):
"""Called before qtile is shutdown
**Arguments**
None
"""
return self._subscribe("shutdown", func)
def restart(self, func):
"""Called before qtile is restarted
**Arguments**
None
"""
return self._subscribe("restart", func)
def setgroup(self, func):
"""Called when group is changed
**Arguments**
None
"""
return self._subscribe("setgroup", func)
def addgroup(self, func):
"""Called when group is added
**Arguments**
* name of new group
"""
return self._subscribe("addgroup", func)
def delgroup(self, func):
"""Called when group is deleted
**Arguments**
* name of deleted group
"""
return self._subscribe("delgroup", func)
def changegroup(self, func):
"""Called whenever a group change occurs
**Arguments**
None
"""
return self._subscribe("changegroup", func)
def focus_change(self, func):
"""Called when focus is changed, including moving focus between groups or when
focus is lost completely
**Arguments**
None
"""
return self._subscribe("focus_change", func)
def float_change(self, func):
"""Called when a change in float state is made
**Arguments**
None
"""
return self._subscribe("float_change", func)
def group_window_add(self, func):
"""Called when a new window is added to a group
**Arguments**
* ``Group`` receiving the new window
* ``Window`` added to the group
"""
return self._subscribe("group_window_add", func)
def client_new(self, func):
"""Called before Qtile starts managing a new client
Use this hook to declare windows static, or add them to a group on
startup. This hook is not called for internal windows.
**Arguments**
* ``Window`` object
Examples
--------
::
@libqtile.hook.subscribe.client_new
def func(c):
if c.name == "xterm":
c.togroup("a")
elif c.name == "dzen":
c.cmd_static(0)
"""
return self._subscribe("client_new", func)
def client_managed(self, func):
"""Called after Qtile starts managing a new client
Called after a window is assigned to a group, or when a window is made
static. This hook is not called for internal windows.
**Arguments**
* ``Window`` object of the managed window
"""
return self._subscribe("client_managed", func)
def client_killed(self, func):
"""Called after a client has been unmanaged
**Arguments**
* ``Window`` object of the killed window.
"""
return self._subscribe("client_killed", func)
def client_focus(self, func):
"""Called whenever focus moves to a client window
**Arguments**
* ``Window`` object of the new focus.
"""
return self._subscribe("client_focus", func)
def client_mouse_enter(self, func):
"""Called when the mouse enters a client
**Arguments**
* ``Window`` of window entered
"""
return self._subscribe("client_mouse_enter", func)
def client_name_updated(self, func):
"""Called when the client name changes
**Arguments**
* ``Window`` of client with updated name
"""
return self._subscribe("client_name_updated", func)
def client_urgent_hint_changed(self, func):
"""Called when the client urgent hint changes
**Arguments**
* ``Window`` of client with hint change
"""
return self._subscribe("client_urgent_hint_changed", func)
def layout_change(self, func):
"""Called on layout change
**Arguments**
* layout object for new layout
* group object on which layout is changed
"""
return self._subscribe("layout_change", func)
def net_wm_icon_change(self, func):
"""Called on `_NET_WM_ICON` chance
**Arguments**
* ``Window`` of client with changed icon
"""
return self._subscribe("net_wm_icon_change", func)
def selection_notify(self, func):
"""Called on selection notify
**Arguments**
* name of the selection
* dictionary describing selection, containing ``owner`` and
``selection`` as keys
"""
return self._subscribe("selection_notify", func)
def selection_change(self, func):
"""Called on selection change
**Arguments**
* name of the selection
* dictionary describing selection, containing ``owner`` and
``selection`` as keys
"""
return self._subscribe("selection_change", func)
def screen_change(self, func):
"""Called when the output configuration is changed (e.g. via randr in X11).
**Arguments**
* ``xproto.randr.ScreenChangeNotify`` event (X11) or None (Wayland).
"""
return self._subscribe("screen_change", func)
def current_screen_change(self, func):
"""Called when the current screen (i.e. the screen with focus) changes
**Arguments**
None
"""
return self._subscribe("current_screen_change", func)
def enter_chord(self, func):
"""Called when key chord begins
**Arguments**
* name of chord(mode)
"""
return self._subscribe("enter_chord", func)
def leave_chord(self, func):
"""Called when key chord ends
**Arguments**
None
"""
return self._subscribe("leave_chord", func)
subscribe = Subscribe()
class Unsubscribe(Subscribe):
"""
This class mirrors subscribe, except the _subscribe member has been
overridden to removed calls from hooks.
"""
def _subscribe(self, event, func):
lst = subscriptions.setdefault(event, [])
try:
lst.remove(func)
except ValueError:
raise utils.QtileError(
"Tried to unsubscribe a hook that was not"
" currently subscribed"
)
unsubscribe = Unsubscribe()
def _fire_async_event(co):
loop = None
with contextlib.suppress(RuntimeError):
loop = asyncio.get_running_loop()
if loop is None:
asyncio.run(co)
else:
asyncio.ensure_future(co)
def fire(event, *args, **kwargs):
if event not in subscribe.hooks:
raise utils.QtileError("Unknown event: %s" % event)
if event not in SKIPLOG:
logger.debug("Internal event: %s(%s, %s)", event, args, kwargs)
for i in subscriptions.get(event, []):
try:
if asyncio.iscoroutinefunction(i):
_fire_async_event(i(*args, **kwargs))
elif asyncio.iscoroutine(i):
_fire_async_event(i)
else:
i(*args, **kwargs)
except: # noqa: E722
logger.exception("Error in hook %s", event)
|
You'll have nothing but fun in the sun wearing this darling new Prairie Underground dress. An easy shift style, with v-neckline, pockets for ease, and a fantastic checkered pattern. The hemp/cotton fabric is light and soft--this will be a great dress to throw into your weekend bag when you jet off to someplace fun! |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-28 13:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pages', '0005_auto_20160923_1219'),
('organization-pages', '0013_auto_20161026_1025'),
]
operations = [
migrations.CreateModel(
name='PageRelatedTitle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=1024, null=True, verbose_name='title')),
('page', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='related_title', to='pages.Page', verbose_name='page')),
],
options={
'verbose_name': 'related title',
},
),
migrations.AlterOrderWithRespectTo(
name='pagerelatedtitle',
order_with_respect_to='page',
),
]
|
Our attention to detail and understanding of the requirements of our commercial clients ensures that we deliver within timescale and on budget.
Our dedicated contracts team will take your project from conception to completion, maintaining full ongoing communication throughout.
From luxury homes to flatted developments, Kitchens by J.S. Geddes design and specify always to suit your client’s requirements and the developer’s budget - but don’t take our word for it - speak to our previous commercial clients.
Enjoy commercial partner status with Poggenpohl, RWK, Siemens and Miele. |
import zipfile
import sys
import paradoxparser
import datetime
TECH_SCORE_MULTIPLIER = 10
ACCUMULATED_ENERGY_MULTIPLIER = 0.1
ACCUMULATED_MINERALS_MULTIPLIER = 0.05
ACCUMULATED_INFLUENCE_MULTIPLIER = 0.05
ENERGY_PRODUCTION_MULTIPLIER = 2
MINERAL_PRODUCTION_MULTIPLIER = 1.5
INFLUENCE_PRODUCTION_MULTIPLIER = 1
NUM_SUBJECTS_MULTIPLIER = 30
MILITARYPOWER_MULTIPLIER = 0.03
NUM_COLONIES_MULTIPLIER = 15
NUM_PLANETS_MULTIPLIER = 0.01
class Country:
def __init__(self):
self.name = ''
self.score = 0
self.techscore = 0
self.currentenergy = 0
self.currentminerals = 0
self.currentinfluence = 0
self.energyproduction = 0
self.mineralproduction = 0
self.influenceproduction = 0
self.physicsResearch = 0
self.societyResearch = 0
self.engineeringResearch = 0
self.population = 0
self.numsubjects = 0
self.militarypower = 0
self.numcolonies = 0
self.numplanets = 0
self.numarmies = 0
self.type = ''
self.id = '0'
def calcscore(self):
self.score += TECH_SCORE_MULTIPLIER * self.techscore
self.score += ACCUMULATED_ENERGY_MULTIPLIER * self.currentenergy
self.score += ACCUMULATED_MINERALS_MULTIPLIER * self.currentminerals
self.score += ACCUMULATED_INFLUENCE_MULTIPLIER * self.currentinfluence
self.score += ENERGY_PRODUCTION_MULTIPLIER * self.energyproduction
self.score += MINERAL_PRODUCTION_MULTIPLIER * self.mineralproduction
self.score += INFLUENCE_PRODUCTION_MULTIPLIER * self.influenceproduction
self.score += NUM_SUBJECTS_MULTIPLIER * self.numsubjects
self.score += MILITARYPOWER_MULTIPLIER * self.militarypower
self.score += NUM_COLONIES_MULTIPLIER * self.numcolonies
self.score += NUM_PLANETS_MULTIPLIER * self.numplanets
    def _getResearchPenalty(self):
        return 0.1 * max(0, self.numcolonies - 1) + 0.01 * max(0, self.population - 10)
def getPhysicsResearchWithPenalty(self):
return self.physicsResearch / (1 + self._getResearchPenalty())
def getSocietyResearchWithPenalty(self):
return self.societyResearch / (1 + self._getResearchPenalty())
def getEngineeringResearchWithPenalty(self):
return self.engineeringResearch / (1 + self._getResearchPenalty())
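def _to_float(value):
    # Helper for the save format: some resource entries are a bare number
    # string, others are a list whose first element is the number.
    if (type(value) == str):
        return float(value)
    return float(value[0])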
def getMatchedScope(text, scopeName):
    # Extract the top-level '<scopeName>={...}' block by tracking brace depth,
    # ignoring any braces that appear inside quoted strings.
    countries = text[text.find(scopeName + '={'):]
    t = 1
    instring = False
    for pos in range(len(scopeName + '={') + 1, len(countries)):
        c = countries[pos]
        if c == '{' and not instring:
            t += 1
        elif c == '}' and not instring:
            t -= 1
        elif c == '"':
            instring = not instring
        if (t == 0):
            countries = countries[:pos + 1]
            break
    result = paradoxparser.psr.parse(countries)
    return result
def makeLedgerForSave(path, basePath):
save = zipfile.ZipFile(path)
f = save.open('gamestate')
s = str(f.read(), 'utf-8')
f.close()
playertaglocation = s.find('player={')
playertag = s[playertaglocation:s.find('}', playertaglocation)]
playercountry = playertag[playertag.find('country=')+len('country='):playertag.find('}')].strip()
country_raw_data = getMatchedScope(s,"country")[0][1]
planets = getMatchedScope(s,"planet")[0][1]
ret = ''
retlist = []
contactlist = []
num = 1
for i in country_raw_data:
if (i[1] != 'none'):
ret2 = ''
isUs = False
if (i[0] == playercountry):
isUs = True
contactlist.append(i[0])
relman_part = paradoxparser.paradox_dict_get_child_by_name(i[1], 'relations_manager')
if (relman_part is not None):
for j in relman_part:
countryid = paradoxparser.paradox_dict_get_child_by_name(j[1], 'country')
commun = paradoxparser.paradox_dict_get_child_by_name(j[1], 'communications')
if (commun != None):
contactlist.append(countryid)
country = Country()
country.id = i[0]
namepart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'name')
if (namepart is not None):
country.name = namepart.replace('"', '')
techpart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'tech_status')
if (techpart is not None):
country.techscore = sum(int(j[1]) for j in techpart if j[0] == 'level')
militarypowerpart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'military_power')
if (militarypowerpart is not None):
country.militarypower = float(militarypowerpart)
empiretype = paradoxparser.paradox_dict_get_child_by_name(i[1], 'type')
if (empiretype is not None):
country.type = empiretype.replace('"', '')
if (country.type not in ('fallen_empire', 'default')):
continue
subjectpart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'subjects')
if (subjectpart is not None):
country.numsubjects = len(subjectpart)
armiespart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'owned_armies')
if (armiespart is not None):
country.numarmies = len(armiespart)
planetsspart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'controlled_planets')
if (planetsspart is not None):
country.numplanets = len(planetsspart)
controlledplanetsspart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'owned_planets')
if (controlledplanetsspart is not None):
country.numcolonies = len(controlledplanetsspart)
                country.population = 0
                for planetId in controlledplanetsspart:
                    planetObject = planets[int(planetId)][1]
                    popObject = next((x[1] for x in planetObject if x[0] == 'pop'), None)
                    # a planet still under colonization has no 'pop' key
                    if (popObject is not None):
                        country.population += len(popObject)
            modulespart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'modules')
            if (modulespart is not None):
                economymodule = paradoxparser.paradox_dict_get_child_by_name(modulespart, 'standard_economy_module')
                if (economymodule is not None):
                    resourcesmodule = paradoxparser.paradox_dict_get_child_by_name(economymodule, 'resources')
                    if (resourcesmodule is not None):
                        # current stockpiles: energy / minerals / influence
                        for attr, key in (('currentenergy', 'energy'),
                                          ('currentminerals', 'minerals'),
                                          ('currentinfluence', 'influence')):
                            value = paradoxparser.paradox_dict_get_child_by_name(resourcesmodule, key)
                            if (value is not None):
                                setattr(country, attr, _to_float(value))
                    lastmonthmodule = paradoxparser.paradox_dict_get_child_by_name(economymodule, 'last_month')
                    if (lastmonthmodule is not None):
                        # last month's income and research output
                        for attr, key in (('energyproduction', 'energy'),
                                          ('mineralproduction', 'minerals'),
                                          ('influenceproduction', 'influence'),
                                          ('physicsResearch', 'physics_research'),
                                          ('societyResearch', 'society_research'),
                                          ('engineeringResearch', 'engineering_research')):
                            value = paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, key)
                            if (value is not None):
                                setattr(country, attr, _to_float(value))
country.calcscore()
ret2 += '<tr>'
ret2 += '<td>%s</td>' % num
if (isUs):
ret2 += '<td hiddenvalue=%s>★</td>' % num
else:
ret2 += '<td hiddenvalue=%s> </td>' % num
ret2 += '<td class="name">%s</td>' % country.name
ret2 += '<td>{:10.0f}</td>'.format(country.score).strip()
ret2 += '<td>{:10.0f}</td>'.format(country.militarypower)
ret2 += '<td>%d</td>' % country.techscore
ret2 += '<td>%d</td>' % country.numcolonies
ret2 += '<td>%d</td>' % country.numplanets
ret2 += '<td>%d</td>' % country.numsubjects
production = ('{:10.0f}'.format(country.energyproduction)).strip()
if (country.energyproduction >= 0):
netincome = '<td class="positive">+%s</td>' % production
else:
netincome = '<td class="negative">%s</td>' % production
ret2 += '<td>{:10.0f}</td>'.format(country.currentenergy) + netincome
production = ('{:10.0f}'.format(country.mineralproduction)).strip()
if (country.mineralproduction >= 0):
netincome = '<td class="positive">+%s</td>' % production
else:
netincome = '<td class="negative">%s</td>' % production
ret2 += '<td>{:10.0f}</td>'.format(country.currentminerals) + netincome
production = ('{:10.1f}'.format(country.influenceproduction)).strip()
if (country.influenceproduction >= 0):
netincome = '<td class="positive">+%s</td>' % production
else:
netincome = '<td class="negative">%s</td>' % production
ret2 += '<td>{:10.0f}</td>'.format(country.currentinfluence) + netincome
ret2 += '<td>%.1f</td>' % country.getPhysicsResearchWithPenalty()
ret2 += '<td>%.1f</td>' % country.getSocietyResearchWithPenalty()
ret2 += '<td>%.1f</td>' % country.getEngineeringResearchWithPenalty()
ret2 += '<td>%d</td>' % country.population
ret2 += '</tr>'
retlist.append((country.id, ret2))
num += 1
## print(country.name)
## print(country.techscore)
## print(country.militarypower)
## print(country.type)
## print(country.numsubjects)
## print(country.numarmies)
## print(country.numplanets)
## print(country.numcolonies)
## print(country.currentenergy)
## print(country.currentminerals)
## print(country.currentinfluence)
## print(country.energyproduction)
## print(country.mineralproduction)
## print(country.influenceproduction)
retlist2 = []
for i in retlist:
if (i[0] in contactlist):
retlist2.append(i[1])
ret = "\n".join(retlist2)
return ret
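# A minimal usage sketch (assumption: invoked with the path to a Stellaris
# save file; the basePath argument is accepted but currently unused):
if __name__ == '__main__':
    rows = makeLedgerForSave(sys.argv[1], '.')
    print('<table>\n' + rows + '\n</table>')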
|
Simmer water, polenta, potato, carrot, and onion until tender (about 15 minutes).
Add corn, bring back to a boil and simmer another 5 minutes.
Add soymilk. Bring just to boil. Season to taste. Garnish with parsley.
I was a bit confused by the mention of onion in the directions, when it wasn't listed in the ingredients, but I pressed on. I had half an onion I needed to use up anyway, so I chopped it up and threw it in with the potatoes and carrots. I put in a dash of garlic powder and some soy sauce to add a bit of zest. It was warm and rich and tasty - just right for a cold winter day! Thanks for sharing. |
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------
# Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------------
# "javascript" section for javascript. see @app.route('/config.js') in app/views.py
# oauth constants
QQ_OAUTH_STATE = "openhackathon" # todo state should not be constant. Actually it should be unguessable to prevent CSFA
HACKATHON_SERVER_ENDPOINT = "http://localhost:15000"
MYSQL_HOST = "localhost"
MYSQL_USER = "hackathon"
MYSQL_PWD = "hackathon"
MYSQL_DB = "hackathon"
MYSQL_PORT = 3306
Config = {
"environment": "local",
"endpoint": HACKATHON_SERVER_ENDPOINT,
"app": {
"secret_key": "secret_key"
},
"mysql": {
"connection": 'mysql://%s:%s@%s:%s/%s' % (MYSQL_USER, MYSQL_PWD, MYSQL_HOST, MYSQL_PORT, MYSQL_DB)
},
"login": {
"github": {
"user_info_url": 'https://api.github.com/user?access_token=',
"emails_info_url": 'https://api.github.com/user/emails?access_token='
},
"qq": {
"openid_url": 'https://graph.qq.com/oauth2.0/me?access_token=',
"user_info_url": 'https://graph.qq.com/user/get_user_info?access_token=%s&oauth_consumer_key=%s&openid=%s'
},
"gitcafe": {
# gitcafe domain: gcas.dgz.sh/gcs.dgz.sh for Staging, api.gitcafe.com/gitcafe.com for Production
"user_info_url": "https://gcas.dgz.sh/api/v1/user"
},
"weibo": {
"user_info_url": 'https://api.weibo.com/2/users/show.json?access_token=',
"email_info_url": 'https://api.weibo.com/2/account/profile/email.json?access_token='
},
"live": {
"user_info_url": 'https://apis.live.net/v5.0/me?access_token='
},
"token_expiration_minutes": 60 * 24
},
"azure": {
"cert_base": "/home/if/If/open-hackathon/open-hackathon/src/hackathon/certificates",
"container_name": "certificates"
},
"guacamole": {
"host": "http://localhost:8080"
},
"scheduler": {
"job_store": "mysql",
"job_store_url": 'mysql://%s:%s@%s:%s/%s' % (MYSQL_USER, MYSQL_PWD, MYSQL_HOST, MYSQL_PORT, MYSQL_DB)
},
"pre_allocate": {
"check_interval_minutes": 5,
"azure": 1,
"docker": 1
},
"storage": {
"type": "local",
"size_limit_kilo_bytes": 5 * 1024,
"azure": {
"account_name": "hackathon",
"account_key": "U4/oE3Ocwk9txQHw2qNOCCW2Fy05FBY3yQfzcKRNss5tnReyYTO7PDyeXQ8TWMMxXF07JrW7UXPyOhGgJlodEQ==",
"image_container": "images",
"template_container": "templates",
"blob_service_host_base": ".blob.core.chinacloudapi.cn"
}
},
"docker": {
"alauda": {
"token": "",
"namespace": "",
"endpoint": "https://api.alauda.cn"
}
},
"cloud_eclipse": {
"api": "http://www.idehub.cn/api/ide"
}
}
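# A minimal usage sketch (hypothetical helper, not part of the project):
# walk the nested Config dict with a dotted path instead of chained indexing.
#
# def config_get(cfg, dotted, default=None):
#     for key in dotted.split('.'):
#         if not isinstance(cfg, dict) or key not in cfg:
#             return default
#         cfg = cfg[key]
#     return cfg
#
# config_get(Config, "login.token_expiration_minutes")  # -> 1440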
|
Seems normal. My placements were all 4 days a week for however long my supervisor worked. So there were times when I worked four 9 or 10 hours days at a nursing home.
We were not allowed to give ours any gifts, except a card. The program itself had a small gift for each of our supervisors that we could give them.
Are online SLP programs worth it? Did you have trouble getting hired?
Most programs that are online also have a physical program. So they wouldn't really know. In the interviews I've gone on in the last few months, I've related things to my practicums but I wouldn't say any questions were specific to my school. To OP, all I'm saying is that I wouldn't worry about going to an online program.
In no way (I believe) will your transcripts or diploma mention that you completed an online program. It would be up to you to disclose that.
I used my school email but have since lost access to it. I would suggest a personal email, but maybe make a new one that sounds professional if it isn't already.
Do SLP professors grade on a curve?
We got curved in grad school on occasion.
Why did you apply if you had no intention of going? I would take the offer.
I don't have the exact emails anymore because my undergrad account is gone now, but from Facebook it looks like June 1st for the one school. Not sure about the other one, sorry!
I was in the same boat as you 2 years ago. I went to West Chester and always thought I would go there. I got accepted off the waitlist but had already committed to Delaware. I am glad that I chose Delaware over WCU because I knew how some of the professors at West Chester were.
Yes, I got off two waitlists. And my friend got accepted somewhere after first being rejected!
I've commuted about 40 min-1 hour for my whole graduate program so far. It hasn't seemed to affect my academics nor my social life negatively in any way.
I was told to fill out the graduate school application only when I got my acceptance email, not after I submitted my CSDCAS application. So my assumption is that that is still the case. Hope that helps.
Are you talking about the application to the UD graduate school in general? What is the supplemental application? I go there now.
Honestly don't remember but I've been in grad school for almost 2 years now and I haven't left.
I got this from Temple 2 years ago but I was not accepted. |
#!/usr/bin/python
#-*-coding:utf8-*-
import pygame
from pygame.locals import *
from gameobjects.vector2 import Vector2
from sys import exit
from random import randint
def main():
background_image_path=r'./picture/sushiplate.jpg'
sprite_image_path='./picture/fugu.png'
SCREEN_SIZE=(640,480)
clock=pygame.time.Clock()
position=Vector2(100.0,100.0)
heading=Vector2()
speed=100
pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE,0,32)
pygame.display.set_caption("hello world")
# font=pygame.font.SysFont('arial',16)
# font_height=font.get_linesize()
background=pygame.image.load(background_image_path).convert()
sprite=pygame.image.load(sprite_image_path).convert_alpha()
destination=Vector2(randint(0,640),randint(0,480))
while True:
for event in pygame.event.get():
if event.type==QUIT:
exit()
screen.blit(background,(0,0))
screen.blit(sprite,position)
time_passed=clock.tick()
time_passed_seconds=time_passed/1000.0
        # steer towards the current destination; pick a new random one on arrival
        vector_to_destination=Vector2.from_points(position,destination)
        if vector_to_destination.get_length() > 1:
            heading=vector_to_destination.normalize()
            position+=heading*time_passed_seconds*speed
        else:
            destination=Vector2(randint(0,640),randint(0,480))
pygame.display.update()
if __name__ == '__main__':
main() |
Zane’s Tavern is a sports bar, restaurant and locals’ favorite with pool table, foosball and shuffleboard. Check out NFL Sunday Ticket with 9 flat-screen TV’s! Apres specials include 75 cent wings, $2.75 Bud Pints and $10.00 Bud Pitchers*. Serving real Philly Cheesesteaks, soups, salads, burgers, sandwiches and the best wings in the valley! Kitchen open until midnight. Located on the upper level of the Snowmass Mall. |
##################################################################################################
# Copyright (c) 2012 Brett Dixon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##################################################################################################
from django.contrib import admin
from frog.models import Gallery, Image, Video, Tag
class GalleryAdmin(admin.ModelAdmin):
list_display = ('title', 'parent', 'owner', 'security')
class ImageAdmin(admin.ModelAdmin):
list_display = ('title', 'guid', 'author', 'thumbnail_tag')
class VideoAdmin(admin.ModelAdmin):
list_display = ('title', 'guid', 'author', 'thumbnail_tag')
class TagAdmin(admin.ModelAdmin):
list_display = ('name', 'parent')
list_filter = ('artist',)
admin.site.register(Gallery, GalleryAdmin)
admin.site.register(Image, ImageAdmin)
admin.site.register(Video, VideoAdmin)
admin.site.register(Tag, TagAdmin) |
To apply a previously configured Parental Control policy to an additional computer, the initial Username and Security Code need to be entered into the First Time Login screen, which is launched once the Telstra Online Security application has been installed successfully.
Once this information has been entered and the 'Login' button selected, the user is taken to a page listing the previously configured Telstra Online Security Parental Control policies.
Once the login is successful, the customer can select which of the listed policies they wish to activate on the additional computer.
A customer can use Parental Controls on three different computers with one BigPond email address. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
A simple example of assisilib usage, with arenas shown in a matplotlib
visualisation (for testing of shapes, figures that are not from screenshots, etc)
'''
from math import pi
from assisipy_utils import arena
import matplotlib.pyplot as plt
from assisipy_utils.arena import rendering
if __name__ == '__main__':
plt.figure(1); plt.clf()
fig, ax = plt.subplots(1,1,num=1)
# define an arena wall object
A = arena.RoundedRectArena(length=25.5, width=16.5, ww=0.5, corner_rad=4.9)
A2 = arena.RoundedRectBarrier(length=11., width=10., ww=0.5, corner_rad=2.,
label_stub='block', edges='wn')
T2= arena.Transformation(dx=8.75, dy=-4.75)
A2.transform(T2)
rendering.render_arena(A.segs, ax=ax, fc='0.5')
rendering.render_arena(A2.segs, ax=ax, fc='r')
yaw = pi
casu_poses = []
for (x,y) in [(x,y) for x in [-9, 0, 9] for y in [-4.5, +4.5] ]:
p = arena.transforms.Point(x, y, 0)
casu_poses.append( (p, yaw))
# special setup, with only 5 - remove the last one
del casu_poses[4]
rendering.render_CASUs(casu_poses, ax=ax)
ax.set_xlim(-20, +20)
ax.set_ylim(-20, +20)
ax.set_aspect('equal')
ax.grid('on')
plt.show()
|
Read this; it didn't come from Apple though. $22B iOS -vs- $19B Android.
> > better and better specs all the time, and lots of people buy them.
> says android should be a mere afterthought. |
import sys
FILEPATH = 'B-large-practice'
sys.stdin = open(FILEPATH + '.in', 'r')
sys.stdout = open(FILEPATH + '.out', 'w')
def get_line(): return raw_input()
def get_int(): return int(get_line())
def get_ints(): return [int(x) for x in get_line().split()]
def max_two_numbers(numbers):
largest = 0
second_largest = 0
for number in numbers:
if number > largest:
second_largest = largest
largest = number
elif number > second_largest:
second_largest = number
return largest + second_largest
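# max_subtree returns the size of the largest "full" subtree obtained by
# hanging the tree from `vertex`: every kept node keeps either 0 or 2 children,
# so a node with fewer than two candidate children contributes just itself.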
def max_subtree(vertex, parent):
subtree = []
children = vertex.adjacent[:]
if parent:
children.remove(parent)
if len(children) < 2:
return 1
for child in children:
subtree.append(max_subtree(child, vertex))
return 1 + max_two_numbers(subtree)
def full_binary_tree(node_count, edges):
min_deletions = float('inf')
vertices = [Vertex(i) for i in xrange(1, node_count + 1)]
for v, w in edges:
vertices[v - 1].add_adjacent(vertices[w - 1])
vertices[w - 1].add_adjacent(vertices[v - 1])
for root in vertices:
min_deletions = min(min_deletions, node_count - max_subtree(root, None))
return min_deletions
class Vertex(object):
def __init__(self, id):
self.id = id
self.adjacent = []
def add_adjacent(self, vertex):
self.adjacent.append(vertex)
def remove_adjacent(self, vertex):
self.adjacent.remove(vertex)
if __name__ == '__main__':
for case in xrange(1, get_int() + 1):
node_count = get_int()
edges = [get_ints() for _ in xrange(node_count - 1)]
print 'Case #%d: %d' % (case, full_binary_tree(node_count, edges))
|
Book reviews, author interviews, music reviews. A revue of reviews!
What’s this all about then?
Book reviewer based on the Isle Of Wight. There's more on my "About" Page. Click on the tab above the picture of the Shanklin beach huts. |
#!/usr/bin/env python
'''
File name: main_ripp_mod.py
Author: Guillaume Viejo
Date created: 16/08/2017
Python Version: 3.5.2
'''
import sys
import numpy as np
import pandas as pd
import scipy.io
from functions import *
# from pylab import *
from multiprocessing import Pool
import os
import neuroseries as nts
from time import time
from pylab import *
# note: quickBin is redefined below with numba's @jit, so the version from
# functions is intentionally not imported here
from numba import jit
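# scalarProduct: cosine of the angle between successive population rate
# vectors (rows of r). quickBin: per-neuron spike-count histograms in windows
# centred on each timestamp in ts.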
@jit(nopython=True)
def scalarProduct(r):
tmp = np.sqrt(np.power(r, 2).sum(1))
denom = tmp[0:-1] * tmp[1:]
num = np.sum(r[0:-1]*r[1:], 1)
return num/(denom)
@jit(nopython=True)
def quickBin(spikelist, ts, bins, index):
rates = np.zeros((len(ts), len(bins)-1, len(index)))
for i, t in enumerate(ts):
tbins = t + bins
for j in range(len(spikelist)):
a, _ = np.histogram(spikelist[j], tbins)
rates[i,:,j] = a
return rates
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
anglehd = {}
anglenohd = {}
zanglehd = {}
zanglenohd = {}
for session in datasets:
print(session)
generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
else:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
wake_ep = loadEpoch(data_directory+session, 'wake')
sleep_ep = loadEpoch(data_directory+session, 'sleep')
sws_ep = loadEpoch(data_directory+session, 'sws')
rem_ep = loadEpoch(data_directory+session, 'rem')
sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3)
sws_ep = sleep_ep.intersect(sws_ep)
rem_ep = sleep_ep.intersect(rem_ep)
rip_ep,rip_tsd = loadRipples(data_directory+session)
rip_ep = sws_ep.intersect(rip_ep)
rip_tsd = rip_tsd.restrict(sws_ep)
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
####################################################################################################################
# binning data
####################################################################################################################
spikeshd = {k:spikes[k] for k in np.where(hd_info_neuron==1)[0] if k not in []}
spikesnohd = {k:spikes[k] for k in np.where(hd_info_neuron==0)[0] if k not in []}
hdneurons = np.sort(list(spikeshd.keys()))
nohdneurons = np.sort(list(spikesnohd.keys()))
bin_size = 20
n_ex = 1000
normed = True
####################################################################################################################
# MIN MAX SWS
####################################################################################################################
    # min and max firing rate during SWS (used for min-max normalisation below)
    mean_sws = pd.DataFrame(index = np.sort(list(spikes.keys())), columns = ['min', 'max'])
for n in spikes.keys():
r = []
for e in sws_ep.index:
bins = np.arange(sws_ep.loc[e,'start'], sws_ep.loc[e,'end'], bin_size*1e3)
a, _ = np.histogram(spikes[n].restrict(sws_ep.loc[[e]]).index.values, bins)
r.append(a)
r = np.hstack(r)
r = r / (bin_size*1e-3)
mean_sws.loc[n,'min']= r.min()
mean_sws.loc[n,'max']= r.max()
bins = np.arange(0, 2000+2*bin_size, bin_size) - 1000 - bin_size/2
times = bins[0:-1] + np.diff(bins)/2
ts = rip_tsd.as_units('ms').index.values
####################################################################################################################
# HD NEURONS
####################################################################################################################
if len(spikeshd) >=5:
rates = quickBin([spikeshd[j].as_units('ms').index.values for j in hdneurons], ts, bins, hdneurons)
rates = rates/float(bin_size*1e-3)
angle = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rip_tsd)))
for i, r in enumerate(rates):
tmp = scalarProduct(r)
angle[i] = np.nan_to_num(tmp, 0)
zangle = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rip_tsd)))
a = mean_sws.loc[hdneurons,'min'].values.astype('float')
b = mean_sws.loc[hdneurons,'max'].values.astype('float')
zrates = (rates - a) / (b-a)
for i, r in enumerate(zrates):
zangle[i] = scalarProduct(r)
# random
rnd_tsd = nts.Ts(t = np.sort(np.hstack([np.random.randint(sws_ep.loc[i,'start']+500000, sws_ep.loc[i,'end']+500000, n_ex//len(sws_ep)) for i in sws_ep.index])))
ts = rnd_tsd.as_units('ms').index.values
rates2 = quickBin([spikeshd[j].as_units('ms').index.values for j in hdneurons], ts, bins, hdneurons)
rates2 = rates2/float(bin_size*1e-3)
shuffled = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rnd_tsd)))
for i, r in enumerate(rates2):
tmp = scalarProduct(r)
shuffled[i] = np.nan_to_num(tmp, 0)
zshuffled = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rnd_tsd)))
a = mean_sws.loc[hdneurons,'min'].values.astype('float')
b = mean_sws.loc[hdneurons,'max'].values.astype('float')
zrates2 = (rates2 - a) / (b-a)
for i, r in enumerate(zrates2):
zshuffled[i] = scalarProduct(r)
anglehd[session] = (angle.mean(1) - shuffled.mean(1))/shuffled.mean(1)
zanglehd[session] = (zangle.mean(1) - zshuffled.mean(1))/zshuffled.mean(1)
####################################################################################################################
# NO HD NEURONS
####################################################################################################################
if len(spikesnohd) >=5:
rates = quickBin([spikesnohd[j].as_units('ms').index.values for j in nohdneurons], ts, bins, nohdneurons)
rates = rates/float(bin_size*1e-3)
angle = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rip_tsd)))
for i, r in enumerate(rates):
tmp = scalarProduct(r)
angle[i] = np.nan_to_num(tmp, 0)
zangle = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rip_tsd)))
a = mean_sws.loc[nohdneurons,'min'].values.astype('float')
b = mean_sws.loc[nohdneurons,'max'].values.astype('float')
zrates = (rates - a) / (b-a)
for i, r in enumerate(zrates):
zangle[i] = scalarProduct(r)
# random
rnd_tsd = nts.Ts(t = np.sort(np.hstack([np.random.randint(sws_ep.loc[i,'start']+500000, sws_ep.loc[i,'end']+500000, n_ex//len(sws_ep)) for i in sws_ep.index])))
ts = rnd_tsd.as_units('ms').index.values
rates2 = quickBin([spikesnohd[j].as_units('ms').index.values for j in nohdneurons], ts, bins, nohdneurons)
rates2 = rates2/float(bin_size*1e-3)
shuffled = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rnd_tsd)))
for i, r in enumerate(rates2):
tmp = scalarProduct(r)
shuffled[i] = np.nan_to_num(tmp, 0)
zshuffled = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rnd_tsd)))
a = mean_sws.loc[nohdneurons,'min'].values.astype('float')
b = mean_sws.loc[nohdneurons,'max'].values.astype('float')
zrates2 = (rates2 - a) / (b-a)
for i, r in enumerate(zrates2):
zshuffled[i] = scalarProduct(r)
anglenohd[session] = (angle.mean(1) - shuffled.mean(1))/shuffled.mean(1)
zanglenohd[session] = (zangle.mean(1) - zshuffled.mean(1))/zshuffled.mean(1)
anglehd = pd.DataFrame.from_dict(anglehd)
anglenohd = pd.DataFrame.from_dict(anglenohd)
zanglehd = pd.DataFrame.from_dict(zanglehd)
zanglenohd = pd.DataFrame.from_dict(zanglenohd)
subplot(211)
plot(anglehd.mean(1))
plot(anglenohd.mean(1))
title("Scalar product")
subplot(212)
plot(zanglehd.mean(1))
plot(zanglenohd.mean(1))
title("Scalar product + zscored")
sys.exit()
store = pd.HDFStore('../figures/figures_articles_v4/figure2/test.h5')
if normed:
store.append('anglehd_normed', anglehd)
store.append('anglenohd_normed', anglenohd)
else:
store.append('anglehd', anglehd)
store.append('anglenohd', anglenohd)
store.close()
figure()
store = pd.HDFStore('../figures/figures_articles_v4/figure2/test.h5')
subplot(2,2,1)
plot(store['anglehd'].mean(1), label = 'HD')
plot(store['anglenohd'].mean(1), label = 'non-HD')
legend()
title("Scalar Product")
subplot(2,2,2)
plot(store['pearsonhd'].mean(1), label = 'HD')
plot(store['pearsonnohd'].mean(1), label = 'non-HD')
legend()
title("Pearson Correlation")
subplot(2,2,3)
plot(store['anglehd_normed'].mean(1), label = 'HD')
plot(store['anglenohd_normed'].mean(1), label = 'non-HD')
legend()
title("Scalar Product normalized")
subplot(2,2,4)
plot(store['pearsonhd_normed'].mean(1), label = 'HD')
plot(store['pearsonnohd_normed'].mean(1), label = 'non-HD')
legend()
title("Pearson Correlation normalized")
show()
sys.exit()
anglehd = pd.DataFrame.from_dict(anglehd)
anglenohd = pd.DataFrame.from_dict(anglenohd)
plot(anglehd.mean(1), label = 'hd')
plot(anglenohd.mean(1), label = 'nohd')
legend()
show()
sys.exit()
datatosave = cPickle.load(open("/mnt/DataGuillaume/MergedData/SWR_SCALAR_PRODUCT.pickle", 'rb'))
angleall = datatosave['cosalpha']
baselineall = datatosave['baseline']
hd = pd.DataFrame()
for s in angleall.keys():
if 'hd' in list(angleall[s].keys()):
tmp1 = angleall[s]['hd'].rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=0.5)
tmp2 = baselineall[s]['hd'].rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=0.5)
tmp = (tmp1.mean(1) - tmp2.mean(1))/tmp2.mean(1)
hd[s.split("/")[1]] = tmp
nohd = pd.DataFrame()
for s in angleall.keys():
if 'nohd' in list(angleall[s].keys()):
tmp1 = angleall[s]['nohd'].rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=1)
tmp2 = baselineall[s]['nohd'].rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=1)
tmp = (tmp1.mean(1) - tmp2.mean(1))/tmp2.mean(1)
nohd[s.split("/")[1]] = tmp
data = pd.DataFrame(index = hd.index.values, columns = pd.MultiIndex.from_product([['hd', 'nohd'], ['mean', 'sem']]))
data['hd', 'mean'] = hd.mean(1)
data['hd', 'sem'] = hd.sem(1)
data['nohd', 'mean'] = nohd.mean(1)
data['nohd', 'sem'] = nohd.sem(1)
data.to_hdf("../figures/figures_articles_v4/figure2/SWR_SCALAR_PRODUCT.h5", 'w')
subplot(111)
m = hd.mean(1)
v = hd.sem(1)
plot(hd.mean(1), label = 'hd')
fill_between(hd.index.values, m+v, m-v, alpha = 0.5)
# title("Only hd")
# subplot(212)
# title("No hd")
m = nohd.mean(1)
v = nohd.sem(1)
plot(nohd.mean(1), label = 'nohd')
fill_between(nohd.index.values, m+v, m-v, alpha = 0.5)
legend()
figure()
subplot(121)
plot(hd, color = 'grey')
plot(hd.mean(1), color = 'red')
title("HD")
subplot(122)
plot(nohd, color = 'grey')
plot(nohd.mean(1), color = 'black')
title("No HD")
show()
|
That this House takes note of the unnumbered Explanatory Memorandum submitted by the Ministry of Agriculture, Fisheries and Food on 8th December 1997 relating to the fixing of total allowable catches for 1998 and certain conditions under which they may be fished; and supports the Government's intentions to negotiate the best possible fishing opportunities for British fishermen based on sustainable fisheries management, effective enforcement and the need to ensure that the regional differences of fisheries and their communities are fully recognised.
'notes the contents of the unnumbered explanatory Memorandum submitted by the Ministry of Agriculture, Fisheries and Food on 8th December 1997 outlining the basis of the negotiations for the December Fisheries Council; and supports efforts to secure the best possible sustainable fishing opportunities for the United Kingdom industry, but recognises that, whatever the Council's outcome, the Government have yet again failed Britain's fishermen through their inability to provide an EU Treaty-based solution to the question of quota hopping, any form of workable plan to enable the industry to respond to the demands of MAGP IV and a statement reflecting the Government's priorities in the forthcoming discussions on CFP reform.'.
to phase out the use of drift nets. We know that the issue of drift nets is important, but when the British fishing industry is collapsing by the day and we are told that during its presidency the UK will concentrate on drift nets, one must question the priorities.
The House divided: Ayes 153, Noes 378. |
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import sys,os
import string
import math
import argparse
def read_plugmap(filename):
debug=False
file=open(filename,"r")
doc={}
intypedef=False
indices={}
indices["HOLETYPE"]=8
indices["OBJECT"]=21
indices["ra"]=9
indices["dec"]=10
indices["xfoc"]=22
indices["yfoc"]=23
objects={}
for k in indices :
objects[k]=[]
for line in file.readlines() :
line=line.strip().replace('\t',' ')
if debug :
print "line: ",line
if len(line)==0 :
continue
if line[0]=="#" :
continue
if line.find("typedef")>=0 :
intypedef=True
if debug :
print "now in typedef"
continue
if intypedef and line.find("}")>=0 :
intypedef=False
if debug :
print "end of typedef"
continue
if intypedef :
continue
if line.find("PLUGMAPOBJ")>=0 :
tmp=line.split(" ")
entries=[]
for t in tmp :
if len(t)>0 :
entries.append(t)
for k in objects.keys() :
i=indices[k]
val=entries[i]
#print k,i,val
tmp=None
try :
tmp=string.atoi(val)
except ValueError :
pass
if tmp is None :
try :
val=string.atof(val)
except ValueError :
pass
if tmp is not None :
val=tmp
objects[k].append(val)
if debug :
print "added one PLUGMAPOBJ"
continue
tmp=line.strip().split(" ")
entries=[]
for t in tmp :
if len(t)>0 :
entries.append(t)
if len(entries)>=2 :
key=entries[0]
val=entries[1]
tmp=None
try :
tmp=string.atoi(val)
except ValueError :
pass
if tmp is None :
try :
val=string.atof(val)
except ValueError :
pass
if tmp is not None :
val=tmp
doc[key]=val
if debug :
print "added doc",key,val
# convert objects into np.array
for k in objects :
objects[k]=np.array(objects[k])
return doc,objects
class OpticalDistortion() :
def __init__(self,platescale) :
self.platescale=platescale # has units
# see ~/software/platedesign/trunk/pro/plate/ad2xyfocal.pro
coef=np.array([-0.000137627, -0.00125238, 1.5447e-09,
8.23673e-08, -2.74584e-13, -1.53239e-12,
6.04194e-18, 1.38033e-17, -2.97064e-23,
-3.58767e-23])
self.achromatic_distortion_pol=np.poly1d(coef[::-1])
# see ~/software/platedesign/trunk/pro/plate/apo_rdistort.pro
mm_per_rad =platescale*180/math.pi
self.chromatic_distort_radii=np.arcsin(np.linspace(0,90,10)*math.pi/(60*180))*mm_per_rad
print "RADII=",self.chromatic_distort_radii
self.chromatic_distort_wave=np.array([5300,4000,5500,6000,8000,10000,15350,15950,16550])
nw=self.chromatic_distort_wave.size
nr=self.chromatic_distort_radii.size
self.chromatic_distort=np.array([
[0.,36.26,72.53,108.84,145.18,181.53,217.90,254.29,290.77,327.44],
[0.,-0.002,-0.003,-0.004,-0.005,-0.005,-0.005,-0.004,-0.002,0.003],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.001,0.001,0.001,0.001,0.001,0.001,0.001,0.001,-0.001],
[0.,0.001,0.003,0.003,0.004,0.004,0.004,0.003,0.002,-0.003],
[0.,0.002,0.004,0.005,0.005,0.005,0.005,0.005,0.003,-0.004],
[0.,0.003,0.006,0.007,0.008,0.008,0.008,0.008,0.004,-0.006],
[0.,0.003,0.006,0.008,0.008,0.009,0.009,0.008,0.004,-0.006],
[0.,0.004,0.006,0.008,0.009,0.009,0.009,0.008,0.004,-0.007]])
# apply scaling
scale=np.zeros((nr))
scale[1:]=self.chromatic_distort_radii[1:]/self.chromatic_distort[0,1:]
self.chromatic_distort[1:] *= scale
self.chromatic_distort[0]=0.
# sort wave
ii=np.argsort(self.chromatic_distort_wave)
self.chromatic_distort_wave=self.chromatic_distort_wave[ii]
for j in range(nr) :
self.chromatic_distort[:,j]=self.chromatic_distort[ii,j]
# in ad2xyfocal, a reference wavelength of 5000A instead of 5500A is used !!
ref_distort = np.zeros((nr))
for j in range(nr) :
ref_distort[j]=np.interp(5000,self.chromatic_distort_wave,self.chromatic_distort[:,j])
self.chromatic_distort -= ref_distort
"""
plt.plot(self.chromatic_distort_wave,self.chromatic_distort[:,-1],"o-")
ww=np.linspace(4000,8000,200)*u.angstrom
r=self.chromatic_distort_radii[-1]
dd=np.zeros((ww.size))
for i in range(ww.size) :
dd[i]=self.chromatic_distortion(r,ww[i]).to(u.mm).value
plt.plot(ww,dd,c="r")
plt.show()
"""
def chromatic_distortion(self,radius,wavelength) : # with radius and wave with units , returns delta r to be added
i=np.where(self.chromatic_distort_wave>=wavelength)[0]
if i.size == 0 :
i=1
else :
i=min(max(1,i[0]),self.chromatic_distort_radii.size-1)
dist1=np.interp(radius,self.chromatic_distort_radii,self.chromatic_distort[i-1])
dist2=np.interp(radius,self.chromatic_distort_radii,self.chromatic_distort[i])
dist=np.interp(wavelength,[self.chromatic_distort_wave[i-1],self.chromatic_distort_wave[i]],[dist1,dist2])
return dist
def distortion(self,radius,wavelength) :
return self.achromatic_distortion_pol(radius) + self.chromatic_distortion(radius,wavelength)
# same result as idlutils/goddard/pro/astro/hadec2altaz.pro
# but with adr calibrated using astropy
def hadec2altaz(ha, dec, lat, wavelength=None) : # ha,dec,lat in deg, wave in a, returns alt,az
d2r = math.pi/180.
sh = math.sin(ha*d2r)
ch = math.cos(ha*d2r)
sd = math.sin(dec*d2r)
cd = math.cos(dec*d2r)
sl = math.sin(lat*d2r)
cl = math.cos(lat*d2r)
"""
x=np.array([cd*ch,cd*sh,sd])
r=np.array([[sl,0,-cl],[0,1,0],[cl,0,sl]])
x=r.dot(x)
x0=x[0]
x1=x[1]
x2=x[2]
"""
x0 = - ch * cd * sl + sd * cl
x1 = - sh * cd
x2 = ch * cd * cl + sd * sl
r=math.sqrt(x0**2+x1**2)
az = math.atan2(-x1,-x0) /d2r
alt = math.atan2(x2,r) / d2r
if wavelength is not None :
# arcsec per unit of tan(zenith)
fact=np.interp(wavelength,[3000,3500,4000,5000,5400,6000,7000,8000],[44.166347,43.365612,42.8640697818,42.292551282,42.1507465805,41.990386,41.811009,41.695723])
alt += fact*(r/x2)/3600.
return alt,az
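# Illustrative call (the numbers are assumptions, not values from a plugmap):
#   alt, az = hadec2altaz(ha=15., dec=30., lat=32.7797556, wavelength=5400.)
# returns the refraction-corrected altitude and azimuth in degrees at APO's latitude.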
# exact same routine as altaz2rpa in idl, needed to get same platescale definition
def altaz2xy(alt,az,altcen,azcen,platescale) :
d2r=math.pi/180
xx= -np.sin(az*d2r) * np.sin((90-alt)*d2r)
yy= -np.cos(az*d2r) * np.sin((90-alt)*d2r)
zz= np.cos((90-alt)*d2r)
xi= -xx*np.cos(azcen*d2r) + yy*np.sin(azcen*d2r)
yi= -yy*np.cos(azcen*d2r) - xx*np.sin(azcen*d2r)
zi= zz
xl= xi
yl= yi*np.sin((90-altcen)*d2r) + zi*np.cos((90-altcen)*d2r)
zl= zi*np.sin((90-altcen)*d2r) - yi*np.cos((90-altcen)*d2r)
rfocal=np.arcsin(np.sqrt(xl**2+zl**2))/d2r*platescale
posang=np.arctan2(-xl, zl)
return rfocal*np.cos(posang),rfocal*np.sin(posang)
def hadec2xy(ha,dec,alt0,az0,crot,srot,latitude,platescale,distortion,wavelength) :
alt,az = hadec2altaz(ha,dec,latitude,wavelength)
x,y = altaz2xy(alt,az,alt0,az0,platescale)
rscale = 1
if 1 :
# Distortion, see ad2xyfocal.pro
r = np.sqrt(x**2 + y**2)
if r>0 :
rscale = 1+distortion.distortion(r,wavelength)/r
# Rotate the focal plane so that +y points towards a point that is offset from
# the plate center along DEC by +1.5 degrees.
xr = rscale*(x*crot-y*srot)
yr = rscale*(x*srot+y*crot)
return -xr,yr,alt,az
def main() :
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', type=str, default='plPlugMapP-4392.par',
help='Input plugmap filename.')
parser.add_argument('--output', type=str, default='myPlugMapP-4392.list',
help='Output filename.')
parser.add_argument('--ha', type=float, default=0,
help='Design hour angle (degrees).')
args = parser.parse_args()
filename = args.input
ofilename = args.output
ha_obs = args.ha
doc, objects = read_plugmap(filename)
ra=objects["ra"]
dec=objects["dec"]
xfoc=objects["xfoc"]
yfoc=objects["yfoc"]
ha_design=doc["ha"]
ra0=doc["raCen"]
dec0=doc["decCen"]
mjd=doc["mjdDesign"]
print "design MJD=%d HA=%f ra=%f dec=%f"%(mjd,ha_design,ra0,dec0)
# APO lat=32.7797556 in plate_refrac.pro
latitude=32.7797556
# optical distortion
# from platedesign/trunk/pro/plate/get_platescale.pro
platescale = 217.7358
distortion = OpticalDistortion(platescale)
# only reference for this wavelength I could find is in code platedesign/trunk/pro/plate/adr.pro
refwave=5400.0
gal=np.where(objects["OBJECT"]=="GALAXY")[0]
qso=np.where(objects["OBJECT"]=="QSO")[0]
star=np.where(objects["OBJECT"]=="SPECTROPHOTO_STD")[0]
na=np.where(objects["OBJECT"]=="NA")[0]
nobj=xfoc.size
wave_design=refwave*np.ones((nobj))
wave_design[gal]=5400.
wave_design[qso]=4000.
wave_design[star]=5400.
wave_obs=7450*np.ones((nobj))
wave_obs[gal]=7450. # to study r1/r2
wave_obs[qso]=7450.
wave_obs[star]=7450.
# for design
alt0_design,az0_design = hadec2altaz(ha_design, dec0, latitude, refwave)
print "Design ALT (ref wave)=",alt0_design
print "Design AZ (ref wave)=",az0_design
# rotation of plate to get vertical dec
altfid,azfid = hadec2altaz(ha_design, dec0+1.5, latitude, refwave)
xfid,yfid = altaz2xy(altfid,azfid,alt0_design,az0_design,platescale)
rotation_angle = np.arctan2(xfid,yfid)
crot_design = np.cos(rotation_angle)
srot_design = np.sin(rotation_angle)
# same for obs
alt0_obs,az0_obs = hadec2altaz(ha_obs, dec0, latitude, refwave)
print "Obs ALT (ref wave)=",alt0_obs
print "Obs AZ (ref wave)=",az0_obs
# rotation of plate to get vertical dec
altfid,azfid = hadec2altaz(ha_obs, dec0+1.5, latitude, refwave)
xfid,yfid = altaz2xy(altfid,azfid,alt0_obs,az0_obs,platescale)
rotation_angle = np.arctan2(xfid,yfid)
crot_obs = np.cos(rotation_angle)
srot_obs = np.sin(rotation_angle)
# compute, at design hour angle = ha_design
xdesign=np.zeros((nobj))
ydesign=np.zeros((nobj))
alt_design=np.zeros((nobj))
az_design=np.zeros((nobj))
# compute, at observed hour angle = ha_obs
xobs=np.zeros((nobj))
yobs=np.zeros((nobj))
alt_obs=np.zeros((nobj))
az_obs=np.zeros((nobj))
selection=range(nobj)
for o in selection :
x,y,alt,az = hadec2xy(ha_design-(ra[o]-ra0),dec[o],alt0_design,az0_design,crot_design,srot_design,latitude,platescale,distortion,wave_design[o])
xdesign[o] = x
ydesign[o] = y
alt_design[o] = alt
az_design[o] = az
x,y,alt,az = hadec2xy(ha_obs-(ra[o]-ra0),dec[o],alt0_obs,az0_obs,crot_obs,srot_obs,latitude,platescale,distortion,wave_obs[o])
xobs[o] = x
yobs[o] = y
alt_obs[o] = alt
az_obs[o] = az
file=open(ofilename,"w")
file.write("#ra dec xfoc yfoc wavedesign xdesign ydesign altdesign azdesign waveobs xobs yobs altobs azobs hole obj\n")
for o in selection :
file.write("%f %f %f %f %f %f %f %f %f %f %f %f %f %f %s %s\n"%(ra[o],dec[o],xfoc[o],yfoc[o],wave_design[o],xdesign[o],ydesign[o],alt_design[o],az_design[o],wave_obs[o],xobs[o],yobs[o],alt_obs[o],az_obs[o],objects["HOLETYPE"][o],objects["OBJECT"][o]))
file.close()
print "wrote", ofilename
if __name__ == '__main__':
main()
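# Example invocation (a sketch; the file names are simply the argparse defaults):
#   python this_script.py --input plPlugMapP-4392.par --output myPlugMapP-4392.list --ha 15.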
|
It is no surprise that I love to mix things up around here. One thing, however, that I never swapped out, one that took me forever to pull the trigger on, was our family room media console. The home sweet home sofas and living port macquarie changed that, however, and is our perfect sideboard. A stunning modern, coastal pick for our space. It was so worth the wait.
I searched and searched for months and could not find the just-right piece. They were all too dark, too bulky, too traditional, too modern, and then I finally found what I thought was the perfect fit. Regrettably, once it was in the room I realized the wood tone was too golden for the space compared to the other woods in the room. Thankfully I realized that this sideboard looked incredible in the dining room, so it was not a loss, but it did send me back on the hunt for our perfect media console.
Then I found The home sweet home sofas and living port macquarie and it was love at first sight. I was so nervous after ordering that I would end up having the same wood-tone dilemma with this piece as with the last, but I was cautiously optimistic this one would work, and work it did!! I nearly cried when I saw how perfect it was for our needs. Finding the just-right weathered coastal piece that still complemented our modern and minimal aesthetic had proven next to impossible. The home sweet home sofas and living port macquarie came into my life and changed that. I'm in love.
This piece can of course work as a sideboard in your dining room; we just opted to use it as a media console instead. The reason it works so well for us as a media console is the rattan doors. The rattan is thick and substantial enough to hide the components from view, yet still possesses enough gapping between the weave that you can keep the doors closed and control all the tech with a remote. WIN. Especially for us, with little impatient kiddo hands roaming around the house.
Overall this is just a great piece that could live comfortably among many design styles. From modern, to coastal, eclectic, or minimal, even in a more traditional home. It has so many elements that lend themselves to so many styles. The clean lines, the texture of the rattan, the weathered finish, down to the sleek black legs. It's just a stunning piece that we cherish and are thrilled to have in our space. |
# -*- coding: utf-8 -*-
#
# This file is part of couchapp released under the Apache 2 license.
# See the NOTICE for more information.
from __future__ import with_statement
import base64
import logging
import mimetypes
import os
import os.path
import re
import urlparse
import webbrowser
try:
import desktopcouch
except ImportError:
desktopcouch = None
from couchapp.errors import ResourceNotFound, AppError
from couchapp.macros import package_shows, package_views
from couchapp import util
if os.name == 'nt':
def _replace_backslash(name):
return name.replace("\\", "/")
else:
def _replace_backslash(name):
return name
re_comment = re.compile("((?:\/\*(?:[^*]|(?:\*+[^*\/]))*\*+\/)|(?:\/\/.*))")
DEFAULT_IGNORE = """[
// filenames matching these regexps will not be pushed to the database
// uncomment to activate; separate entries with ","
// ".*~$"
// ".*\\\\.swp$"
// ".*\\\\.bak$"
]"""
logger = logging.getLogger(__name__)
class LocalDoc(object):
def __init__(self, path, create=False, docid=None, is_ddoc=True):
self.docdir = path
self.ignores = []
self.is_ddoc = is_ddoc
ignorefile = os.path.join(path, '.couchappignore')
if os.path.exists(ignorefile):
# A .couchappignore file is a json file containing a
# list of regexps for things to skip
with open(ignorefile, 'r') as f:
self.ignores = util.json.loads(
util.remove_comments(f.read())
)
if not docid:
docid = self.get_id()
self.docid = docid
self._doc = {'_id': self.docid}
if create:
self.create()
def get_id(self):
"""
if there is an _id file, docid is extracted from it,
else we take the current folder name.
"""
idfile = os.path.join(self.docdir, '_id')
if os.path.exists(idfile):
docid = util.read(idfile).split("\n")[0].strip()
if docid: return docid
if self.is_ddoc:
return "_design/%s" % os.path.split(self.docdir)[1]
else:
return os.path.split(self.docdir)[1]
def __repr__(self):
return "<%s (%s/%s)>" % (self.__class__.__name__, self.docdir, self.docid)
def __str__(self):
return util.json.dumps(self.doc())
def create(self):
if not os.path.isdir(self.docdir):
logger.error("%s directory doesn't exist." % self.docdir)
rcfile = os.path.join(self.docdir, '.couchapprc')
ignfile = os.path.join(self.docdir, '.couchappignore')
if not os.path.isfile(rcfile):
util.write_json(rcfile, {})
util.write(ignfile, DEFAULT_IGNORE)
else:
logger.info("CouchApp already initialized in %s." % self.docdir)
def push(self, dbs, noatomic=False, browser=False, force=False,
noindex=False):
"""Push a doc to a list of database `dburls`. If noatomic is true
each attachments will be sent one by one."""
for db in dbs:
if noatomic:
doc = self.doc(db, with_attachments=False, force=force)
db.save_doc(doc, force_update=True)
attachments = doc.get('_attachments') or {}
for name, filepath in self.attachments():
if name not in attachments:
logger.debug("attach %s " % name)
db.put_attachment(doc, open(filepath, "r"),
name=name)
else:
doc = self.doc(db, force=force)
db.save_doc(doc, force_update=True)
indexurl = self.index(db.raw_uri, doc['couchapp'].get('index'))
if indexurl and not noindex:
if "@" in indexurl:
u = urlparse.urlparse(indexurl)
indexurl = urlparse.urlunparse((u.scheme, u.netloc.split("@")[-1],
u.path, u.params, u.query, u.fragment))
logger.info("Visit your CouchApp here:\n%s" % indexurl)
if browser:
self.browse_url(indexurl)
def browse(self, dbs):
for db in dbs:
doc = self.doc()
indexurl = self.index(db.raw_uri, doc['couchapp'].get('index'))
if indexurl:
self.browse_url(indexurl)
def browse_url(self, url):
if url.startswith("desktopcouch://"):
if not desktopcouch:
raise AppError("Desktopcouch isn't available on this"+
"machine. You can't access to %s" % url)
ctx = desktopcouch.local_files.DEFAULT_CONTEXT
bookmark_file = os.path.join(ctx.db_dir, "couchdb.html")
try:
username, password = re.findall("<!-- !!([^!]+)!!([^!]+)!! -->",
open(bookmark_file).read())[-1]
except ValueError:
raise IOError("Bookmark file is corrupt."+
"Username/password are missing.")
url = "http://%s:%s@localhost:%s/%s" % (username, password,
desktopcouch.find_port(), url[15:])
webbrowser.open_new_tab(url)
def attachment_stub(self, name, filepath):
att = {}
with open(filepath, "rb") as f:
re_sp = re.compile('\s')
att = {
"data": re_sp.sub('',base64.b64encode(f.read())),
"content_type": ';'.join(filter(None,
mimetypes.guess_type(name)))
}
return att
def doc(self, db=None, with_attachments=True, force=False):
""" Function to reetrieve document object from
document directory. If `with_attachments` is True
attachments will be included and encoded"""
manifest = []
objects = {}
signatures = {}
attachments = {}
self._doc = {'_id': self.docid}
# get designdoc
self._doc.update(self.dir_to_fields(self.docdir, manifest=manifest))
if not 'couchapp' in self._doc:
self._doc['couchapp'] = {}
self.olddoc = {}
if db is not None:
try:
self.olddoc = db.open_doc(self._doc['_id'])
attachments = self.olddoc.get('_attachments') or {}
self._doc.update({'_rev': self.olddoc['_rev']})
except ResourceNotFound:
self.olddoc = {}
if 'couchapp' in self.olddoc:
old_signatures = self.olddoc['couchapp'].get('signatures',
{})
else:
old_signatures = {}
for name, filepath in self.attachments():
signatures[name] = util.sign(filepath)
if with_attachments and not old_signatures:
logger.debug("attach %s " % name)
attachments[name] = self.attachment_stub(name, filepath)
if old_signatures:
for name, signature in old_signatures.items():
cursign = signatures.get(name)
if not cursign:
logger.debug("detach %s " % name)
del attachments[name]
elif cursign != signature:
logger.debug("detach %s " % name)
del attachments[name]
else:
continue
if with_attachments:
for name, filepath in self.attachments():
if old_signatures.get(name) != signatures.get(name) or force:
logger.debug("attach %s " % name)
attachments[name] = self.attachment_stub(name, filepath)
self._doc['_attachments'] = attachments
self._doc['couchapp'].update({
'manifest': manifest,
'objects': objects,
'signatures': signatures
})
if self.docid.startswith('_design/'): # process macros
for funs in ['shows', 'lists', 'updates', 'filters',
'spatial']:
if funs in self._doc:
package_shows(self._doc, self._doc[funs], self.docdir,
objects)
if 'validate_doc_update' in self._doc:
tmp_dict = dict(validate_doc_update=self._doc[
"validate_doc_update"])
package_shows( self._doc, tmp_dict, self.docdir,
objects)
self._doc.update(tmp_dict)
if 'views' in self._doc:
# clean views
# we remove empty views and malformed from the list
# of pushed views. We also clean manifest
views = {}
dmanifest = {}
for i, fname in enumerate(manifest):
if fname.startswith("views/") and fname != "views/":
name, ext = os.path.splitext(fname)
if name.endswith('/'):
name = name[:-1]
dmanifest[name] = i
for vname, value in self._doc['views'].iteritems():
if value and isinstance(value, dict):
views[vname] = value
else:
del manifest[dmanifest["views/%s" % vname]]
self._doc['views'] = views
package_views(self._doc,self._doc["views"], self.docdir,
objects)
if "fulltext" in self._doc:
package_views(self._doc,self._doc["fulltext"], self.docdir,
objects)
return self._doc
def check_ignore(self, item):
for i in self.ignores:
match = re.match(i, item)
if match:
logger.debug("ignoring %s" % item)
return True
return False
    def dir_to_fields(self, current_dir='', depth=0,
                      manifest=None):
        """ process a directory and get all members """
        # avoid a shared mutable default argument across calls
        if manifest is None:
            manifest = []
        fields = {}
if not current_dir:
current_dir = self.docdir
for name in os.listdir(current_dir):
current_path = os.path.join(current_dir, name)
rel_path = _replace_backslash(util.relpath(current_path, self.docdir))
if name.startswith("."):
continue
elif self.check_ignore(name):
continue
elif depth == 0 and name.startswith('_'):
# files starting with "_" are always "special"
continue
elif name == '_attachments':
continue
elif depth == 0 and (name == 'couchapp' or name == 'couchapp.json'):
# we are in app_meta
if name == "couchapp":
manifest.append('%s/' % rel_path)
content = self.dir_to_fields(current_path,
depth=depth+1, manifest=manifest)
else:
manifest.append(rel_path)
content = util.read_json(current_path)
if not isinstance(content, dict):
content = { "meta": content }
if 'signatures' in content:
del content['signatures']
if 'manifest' in content:
del content['manifest']
if 'objects' in content:
del content['objects']
if 'length' in content:
del content['length']
if 'couchapp' in fields:
fields['couchapp'].update(content)
else:
fields['couchapp'] = content
elif os.path.isdir(current_path):
manifest.append('%s/' % rel_path)
fields[name] = self.dir_to_fields(current_path,
depth=depth+1, manifest=manifest)
else:
logger.debug("push %s" % rel_path)
content = ''
if name.endswith('.json'):
try:
content = util.read_json(current_path)
except ValueError:
logger.error("Json invalid in %s" % current_path)
else:
try:
content = util.read(current_path).strip()
except UnicodeDecodeError, e:
logger.warning("%s isn't encoded in utf8" % current_path)
content = util.read(current_path, utf8=False)
try:
content.encode('utf-8')
except UnicodeError, e:
logger.warning(
"plan B didn't work, %s is a binary" % current_path)
logger.warning("use plan C: encode to base64")
content = "base64-encoded;%s" % base64.b64encode(
content)
# remove extension
name, ext = os.path.splitext(name)
if name in fields:
logger.warning(
"%(name)s is already in properties. Can't add (%(fqn)s)" % {
"name": name, "fqn": rel_path })
else:
manifest.append(rel_path)
fields[name] = content
return fields
def _process_attachments(self, path, vendor=None):
""" the function processing directory to yeld
attachments. """
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for dirname in dirs:
if self.check_ignore(dirname):
dirs.remove(dirname)
if files:
for filename in files:
if self.check_ignore(filename):
continue
else:
filepath = os.path.join(root, filename)
name = util.relpath(filepath, path)
if vendor is not None:
name = os.path.join('vendor', vendor, name)
name = _replace_backslash(name)
yield (name, filepath)
def attachments(self):
""" This function yield a tuple (name, filepath) corresponding
to each attachment (vendor included) in the couchapp. `name`
is the name of attachment in `_attachments` member and `filepath`
the path to the attachment on the disk.
attachments are processed later to allow us to send attachments inline
or one by one.
"""
# process main attachments
attachdir = os.path.join(self.docdir, "_attachments")
for attachment in self._process_attachments(attachdir):
yield attachment
vendordir = os.path.join(self.docdir, 'vendor')
if not os.path.isdir(vendordir):
logger.debug("%s don't exist" % vendordir)
return
for name in os.listdir(vendordir):
current_path = os.path.join(vendordir, name)
if os.path.isdir(current_path):
attachdir = os.path.join(current_path, '_attachments')
if os.path.isdir(attachdir):
for attachment in self._process_attachments(attachdir,
vendor=name):
yield attachment
def index(self, dburl, index):
if index is not None:
return "%s/%s/%s" % (dburl, self.docid, index)
elif os.path.isfile(os.path.join(self.docdir, "_attachments",
'index.html')):
return "%s/%s/index.html" % (dburl, self.docid)
return False
def document(path, create=False, docid=None, is_ddoc=True):
return LocalDoc(path, create=create, docid=docid, is_ddoc=is_ddoc)
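# Example usage (a minimal sketch: `Database` lives in couchapp's client layer,
# and the path and URL below are assumptions, not defaults):
#
#   doc = document("/path/to/myapp")
#   db = Database("http://127.0.0.1:5984/mydb")
#   doc.push([db])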
|
The Division of Neonatology & Developmental Biology at UCLA Mattel Children's Hospital provides state-of-the-art treatment and support to critically ill premature and full-term newborns and infants.
The UCLA Mattel Children's Hospital is part of the Ronald Reagan UCLA Medical Center, which has been rated as one of the top five medical institutions in the United States by US News and World Report. The UCLA Mattel Children's Hospital has been a leading neonatal center in Southern California and a major referral center for high-risk mothers and neonates.
Our Neonatal Intensive Care Unit (NICU) is a 22-bed unit; all beds are regional level III/IV. Neonatology faculty and fellows are responsible for in-born and out-born babies requiring tertiary-level care from the Greater Los Angeles area and the surrounding regions. There is a congenital cardiac program on site, an ECMO program, and a Head/Body Cooling Program in the Neonatal Intensive Care Unit. There is considerable depth in all the Pediatric Medical and Surgical Subspecialties, which are all available on site. A widely respected infertility and fetal diagnostic program exists at UCLA.
The Neonatal Intensive Care Unit (NICU) at the Santa Monica-UCLA Medical Center and Orthopaedic Hospital, one of the two hospital campuses of the UCLA Health System, is a 16-bed nursery which provides community-based care for term and preterm infants with a variety of medical problems.
The NICU is part of a comprehensive program of women's and children's services available at the medical center and combines the best of academic and community neonatal care. The NICU admits about 200 infants per year, 55% weighing less than 2500 grams at birth. Family-centered, individualized newborn care is provided by a team of UCLA neonatologists, neonatal-perinatal care fellows, neonatal nurses, respiratory therapists, lactation specialists, occupational therapists, physical therapists, social workers, and case managers. Neonatologists provide consultations as requested by pediatricians, obstetricians, and perinatologists; attend high-risk deliveries; care for premature and critically ill newborns; and follow NICU graduates in the Santa Monica UCLA High-Risk Infant Follow-up Clinic. Patient care in the NICU is additionally enhanced by the availability of pediatric subspecialists from the UCLA Mattel Children's Hospital. When necessary, infants are transferred to Ronald Reagan UCLA Medical Center for more specialized respiratory, surgical, and cardiac care.
Our High Risk Infant Follow-Up Clinic provides care to infants recently discharged from the NICU. |
# coding: utf-8
from __future__ import unicode_literals
import pytest
from spacy.lookups import Lookups, Table
from spacy.strings import get_string_id
from spacy.vocab import Vocab
from ..util import make_tempdir
def test_lookups_api():
table_name = "test"
data = {"foo": "bar", "hello": "world"}
lookups = Lookups()
lookups.add_table(table_name, data)
assert len(lookups) == 1
assert table_name in lookups
assert lookups.has_table(table_name)
table = lookups.get_table(table_name)
assert table.name == table_name
assert len(table) == 2
assert table["hello"] == "world"
table["a"] = "b"
assert table["a"] == "b"
table = lookups.get_table(table_name)
assert len(table) == 3
with pytest.raises(KeyError):
lookups.get_table("xyz")
with pytest.raises(ValueError):
lookups.add_table(table_name)
table = lookups.remove_table(table_name)
assert table.name == table_name
assert len(lookups) == 0
assert table_name not in lookups
with pytest.raises(KeyError):
lookups.get_table(table_name)
def test_table_api():
table = Table(name="table")
assert table.name == "table"
assert len(table) == 0
assert "abc" not in table
data = {"foo": "bar", "hello": "world"}
table = Table(name="table", data=data)
assert len(table) == len(data)
assert "foo" in table
assert get_string_id("foo") in table
assert table["foo"] == "bar"
assert table[get_string_id("foo")] == "bar"
assert table.get("foo") == "bar"
assert table.get("abc") is None
table["abc"] = 123
assert table["abc"] == 123
assert table[get_string_id("abc")] == 123
table.set("def", 456)
assert table["def"] == 456
assert table[get_string_id("def")] == 456
def test_table_api_to_from_bytes():
data = {"foo": "bar", "hello": "world", "abc": 123}
table = Table(name="table", data=data)
table_bytes = table.to_bytes()
new_table = Table().from_bytes(table_bytes)
assert new_table.name == "table"
assert len(new_table) == 3
assert new_table["foo"] == "bar"
assert new_table[get_string_id("foo")] == "bar"
new_table2 = Table(data={"def": 456})
new_table2.from_bytes(table_bytes)
assert len(new_table2) == 3
assert "def" not in new_table2
@pytest.mark.skip(reason="This fails on Python 3.5")
def test_lookups_to_from_bytes():
lookups = Lookups()
lookups.add_table("table1", {"foo": "bar", "hello": "world"})
lookups.add_table("table2", {"a": 1, "b": 2, "c": 3})
lookups_bytes = lookups.to_bytes()
new_lookups = Lookups()
new_lookups.from_bytes(lookups_bytes)
assert len(new_lookups) == 2
assert "table1" in new_lookups
assert "table2" in new_lookups
table1 = new_lookups.get_table("table1")
assert len(table1) == 2
assert table1["foo"] == "bar"
table2 = new_lookups.get_table("table2")
assert len(table2) == 3
assert table2["b"] == 2
assert new_lookups.to_bytes() == lookups_bytes
@pytest.mark.skip(reason="This fails on Python 3.5")
def test_lookups_to_from_disk():
lookups = Lookups()
lookups.add_table("table1", {"foo": "bar", "hello": "world"})
lookups.add_table("table2", {"a": 1, "b": 2, "c": 3})
with make_tempdir() as tmpdir:
lookups.to_disk(tmpdir)
new_lookups = Lookups()
new_lookups.from_disk(tmpdir)
assert len(new_lookups) == 2
assert "table1" in new_lookups
assert "table2" in new_lookups
table1 = new_lookups.get_table("table1")
assert len(table1) == 2
assert table1["foo"] == "bar"
table2 = new_lookups.get_table("table2")
assert len(table2) == 3
assert table2["b"] == 2
@pytest.mark.skip(reason="This fails on Python 3.5")
def test_lookups_to_from_bytes_via_vocab():
table_name = "test"
vocab = Vocab()
vocab.lookups.add_table(table_name, {"foo": "bar", "hello": "world"})
assert len(vocab.lookups) == 1
assert table_name in vocab.lookups
vocab_bytes = vocab.to_bytes()
new_vocab = Vocab()
new_vocab.from_bytes(vocab_bytes)
assert len(new_vocab.lookups) == 1
assert table_name in new_vocab.lookups
table = new_vocab.lookups.get_table(table_name)
assert len(table) == 2
assert table["hello"] == "world"
assert new_vocab.to_bytes() == vocab_bytes
@pytest.mark.skip(reason="This fails on Python 3.5")
def test_lookups_to_from_disk_via_vocab():
table_name = "test"
vocab = Vocab()
vocab.lookups.add_table(table_name, {"foo": "bar", "hello": "world"})
assert len(vocab.lookups) == 1
assert table_name in vocab.lookups
with make_tempdir() as tmpdir:
vocab.to_disk(tmpdir)
new_vocab = Vocab()
new_vocab.from_disk(tmpdir)
assert len(new_vocab.lookups) == 1
assert table_name in new_vocab.lookups
table = new_vocab.lookups.get_table(table_name)
assert len(table) == 2
assert table["hello"] == "world"
|
First things first. Rest In Peace, Robin Williams! I've watched you since childhood; you were one of my favorite actor/comedians. The world will be a much sadder place without you. It's a sad irony that some of the funniest comedians deal with the deepest pain and the most ferocious demons.
You know what's so sad and infuriating to me about this? For the most part, nobody gives a damn about depression or other mental health issues until someone famous is affected by it or dies as a result of it. Every day, ordinary people go through life dealing with depression and get no sympathy from people in their lives. They're told to pull themselves out of it, stop whining, get over it, or, as I was told, stop wallowing in self-pity. These folks see depression not as a disability or medical condition, but as a character defect.
Depression is real. Trust me, I've dealt with it since early childhood. It makes you doubt and hate yourself. It saps your energy. It causes you to isolate. It even compounds other disabilities that you may have.
There is a huge stigma attached to depression, so I try hard not to talk about it. I go out of my way to hide it because if people knew it, I'd be treated differently. I've already experienced that with the medical profession, almost at the cost of my life. The severe stomach pains that I'd had for years were "all in my head" until my gallbladder ruptured and I needed emergency surgery. Every physical disability I have (and I have a couple of rare ones) was attributed to my depression until some smart doctor decided to run a specific blood test or order a specific brain scan.
Now, the life of an incredibly gifted actor has been lost to this condition and all of a sudden, people give a rodent's behind about depression - at least, until the news stops covering Robin Williams's death. Then, we'll go back to our usual prejudice, stigmatization, snarky remarks and discriminatory behavior - until the next famous person dies. |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from abc import ABCMeta, abstractmethod
from mantidqt.utils.observer_pattern import Observer, GenericObserver
class HomeTabSubWidget:
    """
    Abstract base class which all sub-widgets must inherit from. This is used to
    enforce a common interface so that the home tab can keep a list of sub-widgets without
    specifically naming each one. Since each sub-widget shares a common model (the context)
    all the home tab needs to do is instruct them to update from their own model.
    """
    __metaclass__ = ABCMeta
@abstractmethod
def update_view_from_model(self):
# update from model
pass
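# A minimal sketch of a conforming sub-widget (illustrative only; the view and
# model attributes below are assumptions, not real Muon GUI members):
#
#   class RunInfoSubWidget(HomeTabSubWidget):
#       def __init__(self, view, model):
#           self._view = view
#           self._model = model
#
#       def update_view_from_model(self):
#           self._view.set_run_number(self._model.current_run)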
class HomeTabPresenter(object):
def __init__(self, view, model, subwidgets):
self._view = view
self._model = model
self._subwidgets = subwidgets
self.instrumentObserver = HomeTabPresenter.InstrumentObserver(self)
self.loadObserver = HomeTabPresenter.LoadObserver(self)
self.groupingObserver = HomeTabPresenter.GroupingObserver(self)
self.enable_observer = HomeTabPresenter.EnableWidgetObserver(self)
self.disable_observer = HomeTabPresenter.DisableWidgetObserver(self)
self.update_view_from_model_observer = GenericObserver(self.update_all_widgets)
self.update_all_widgets()
def show(self):
self._view.show()
def update_all_widgets(self):
"""
Update all widgets from the context.
"""
for subwidget in self._subwidgets:
subwidget.update_view_from_model()
def enable_home_tab(self):
self._view.setEnabled(True)
def disable_home_tab(self):
self._view.setEnabled(False)
# ------------------------------------------------------------------------------------------------------------------
# Observer / Observable
# ------------------------------------------------------------------------------------------------------------------
class InstrumentObserver(Observer):
def __init__(self, outer):
Observer.__init__(self)
self.outer = outer
def update(self, observable, arg):
self.outer.update_all_widgets()
class LoadObserver(Observer):
def __init__(self, outer):
Observer.__init__(self)
self.outer = outer
def update(self, observable, arg):
self.outer.update_all_widgets()
class GroupingObserver(Observer):
def __init__(self, outer):
Observer.__init__(self)
self.outer = outer
def update(self, observable, arg):
self.outer.update_all_widgets()
class EnableWidgetObserver(Observer):
def __init__(self, outer):
Observer.__init__(self)
self.outer = outer
def update(self, observable, arg):
self.outer.enable_home_tab()
class DisableWidgetObserver(Observer):
def __init__(self, outer):
Observer.__init__(self)
self.outer = outer
def update(self, observable, arg):
self.outer.disable_home_tab()
|
1 Owner From New With Bmw Dealer Service History (Stamps At 18,37 And k) - The Broad Stance Of An Athlete With The Elegance Of A Coupe - GPS Satellite Navigation System, Luxurious Full Leather Upholstery, Mp3 iPhone iPod USB & Aux Music Ports, Voice Control For Media Functions, Automatic Smart Key, Front and Rear Radar-Type Parking Sensors, Bluetooth Wireless SmartPhone Connectivity, DAB Digital Radio For A Myriad Of Stations To Suit Your Mood, Cruise Control, Rain Sensing Windscreen Wipers, Bi Xenon Lights, Xenon Headlights, Automatic Headlights With Dusk Sensor, 18In Alloy Wheels, Day Running Lights, Front Fog Lights, Tinted Glass, Anti Dazzle Rear View Mirror, Engine Start/Stop Fuel Saving Technology, Electric Hill Holder, Cornering Brake Control, Regenerative Brakes, Concealed Roll-Over Protection, Sports Suspension, Stability Control System, Electronic Brake Distribution, Hydraulic Emergency Brake Assist, Isofix, Flat Tyre Indicator, Dual-Zone Climate Control, Front And Rear Electric Windows, Radio/CD Player With MP3 Compatibility, Euro 6 Compliant Emissions, All our vehicles may have previously been used for business purposes. Please check in store for the specific history of this car., RAC Approved & Price Checked Daily For Guaranteed Value. PX welcome. Please Note. Deposits up to 500 can be paid through debit or credit card. All balance payments are via bank transfer in full, in-store finance or GBP up to 7,000 limit. This vehicle is based at our Bristol branch (BS5 9PJ). Buy with Confidence. Carbase, the largest independent used car dealer in the South West.
1 Owner From New With Bmw Dealer Service History (Stamps At 19 And 39k) - The Broad Stance Of An Athlete With The Elegance Of A Coupe - GPS Satellite Navigation System, Luxurious Full Leather Upholstery, Mp3 iPhone iPod USB & Aux Music Ports, Automatic Smart Key, Front and Rear Radar-Type Parking Sensors, Bluetooth Wireless SmartPhone Connectivity, DAB Digital Radio For A Myriad Of Stations To Suit Your Mood, Cruise Control, Rain Sensing Windscreen Wipers, Bi Xenon Lights, Xenon Headlights, Automatic Headlights With Dusk Sensor, 18In Alloy Wheels, Day Running Lights, Front Fog Lights, Tinted Glass, Anti Dazzle Rear View Mirror, Engine Start/Stop Fuel Saving Technology, Electric Hill Holder, Cornering Brake Control, Regenerative Brakes, Stability Control System, Electronic Brake Distribution, Hydraulic Emergency Brake Assist, Isofix, Flat Tyre Indicator, Dual-Zone Climate Control, Front Electric Windows, Radio/CD Player With MP3 Compatibility, All our vehicles may have previously been used for business purposes. Please check in store for the specific history of this car., RAC Approved & Price Checked Daily For Guaranteed Value. PX welcome. Please Note. Deposits up to 500 can be paid through debit or credit card. All balance payments are via bank transfer in full, in-store finance or GBP up to 7,000 limit. This vehicle is based at our Weston-super-Mare branch (BS22 8NA). Buy with Confidence. Carbase, the largest used car dealer in the South West.
1 Owner From New - The Broad Stance Of An Athlete With The Elegance Of A Coupe - GPS Satellite Navigation System, Luxurious Full Leather Upholstery, Mp3 iPhone iPod USB & Aux Music Ports, Automatic Smart Key, Front and Rear Radar-Type Parking Sensors, Bluetooth Wireless SmartPhone Connectivity, DAB Digital Radio For A Myriad Of Stations To Suit Your Mood, Cruise Control, Rain Sensing Windscreen Wipers, Bi Xenon Lights, Xenon Headlights, Automatic Headlights With Dusk Sensor, 18In Alloy Wheels, Day Running Lights, Front Fog Lights, Tinted Glass, Anti Dazzle Rear View Mirror, Engine Start/Stop Fuel Saving Technology, Electric Hill Holder, Cornering Brake Control, Regenerative Brakes, Stability Control System, Electronic Brake Distribution, Hydraulic Emergency Brake Assist, Isofix, Flat Tyre Indicator, Dual-Zone Climate Control, Front Electric Windows, Radio/CD Player With MP3 Compatibility, All our vehicles may have previously been used for business purposes. Please check in store for the specific history of this car., RAC Approved & Price Checked Daily For Guaranteed Value. PX welcome. Please Note. Deposits up to 500 can be paid through debit or credit card. All balance payments are via bank transfer in full, in-store finance or GBP up to 7,000 limit. This vehicle is based at our Weston-super-Mare branch (BS22 8NA). Buy with Confidence. Carbase, the largest used car dealer in the South West.
1 Owner From New With Bmw Dealer Service History (Stamps At 15 And 30k) - The Broad Stance Of An Athlete With The Elegance Of A Coupe - £1435 Of Fitted Extras Including Electric Towbar, Front Lumbar Support, Wind Deflector, Extended Storage And Finished In Alpine White Paint - GPS Satellite Navigation System, Luxurious Full Leather Upholstery, Mp3 iPhone iPod USB & Aux Music Ports, Automatic Smart Key, Front and Rear Radar-Type Parking Sensors, Bluetooth Wireless SmartPhone Connectivity, DAB Digital Radio For A Myriad Of Stations To Suit Your Mood, Cruise Control, Rain Sensing Windscreen Wipers, Bi Xenon Lights, Xenon Headlights, Automatic Headlights With Dusk Sensor, 18In Alloy Wheels, Day Running Lights, Front Fog Lights, Tinted Glass, Anti Dazzle Rear View Mirror, Engine Start/Stop Fuel Saving Technology, Electric Hill Holder, Cornering Brake Control, Regenerative Brakes, Concealed Roll-Over Protection, Sports Suspension, Stability Control System, Electronic Brake Distribution, Hydraulic Emergency Brake Assist, Isofix, Flat Tyre Indicator, Dual-Zone Climate Control, Front And Rear Electric Windows, Radio/CD Player With MP3 Compatibility, Euro 6 Compliant Emissions, All our vehicles may have previously been used for business purposes. Please check in store for the specific history of this car., RAC Approved & Price Checked Daily For Guaranteed Value. PX welcome. Please Note. Deposits up to 500 can be paid through debit or credit card. All balance payments are via bank transfer in full, in-store finance or GBP up to 7,000 limit. This vehicle is based at our Bristol branch (BS5 9PJ). Buy with Confidence. Carbase, the largest independent used car dealer in the South West.
1 Owner From New With Bmw Dealer Service History (Stamps At 22 And 42k) - The Broad Stance Of An Athlete With The Elegance Of A Coupe - £745 Of Fitted Extras Including Brushed Aluminium Interior Trim With Black High Gloss Highlight And Finished In Black Sapphire Metallic Paint - GPS Satellite Navigation System, Luxurious Full Leather Upholstery, Mp3 iPhone iPod USB & Aux Music Ports, Voice Control For Media Functions, Automatic Smart Key, Front and Rear Radar-Type Parking Sensors, Bluetooth Wireless SmartPhone Connectivity, DAB Digital Radio For A Myriad Of Stations To Suit Your Mood, Cruise Control, Rain Sensing Windscreen Wipers, Automatic Headlights With Dusk Sensor, 18In Alloy Wheels, Day Running Lights, Front Fog Lights, Tinted Glass, Anti Dazzle Rear View Mirror, Engine Start/Stop Fuel Saving Technology, Electric Hill Holder, Cornering Brake Control, Sports Suspension, Stability Control System, Electronic Brake Distribution, Hydraulic Emergency Brake Assist, Isofix, Flat Tyre Indicator, Dual-Zone Climate Control, Front Electric Windows, Radio/CD Player With MP3 Compatibility, Euro 6 Compliant Emissions, All our vehicles may have previously been used for business purposes. Please check in store for the specific history of this car., RAC Approved & Price Checked Daily For Guaranteed Value. PX welcome. Please Note. Deposits up to 500 can be paid through debit or credit card. All balance payments are via bank transfer in full, in-store finance or GBP up to 7,000 limit. This vehicle is based at our Bristol branch (BS5 9PJ). Buy with Confidence. Carbase, the largest independent used car dealer in the South West.
1 Owner From New With Bmw Dealer Service History (Stamps At 19,39 And 61k) - The Broad Stance Of An Athlete With The Elegance Of A Coupe - Finished In £535 Extra Black Metallic Paint - BMW Professional Multimedia System (Advanced GPS Satellite Navigation System With Full Black Panel Display), Ultra Smooth And Responsive 8 Speed Automatic Transmission, 18 Inch M Double Spoke Alloy Wheels, Heated Front Seats, Loudspeaker System, Headlight Wash System, Dual Zone Automatic Air Conditioning, DAB Digital Radio, Bluetooth Wireless SmartPhone Connectivity With USB Audio Interface, M Sport Suspension, Anthracite Headlining, Start/Stop Fuel Saving Technology, Radio With CD Player, Auxiliary Input And Mp3 Compatibility, Luxurious Dakota Leather Upholstery, Electrically Heated And Adjustable Door Mirrors, Front And Rear Radar Type Parking Sensors, Front Fog Lights, Heated Windscreen Washer Jets, All our vehicles may have previously been used for business purposes. Please check in store for the specific history of this car., RAC Approved & Price Checked Daily For Guaranteed Value. PX welcome. Please Note. Deposits up to 500 can be paid through debit or credit card. All balance payments are via bank transfer in full, in-store finance or GBP up to 7,000 limit. This vehicle is based at our Bristol branch (BS5 9PJ). Buy with Confidence. Carbase, the largest independent used car dealer in the South West.
1 Owner From New With Service History (Stamps At 16,32,48 And 65k) - The Broad Stance Of An Athlete With The Elegance Of A Coupe - 18In Alloy Wheels, Air Conditioning, Dual-Zone Climate Control, Bluetooth Wireless Smartphone Connectivity, Cruise Control, Dab Digital Radio For A Myriad Of Stations To Suit Your Mood, Front And Rear Electric Windows, Hill Holding Assist, Front And Rear Radar-Type Parking Sensors, Rain Sensing Windscreen Wipers, Heated Front Seats, Flat Tyre Indicator, All our vehicles may have previously been used for business purposes. Please check in store for the specific history of this car., RAC Approved & Price Checked Daily For Guaranteed Value. PX welcome. Please Note. Deposits up to 500 can be paid through debit or credit card. All balance payments are via bank transfer in full, in-store finance or GBP up to 7,000 limit. This vehicle is based at our Bristol branch (BS5 9PJ). Buy with Confidence. Carbase, the largest independent used car dealer in the South West.
1 Owner From New With Bmw Dealer Service History (Stamps At 18,35,53 And 71k) - The Broad Stance Of An Athlete With The Elegance Of A Coupe - BMW Professional GPS Satellite Navigation System, Air Conditioning, Dual-Zone Climate Control, Engine Start/Stop Fuel Saving Technology, Cruise Control, Dab Digital Radio For A Myriad Of Stations To Suit Your Mood, Front And Rear Radar-Type Parking Sensors, Heated Front Seats, 18In Alloy Wheels, Radio/Cd Player With Mp3 Compatibility, Bluetooth Wireless Smartphone Connectivity, Luxurious Full Leather Upholstery, Front And Rear Electric Windows, Tinted Glass, Hill Holding Assist, Rain Sensing Windscreen Wipers, Flat Tyre Indicator, All our vehicles may have previously been used for business purposes. Please check in store for the specific history of this car., RAC Approved & Price Checked Daily For Guaranteed Value. PX welcome. Please Note. Deposits up to 500 can be paid through debit or credit card. All balance payments are via bank transfer in full, in-store finance or GBP up to 7,000 limit. This vehicle is based at our Bristol branch (BS5 9PJ). Buy with Confidence. Carbase, the largest independent used car dealer in the South West.
1 Owner From New With Service History (Stamps At 24,33 And 49k Miles) - The Broad Stance Of An Athlete With The Elegance Of A Coupe - Fitted With £1550 Extra Ultra Smooth And Responsive 8 Speed Automatic Transmission And Finished In Alpine White Paint - GPS Satellite Navigation System, Luxurious Full Leather Upholstery, Mp3 iPhone iPod USB & Aux Music Ports, Automatic Smart Key, Front and Rear Radar-Type Parking Sensors, Bluetooth Wireless SmartPhone Connectivity, DAB Digital Radio For A Myriad Of Stations To Suit Your Mood, Cruise Control, Rain Sensing Windscreen Wipers, Bi Xenon Lights, Xenon Headlights, Automatic Headlights With Dusk Sensor, 18In Alloy Wheels, Day Running Lights, Front Fog Lights, Tinted Glass, Anti Dazzle Rear View Mirror, Engine Start/Stop Fuel Saving Technology, Electric Hill Holder, Cornering Brake Control, Regenerative Brakes, Stability Control System, Electronic Brake Distribution, Hydraulic Emergency Brake Assist, Isofix, Flat Tyre Indicator, Dual-Zone Climate Control, Front Electric Windows, Radio/CD Player With MP3 Compatibility, Euro 6 Compliant Emissions, All our vehicles may have previously been used for business purposes. Please check in store for the specific history of this car., RAC Approved & Price Checked Daily For Guaranteed Value. PX welcome. Please Note. Deposits up to 500 can be paid through debit or credit card. All balance payments are via bank transfer in full, in-store finance or GBP up to 7,000 limit. This vehicle is based at our Bristol branch (BS5 9PJ). Buy with Confidence. Carbase, the largest independent used car dealer in the South West.
The BMW 4 Series coupe is a class act. Many coupes aren't practical for everyday use, especially those with just two seats, but this is different. The 4 Series blows its competitors out of the water, as this four-seater model manages to offer plenty of space, yet still manages to feel sporty and fun.
A powerful engine and responsive handling make for a great ride, whether you're the driver or a passenger. You'll love the interior too - in typical BMW style, only the best materials are used, so everything looks and feels stylish.
If you'd like to find out more about the 4 Series, or want to take one for a test drive, please get in touch - our experts will be more than happy to help. |
import hashlib, random
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from geoalchemy2 import Geometry
db = SQLAlchemy()
class User(db.Model, UserMixin):
__tablename__ = "user"
id = db.Column(db.Integer(), primary_key=True)
username = db.Column(db.String(32))
password = db.Column(db.String())
email = db.Column(db.String())
api_key = db.Column(db.String(64))
submitted = db.relationship('Post', backref='author', lazy='dynamic')
pings = db.relationship('Ping', backref='author', lazy='dynamic')
def __init__(self, username, password, email):
self.username = username
self.email = email
self.set_password(password)
self.new_api_key()
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, value):
return check_password_hash(self.password, value)
def new_api_key(self):
self.api_key = hashlib.sha224(str(random.getrandbits(256)).encode('utf-8')).hexdigest()
def is_authenticated(self):
if isinstance(self, AnonymousUserMixin):
return False
else:
return True
def is_active(self):
return True
def is_anonymous(self):
if isinstance(self, AnonymousUserMixin):
return True
else:
return False
def get_id(self):
return self.id
def __repr__(self):
return '<User %r>' % self.username
class Post(db.Model):
""" A post containing location data """
__tablename__ = "post"
id = db.Column(db.Integer, primary_key=True)
post_type = db.Column(db.String(32), nullable=False)
title = db.Column(db.String(256), nullable=False)
    # pass the callable (no parentheses) so the default is evaluated per row,
    # not once at import time
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow, nullable=False)
loc = db.Column(Geometry('POINT'), nullable=False)
latitude = db.Column(db.Float, default=43.165556, nullable=False)
longitude = db.Column(db.Float, default=-77.611389, nullable=False)
private = db.Column(db.Boolean, default=False, nullable=False)
author_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True, nullable=False)
__mapper_args__ = {'polymorphic_on': post_type }
def get_id(self):
return self.id
def get_location(self):
return self.loc
def __repr__(self):
return '<Post {0}>'.format(self.title)
class TextPost(Post):
""" A blog post """
__mapper_args__ = {'polymorphic_identity': 'text'}
text = db.Column(db.Text)
class ImagePost(Post):
""" An image post """
__mapper_args__ = {'polymorphic_identity': 'image'}
large = db.Column(db.Text)
medium = db.Column(db.Text)
small = db.Column(db.Text)
caption = db.Column(db.String(512))
class Ping(db.Model):
__tablename__ = "ping"
id = db.Column(db.Integer, primary_key=True)
author_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)  # pass the callable so each row gets its own timestamp
accuracy = db.Column(db.Float, default=100.0, nullable=False)
loc = db.Column(Geometry('POINT'))
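# Minimal usage sketch (assumes a Flask app wired to a PostGIS-enabled
# database; the URI and values below are illustrative):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/geo'
#   db.init_app(app)
#   with app.app_context():
#       db.create_all()
#       user = User('alice', 'secret', 'alice@example.com')
#       db.session.add(user)
#       db.session.commit()
#       assert user.check_password('secret')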
|
Daniel „Hank“ Wunn and Julian „Clyde“ Fricke, friends since childhood, teamed up to form a duo that breaks the clean rules and visions one might expect from their „cocktail“ of architecture, typography, abstract painting and vandalism.
The duo's working process often looks like an intense battle of beliefs and techniques, yet it surprisingly ends in an almost calm, confident and reserved composition, in which the viewer cannot pick apart the two sides but instead finds the perfect disaster of a duo clashing.
In the end the viewer always discovers references to the city's own urban surroundings, as well as art-historical beliefs and views, mixed with the sheer urge to paint.
Looking at the duo's works, you never get a feeling of pressure or a forced drive to succeed; it is more like a huge playground that these two grown-ups attend in a repetitive manner. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script identifies when the bug is fixed, i.e., when the state of the bug
becomes "RESOLVED", "VERIFIED", or "CLOSED".
First, this script extracts information about transitions of the bug's state
by parsing the bug's history. It then identifies when the bug was fixed.
input: A file that records bug history (not bug report). It is obtained by using Masaki's script.
output: bug ID and the date that the bug is fixed.
"""
import sys
import re
from lxml import html
def str_format(src):
src = src.replace("\n", "")
src = src.replace(" ", "")
return src
def extraction(file_name):
html_string = open(file_name).read()
tree = html.fromstring(html_string)
# check if the activity exists
try:
presence = tree.xpath('//*[@id="bugzilla-body"]/p[2]/text()')
if presence[0] == "\n No changes have been made to this bug yet.\n ":
match = re.findall(r'[0-9]+', file_name)
bug_id = match[0]
return_list = []
return_list.append(bug_id)
return_list.append("-1")
return return_list
except IndexError:
presence = tree.xpath('//*[@id="error_msg"]/text()[1]')
if "You are not authorized to access bug" in presence:
match = re.findall(r'[0-9]+', file_name)
bug_id = match[0]
return_list = []
return_list.append(bug_id)
return_list.append("-1")
return return_list
tr_len = len(tree.xpath('//*[@id="bugzilla-body"]/table/tr'))
if tr_len == 0:
return
fixed_date = ""
for tr_num in range(2, tr_len+1):
activity_detail = {"who":"", "when":"", "what":"", "removed":"", "added":""}
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td'
td_len = len(tree.xpath(xpath))
if td_len == 3:
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[1]/text()'
activity_detail["what"] = str_format(tree.xpath(xpath)[0])
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[2]/text()'
activity_detail["removed"] = str_format(tree.xpath(xpath)[0])
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[3]/text()'
activity_detail["added"] = str_format(tree.xpath(xpath)[0])
activity_detail["when"] = when
if td_len == 5:
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[1]/text()'
activity_detail["who"] = str_format(tree.xpath(xpath)[0])
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[2]/text()'
activity_detail["when"] = str_format(tree.xpath(xpath)[0])
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[3]/text()'
activity_detail["what"] = str_format(tree.xpath(xpath)[0])
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[4]/text()'
activity_detail["removed"] = str_format(tree.xpath(xpath)[0])
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[5]/text()'
activity_detail["added"] = str_format(tree.xpath(xpath)[0])
when = activity_detail["when"]
if (activity_detail["what"] == "Status"):
if (activity_detail["added"] == "REOPENED"):
fixed_date = ""
elif (activity_detail["added"] == "RESOLVED") or (activity_detail["added"] == "VERIFIED") or (activity_detail["added"] == "CLOSED"):
r = re.compile("\d{4}-\d{2}-\d{2}")
fixed_date = r.findall(activity_detail["when"])[0]
fixed_date = fixed_date.replace("-", "/")
r = re.compile("\d{2}:\d{2}")
fixed_time = r.findall(activity_detail["when"])[0]
fixed_date = fixed_date + " " + fixed_time
match = re.findall(r'[0-9]+', file_name)
bug_id = match[0]
return_list = []
return_list.append(bug_id)
if fixed_date == "":
return_list.append("-1")
else:
return_list.append(fixed_date)
return return_list
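# Minimal usage sketch (assumes history files named like "12345.html", where
# the bug ID is the number embedded in the file name):
#
#   if __name__ == '__main__':
#       result = extraction(sys.argv[1])
#       if result:
#           print(','.join(result))  # e.g. "12345,2014/01/31 09:15" or "12345,-1"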
|
from os import rename, walk
from os.path import isfile, join, basename, abspath, dirname
import sys
import hashlib
def sha1OfFile(filepath):
filepath = abspath(filepath)
with open(filepath, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
if sys.argv[1][len(sys.argv[1]) - 1] == '/':
sys.argv[1] = sys.argv[1][:-1]
if sys.argv[2][len(sys.argv[2]) - 1] == '/':
sys.argv[2] = sys.argv[2][:-1]
original_files = {}
for root, subFolders, files in walk(sys.argv[1]):
for f in files:
p = join(root, f)
f = abspath(p)
original_files[sha1OfFile(f)] = basename(f)
found = 0
not_found = 0
for root, subFolders, files in walk(sys.argv[2]):
for f in files:
p = join(root, f)
f = abspath(p)
        if isfile(f):
            digest = sha1OfFile(f)
            if digest in original_files:
                found += 1
                rename(f, join(dirname(f), original_files[digest]))
else:
not_found += 1
print('Total original files: ' + str(len(original_files)))
print('Total recovered files matched: ' + str(found))
print('Total recovered files not matched: ' + str(not_found))
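# Usage sketch (script and directory names are illustrative):
#   python recover_names.py /path/to/originals /path/to/recovered
# Files in the second tree whose SHA-1 digest matches a file in the first
# tree are renamed back to their original base name in place.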
|
Bank of Baroda has a branch at Choubey Colonyraipur, Raipur in Chhattisgarh. The IFSC code of the Bank of Baroda Choubey Colonyraipur branch is BARB0CHOUBE. The branch code allotted to the Choubey Colonyraipur branch of Bank of Baroda is the last six characters of the IFSC code, i.e. CHOUBE.
What is the IFSC Code of Bank of Baroda Choubey Colonyraipur, Raipur, Chhattisgarh?
The IFSC Code of Bank of Baroda, Choubey Colonyraipur, is BARB0CHOUBE. The Bank of Baroda Choubey Colonyraipur branch is located in the Raipur district of Chhattisgarh state in India. You can check the IFSC code of Bank of Baroda Choubey Colonyraipur with the help of the Finacbooks website.
What is the MICR Number of Bank of Baroda Choubey Colonyraipur, Raipur, Chhattisgarh?
The MICR number of Bank of Baroda Choubey Colonyraipur, Raipur is 492012014.
What is the Branch Code of Bank of Baroda Choubey Colonyraipur, Raipur, Chhattisgarh?
The Branch Code of Bank of Baroda Choubey Colonyraipur, Raipur is the last six characters of the IFSC code, i.e. CHOUBE.
Services Offered at Bank of Baroda Choubey Colonyraipur, Raipur, Chhattisgarh?
You can reach out to the customer care team of Bank of Baroda via phone or email, or by a direct visit to Bank of Baroda, Choubey Colonyraipur, Raipur, Chhattisgarh. Please click here to check the Bank of Baroda customer care number.
How to Check IFSC code of Bank of Baroda Choubey Colonyraipur, Raipur online? |
# script to calculate trailed FT of the lightcurve
import astronomy as ast
import pylab as pl
import scipy.stats as sci
#X2 = pl.loadtxt('run2_flat.dat')
#X = pl.loadtxt('ec2117ans_2_cc.dat')
X = pl.loadtxt('S7651_FF.dat')  # pl.load was removed from pylab; loadtxt reads the same whitespace-separated columns
x = X[:,0]
y = X[:,1]
z = X[:,2]
# original lightcurve
#z = X2[:,2] + X2[:,1]
N = len(x)
fitlength = 100
#x -= int(x[0])
# ephemeris
T0 = 2453964.3307097
P = 0.1545255
#x = (x - T0) / P
ft = []
date = []
peaks = []
f0 = 3000
f1 = 4000
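# Slide a window of `fitlength` points along the lightcurve in steps of
# fitlength/2 (50% overlap) and compute the DFT of each window between
# frequencies f0 and f1 (cycles/day).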
for i in range(0,N,int(fitlength/2.0)):
if i + fitlength/2.0 <= len(x):
        print('somewhere')
date.append(pl.average(x[i:i + fitlength]))
f,a = ast.signal.dft(x[i:i+fitlength],y[i:i+fitlength],f0,f1,1)
ft.append(a)
#sort,argsort = sci.fastsort(a)
#peaks.append(f[argsort[-1]])
# / len(x[i:i+fitlength]))
        print('%d %d' % (i, i + fitlength))
else:
        print('finally')
#x = fitwave(y[i:len(t)+1],t[i:len(t)+1],freq)
f,a = ast.signal.dft(x[i:len(x)+1],y[i:len(x)+1],f0,f1,1)
ft.append(a)
#sort,argsort = sci.fastsort(a)
#peaks.append(f[argsort[-1]])
date.append(pl.average(x[i:-1]))# / len(x[i:-1]))
        print(i)
print('\n\n\n\n' + str(N))
print(pl.shape(ft))
#pl.figure(figsize=(8,4))
pl.subplots_adjust(hspace=0.001)
ax1=pl.subplot(211)
# calculate phase
x = (x - T0) / P
date = (pl.array(date) - T0) / P
pl.scatter(x,y+z,marker='o',s=0.1)
#yl = pl.ylim()
#pl.ylim(yl[1],yl[0])
pl.xlim(date[0],date[-1])
pl.ylabel('Intensity')
yt = pl.yticks()
pl.yticks(yt[0][1:-1])
pl.subplot(212)
#im = pl.imshow(pl.array(ft).transpose(),aspect='auto',interpolation='bilinear',extent=(date[0],date[-1],f1,f0,),cmap=pl.cm.jet)
levels=pl.arange(0.000,0.00000005,0.000000001)
im = pl.contourf(pl.array(ft).transpose(),levels=levels,extent=(date[0],date[-1],f0,f1),cmap=pl.cm.jet)
pl.colorbar(im,orientation='horizontal',shrink=1.0)
#pl.xlabel('HJD (+2453965)')
pl.xlabel('Orbital Phase')
pl.ylabel('Frequency (cycles/day)')
yt = pl.yticks()
pl.yticks(yt[0][1:-1])
pl.subplots_adjust(bottom=0.24,right=0.98,left=0.15)
xticklabels = ax1.get_xticklabels()
pl.setp(xticklabels, visible=False)
#pl.figure()
#pl.plot(date,peaks,'-')
##im = pl.contourf(pl.array(ft).transpose(),levels=levels,extent=(date[0],date[-1],f1,f0),cmap=pl.cm.jet,origin='lower')
pl.show()
|
A set of five handmade lampworked glass beads in pale blue with pink speckles and a matt finish. The beads measure 7-9mm wide, with holes of approximately 1mm. Handmade by me in my home studio, these beads have been properly kiln-annealed for strength and durability. |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
#
import base64
import hashlib
import pickle
import time
import uuid
from abc import ABCMeta, abstractmethod
from collections import defaultdict
_MetaDatabase = ABCMeta('_MetaDatabase', (object,), {})
class ABCDatabase(_MetaDatabase):
    class Conflict(Exception):
        """Raised in case of conflicting updates"""
    class NotFound(Exception):
        """Raised when attempting to query a missing document"""
def __init__(self, name):
self._name = name
self._start_time = int(time.time() * 10**6)
self._update_seq = 0
@property
def name(self):
"""Returns database symbolic name as string"""
return self._name
@property
def start_time(self):
"""Returns database start time in microseconds"""
return self._start_time
@property
def update_seq(self):
"""Returns current update sequence value"""
return self._update_seq
def info(self):
"""Returns database information object as dict"""
return {
'db_name': self.name,
'instance_start_time': str(self.start_time),
'update_seq': self.update_seq
}
@abstractmethod
def contains(self, idx, rev=None):
"""Verifies that document with specified idx exists"""
@abstractmethod
def check_for_conflicts(self, idx, rev):
"""Check that specified idx and rev provides no conflicts
or raises Conflict exception otherwise"""
@abstractmethod
def load(self, idx, rev=None):
"""Returns document by specified idx"""
@abstractmethod
def store(self, doc, rev=None):
"""Creates document or updates if rev specified"""
@abstractmethod
def remove(self, idx, rev):
"""Removes document by specified idx and rev"""
@abstractmethod
def revs_diff(self, idrevs):
"""Returns missed revisions for specified id - revs mapping"""
@abstractmethod
def bulk_docs(self, docs, new_edits=True):
"""Bulk update docs"""
@abstractmethod
def ensure_full_commit(self):
"""Ensures that all changes are actually stored on disk"""
@abstractmethod
def changes(self, since=0, feed='normal', style='all_docs', filter=None):
"""Ensures that all changes are actually stored on disk"""
@abstractmethod
def add_attachment(self, doc, name, data, ctype='application/octet-stream'):
"""Adds attachment to specified document"""
class MemoryDatabase(ABCDatabase):
def __init__(self, *args, **kwargs):
super(MemoryDatabase, self).__init__(*args, **kwargs)
self._docs = {}
self._changes = {}
def _new_rev(self, doc):
oldrev = doc.get('_rev')
if oldrev is None:
seq, _ = 0, None
else:
seq, _ = oldrev.split('-', 1)
seq = int(seq)
sig = hashlib.md5(pickle.dumps(doc)).hexdigest()
newrev = '%d-%s' % (seq + 1, sig)
return newrev.lower()
def check_for_conflicts(self, idx, rev):
if self.contains(idx):
if rev is None:
if idx.startswith('_local/'):
return
raise self.Conflict('Document update conflict')
elif not self.contains(idx, rev):
raise self.Conflict('Document update conflict')
elif rev is not None:
raise self.Conflict('Document update conflict')
def contains(self, idx, rev=None):
if idx not in self._docs:
return False
doc = self._docs[idx]
if rev is None:
return not doc.get('_deleted', False)
return self._docs[idx]['_rev'] == rev
def load(self, idx, rev=None):
if not self.contains(idx, rev):
raise self.NotFound(idx)
return self._docs[idx]
def store(self, doc, rev=None, new_edits=True):
if '_id' not in doc:
doc['_id'] = str(uuid.uuid4()).lower()
if rev is None:
rev = doc.get('_rev')
idx = doc['_id']
if new_edits:
self.check_for_conflicts(idx, rev)
doc['_rev'] = self._new_rev(doc)
else:
assert rev, 'Document revision missed'
doc['_rev'] = rev
idx, rev = doc['_id'], doc['_rev']
self._docs[idx] = doc
self._update_seq += 1
self._changes[idx] = self._update_seq
return idx, rev
def remove(self, idx, rev):
if not self.contains(idx):
raise self.NotFound(idx)
elif not self.contains(idx, rev):
raise self.Conflict('Document update conflict')
doc = {
'_id': idx,
'_rev': rev,
'_deleted': True
}
return self.store(doc, rev)
def revs_diff(self, idrevs):
res = defaultdict(dict)
for idx, revs in idrevs.items():
missing = []
if not self.contains(idx):
missing.extend(revs)
res[idx]['missing'] = missing
continue
doc = self._docs[idx]
for rev in revs:
if doc['_rev'] != rev:
missing.append(rev)
if missing:
res[idx]['missing'] = missing
return res
def bulk_docs(self, docs, new_edits=True):
res = []
for doc in docs:
try:
idx, rev = self.store(doc, None, new_edits)
res.append({
'ok': True,
'id': idx,
'rev': rev
})
except Exception as err:
res.append({'id': doc.get('_id'),
'error': type(err).__name__,
'reason': str(err)})
return res
def ensure_full_commit(self):
return {
'ok': True,
'instance_start_time': self.info()['instance_start_time']
}
def changes(self, since=0, feed='normal', style='all_docs', filter=None):
changes = sorted(self._changes.items(), key=lambda i: i[1])
        if since:
            # yield only events at or after the given sequence number
            for idx, seq in changes:
                if since <= seq:
                    yield self.make_event(idx, seq)
            return
        for idx, seq in changes:
            yield self.make_event(idx, seq)
def add_attachment(self, doc, name, data, ctype='application/octet-stream'):
        atts = doc.setdefault('_attachments', {})
digest = 'md5-%s' % base64.b64encode(hashlib.md5(data).digest()).decode()
if doc.get('_rev'):
revpos = int(doc['_rev'].split('-')[0]) + 1
else:
revpos = 1
atts[name] = {
'data': data,
'digest': digest,
'length': len(data),
'content_type': ctype,
'revpos': revpos
}
def make_event(self, idx, seq):
doc = self._docs[idx]
event = {
'id': idx,
'changes': [{'rev': doc['_rev']}],
'seq': seq
}
if doc.get('_deleted'):
event['_deleted'] = True
return event
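# Minimal usage sketch for the in-memory backend (values are illustrative):
#
#   db = MemoryDatabase('testdb')
#   idx, rev = db.store({'_id': 'doc1', 'answer': 42})
#   assert db.contains('doc1')
#   doc = db.load('doc1')
#   db.remove('doc1', rev)
#   for event in db.changes():
#       print(event)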
|
While flying to your new destination is no doubt a time-saving option, having your car so you can drive around in a new city is a significant convenience. The best solution? Hiring the services of a reliable company for auto shipping to and from Meskegon.
We offer auto shipping to and from Meskegon for a range of vehicles including motorcycle, truck, car, or boat in all size specifications. By choosing to work with us, you can expect highly efficient car transport services. Our company is well known for its dedication and exceptional services that customers rely on and appreciate.
By choosing Freedom Auto Transport, you’re choosing complete assurance that the auto shipping to and from Meskegon will be conducted with care and that your vehicle will arrive in the same condition in which you entrusted it to us. Rely on streamlined, hassle-free, and punctual auto transport.
At Freedom Auto Transport, we value your time and money. Our focus is on answering your queries in the shortest time possible. Accordingly, we have for you a choice of inquiry channels for auto shipping to and from Meskegon.
When looking for auto shipping to and from Meskegon, search for the word “transport” on the internet forums of collectible car owner websites. By entering all the relevant details correctly, you can get an accurate estimate of transportation costs. Reputable companies have efficient, interactive websites with updated information that provide accurate costs with no room for error.
When asking for information about the auto shipping to and from Meskegon, ask for the insurance coverage they have for keeping your vehicle safe on the road. A legitimate company will guarantee the safety of your car with adequate and updated coverage.
At Freedom Auto Transport, our focus is on speedy and economical auto shipping to and from Meskegon. Several factors influence the time it takes for us to move your vehicle from the point of origin to the point of delivery. At the time of taking your booking, we will make sure to give you a clear view of the expected time of delivery. |
# -*- coding: utf-8 -*-
r"""The source type objects.
The source type objects define the source of the artifact data. In earlier
versions of the artifact definitions, collector definitions served a similar
purpose to the source types. Currently the following source types are defined:
* artifact; the source is one or more artifact definitions;
* command; the source is a command to run;
* directory; the source is one or more directories;
* file; the source is one or more files;
* path; the source is one or more paths;
* Windows Registry key; the source is one or more Windows Registry keys;
* Windows Registry value; the source is one or more Windows Registry values;
* WMI query; the source is a Windows Management Instrumentation query.
The difference between the file and path source types are that file should
be used to define file entries that contain data and path, file entries that
define a location. E.g. on Windows %SystemRoot% could be considered a path
artifact definition, pointing to a location e.g. C:\Windows. And where
C:\Windows\System32\winevt\Logs\AppEvent.evt a file artifact definition,
pointing to the Application Event Log file.
"""
import abc
from artifacts import definitions
from artifacts import errors
class SourceType(object):
"""Class that implements the artifact definition source type interface."""
TYPE_INDICATOR = None
@property
def type_indicator(self):
"""The type indicator.
Raises:
NotImplementedError: if the type indicator is not defined.
"""
if not self.TYPE_INDICATOR:
raise NotImplementedError(u'Invalid source type missing type indicator.')
return self.TYPE_INDICATOR
@abc.abstractmethod
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
class ArtifactGroupSourceType(SourceType):
"""Class that implements the artifact group source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_ARTIFACT_GROUP
def __init__(self, names=None):
"""Initializes the source type object.
Args:
names: optional list of artifact definition names. The default is None.
Raises:
FormatError: when artifact names is not set.
"""
if not names:
raise errors.FormatError(u'Missing names value.')
super(ArtifactGroupSourceType, self).__init__()
self.names = names
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
return {u'names': self.names}
class FileSourceType(SourceType):
"""Class that implements the file source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_FILE
def __init__(self, paths=None, separator=u'/'):
"""Initializes the source type object.
Args:
paths: optional list of paths. The paths are considered relative
to the root of the file system. The default is None.
separator: optional string containing the path segment separator.
The default is /.
Raises:
FormatError: when paths is not set.
"""
if not paths:
raise errors.FormatError(u'Missing paths value.')
super(FileSourceType, self).__init__()
self.paths = paths
self.separator = separator
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
source_type_attributes = {u'paths': self.paths}
if self.separator != u'/':
source_type_attributes[u'separator'] = self.separator
return source_type_attributes
class CommandSourceType(SourceType):
"""Class that implements the command source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_COMMAND
def __init__(self, args=None, cmd=None):
"""Initializes the source type object.
Args:
args: list of strings that will be passed as arguments to the command.
cmd: string representing the command to run.
Raises:
FormatError: when args or cmd is not set.
"""
if args is None or cmd is None:
raise errors.FormatError(u'Missing args or cmd value.')
super(CommandSourceType, self).__init__()
self.args = args
self.cmd = cmd
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
return {u'cmd': self.cmd, u'args': self.args}
class PathSourceType(SourceType):
"""Class that implements the path source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_PATH
def __init__(self, paths=None, separator=u'/'):
"""Initializes the source type object.
Args:
paths: optional list of paths. The paths are considered relative
to the root of the file system. The default is None.
separator: optional string containing the path segment separator.
The default is /.
Raises:
FormatError: when paths is not set.
"""
if not paths:
raise errors.FormatError(u'Missing paths value.')
super(PathSourceType, self).__init__()
self.paths = paths
self.separator = separator
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
source_type_attributes = {u'paths': self.paths}
if self.separator != u'/':
source_type_attributes[u'separator'] = self.separator
return source_type_attributes
class DirectorySourceType(SourceType):
"""Class that implements the directory source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_DIRECTORY
def __init__(self, paths=None, separator=u'/'):
"""Initializes the source type object.
Args:
paths: optional list of paths. The paths are considered relative
to the root of the file system. The default is None.
separator: optional string containing the path segment separator.
The default is /.
Raises:
FormatError: when paths is not set.
"""
if not paths:
raise errors.FormatError(u'Missing directory value.')
super(DirectorySourceType, self).__init__()
self.paths = paths
self.separator = separator
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
source_type_attributes = {u'paths': self.paths}
if self.separator != u'/':
source_type_attributes[u'separator'] = self.separator
return source_type_attributes
class WindowsRegistryKeySourceType(SourceType):
"""Class that implements the Windows Registry key source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY
VALID_PREFIXES = [
r'HKEY_LOCAL_MACHINE',
r'HKEY_USERS',
r'HKEY_CLASSES_ROOT',
r'%%current_control_set%%',
]
def __init__(self, keys=None):
"""Initializes the source type object.
Args:
keys: optional list of key paths. The key paths are considered relative
to the root of the Windows Registry. The default is None.
Raises:
FormatError: when keys is not set.
"""
if not keys:
raise errors.FormatError(u'Missing keys value.')
if not isinstance(keys, list):
raise errors.FormatError(u'keys must be a list')
for key in keys:
self.ValidateKey(key)
super(WindowsRegistryKeySourceType, self).__init__()
self.keys = keys
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
return {u'keys': self.keys}
@classmethod
def ValidateKey(cls, key_path):
"""Validates this key against supported key names.
Args:
      key_path: string containing the path of the Registry key.
Raises:
FormatError: when key is not supported.
"""
for prefix in cls.VALID_PREFIXES:
if key_path.startswith(prefix):
return
if key_path.startswith(u'HKEY_CURRENT_USER\\'):
raise errors.FormatError(
u'HKEY_CURRENT_USER\\ is not supported instead use: '
u'HKEY_USERS\\%%users.sid%%\\')
    raise errors.FormatError(u'Unsupported Registry key path: {0}'.format(
        key_path))
class WindowsRegistryValueSourceType(SourceType):
"""Class that implements the Windows Registry value source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE
def __init__(self, key_value_pairs=None):
"""Initializes the source type object.
Args:
key_value_pairs: optional list of key path and value name pairs.
The key paths are considered relative to the root
of the Windows Registry. The default is None.
Raises:
FormatError: when key value pairs is not set.
"""
if not key_value_pairs:
raise errors.FormatError(u'Missing key value pairs value.')
if not isinstance(key_value_pairs, list):
raise errors.FormatError(u'key_value_pairs must be a list')
for pair in key_value_pairs:
if not isinstance(pair, dict):
raise errors.FormatError(u'key_value_pair must be a dict')
if set(pair.keys()) != set([u'key', u'value']):
error_message = (
u'key_value_pair missing "key" and "value" keys, got: {0}'
).format(key_value_pairs)
raise errors.FormatError(error_message)
WindowsRegistryKeySourceType.ValidateKey(pair['key'])
super(WindowsRegistryValueSourceType, self).__init__()
self.key_value_pairs = key_value_pairs
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
return {u'key_value_pairs': self.key_value_pairs}
class WMIQuerySourceType(SourceType):
"""Class that implements the WMI query source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_WMI_QUERY
def __init__(self, query=None, base_object=None):
"""Initializes the source type object.
Args:
      query: optional string containing the WMI query. The default is None.
      base_object: optional string containing the base object of the WMI
                   query. The default is None.
Raises:
FormatError: when query is not set.
"""
if not query:
raise errors.FormatError(u'Missing query value.')
super(WMIQuerySourceType, self).__init__()
self.base_object = base_object
self.query = query
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
source_type_attributes = {u'query': self.query}
if self.base_object:
source_type_attributes[u'base_object'] = self.base_object
return source_type_attributes
class SourceTypeFactory(object):
"""Class that implements a source type factory."""
_source_type_classes = {
definitions.TYPE_INDICATOR_ARTIFACT_GROUP: ArtifactGroupSourceType,
definitions.TYPE_INDICATOR_COMMAND: CommandSourceType,
definitions.TYPE_INDICATOR_DIRECTORY: DirectorySourceType,
definitions.TYPE_INDICATOR_FILE: FileSourceType,
definitions.TYPE_INDICATOR_PATH: PathSourceType,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY:
WindowsRegistryKeySourceType,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE:
WindowsRegistryValueSourceType,
definitions.TYPE_INDICATOR_WMI_QUERY: WMIQuerySourceType,
}
@classmethod
def CreateSourceType(cls, type_indicator, attributes):
"""Creates a source type object.
Args:
type_indicator: the source type indicator.
attributes: a dictionary containing the source attributes.
    Returns:
      A source type object (instance of SourceType).
    Raises:
      FormatError: if the type indicator is not set or unsupported,
        or if required attributes are missing.
    """
if type_indicator not in cls._source_type_classes:
raise errors.FormatError(u'Unsupported type indicator: {0}.'.format(
type_indicator))
return cls._source_type_classes[type_indicator](**attributes)
@classmethod
def DeregisterSourceType(cls, source_type_class):
"""Deregisters a source type.
The source types are identified based on their type indicator.
Args:
source_type_class: the source type (subclass of SourceType).
Raises:
KeyError: if a source type is not set for the corresponding type
indicator.
"""
if source_type_class.TYPE_INDICATOR not in cls._source_type_classes:
raise KeyError(u'Source type not set for type: {0}.'.format(
source_type_class.TYPE_INDICATOR))
del cls._source_type_classes[source_type_class.TYPE_INDICATOR]
@classmethod
def GetSourceTypes(cls):
"""Retrieves the source types.
Returns:
A list of source types (subclasses of SourceType).
"""
return cls._source_type_classes.values()
@classmethod
def GetSourceTypeIndicators(cls):
"""Retrieves the source type indicators.
Returns:
A list of source type indicators.
"""
return cls._source_type_classes.keys()
@classmethod
def RegisterSourceType(cls, source_type_class):
"""Registers a source type.
The source types are identified based on their type indicator.
Args:
source_type_class: the source type (subclass of SourceType).
Raises:
KeyError: if source types is already set for the corresponding
type indicator.
"""
if source_type_class.TYPE_INDICATOR in cls._source_type_classes:
raise KeyError(u'Source type already set for type: {0}.'.format(
source_type_class.TYPE_INDICATOR))
cls._source_type_classes[source_type_class.TYPE_INDICATOR] = (
source_type_class)
@classmethod
def RegisterSourceTypes(cls, source_type_classes):
"""Registers source types.
The source types are identified based on their type indicator.
Args:
source_type_classes: a list of source types (instances of SourceType).
"""
for source_type_class in source_type_classes:
cls.RegisterSourceType(source_type_class)
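# Minimal usage sketch for the factory (the path value is illustrative):
#
#   source = SourceTypeFactory.CreateSourceType(
#       definitions.TYPE_INDICATOR_FILE, {u'paths': [u'/var/log/messages']})
#   print(source.type_indicator, source.CopyToDict())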
|
Luke Showen brings a variety of talents to the real estate industry. He started out as a teacher and coach at Westfield High School before moving into the private sector, where he runs a family business as well as owning his own small businesses. He was a technology education teacher for 10 years, teaching construction and entrepreneurship classes. He has built his own home along with many others, so he has an “eye” for things in the home that sometimes get overlooked. He coached football, basketball and baseball and enjoys watching and attending all types of sporting events. Since leaving education he has worked in a family business that is based around sales, so he knows what it takes to “get the deal done”.
Honesty, integrity and understanding are what make Luke stand out in the real estate profession. He understands that selling or buying a home is a big decision, and he works hard to earn the client's trust and sense of security.
What drives Luke in the real estate world is working with a client through all the steps of buying or selling and having them be happy with the process. He wants everyone to have had a great experience and to be able to speak highly of his abilities as an agent.
Luke believes in the Remax Connections mission statement that everyone should have the right to own a home. With his teaching background enabling him to work with clients and his sales abilities helping him get the deal done, he looks forward to helping all types of clients find the right home. |
'''
hostingcup urlresolver plugin
Copyright (C) 2011 cyrus007
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import re
import urllib2
from urlresolver import common
from vidxden import unpack_js
# Image shown in the error popups below; this plugin ships no artwork of its
# own, so default to an empty path.
error_logo = ''
class HostingcupResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "hostingcup"
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
self.pattern = 'http://(www.)?hostingcup.com/[0-9A-Za-z]+'
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
try:
html = self.net.http_GET(web_url).content
page = ''.join(html.splitlines()).replace('\t','')
r = re.search("return p\}\(\'(.+?)\',\d+,\d+,\'(.+?)\'", page)
if r:
p, k = r.groups()
else:
raise Exception ('packed javascript embed code not found')
decrypted_data = unpack_js(p, k)
r = re.search('file.\',.\'(.+?).\'', decrypted_data)
if not r:
r = re.search('src="(.+?)"', decrypted_data)
if r:
stream_url = r.group(1)
else:
raise Exception ('stream url not found')
return stream_url
except urllib2.URLError, e:
common.addon.log_error('Hostingcup: got http error %d fetching %s' %
(e.code, web_url))
common.addon.show_small_popup('Error','Http error: '+str(e), 5000, error_logo)
return self.unresolvable(code=3, msg=e)
except Exception, e:
common.addon.log_error('**** Hostingcup Error occured: %s' % e)
common.addon.show_small_popup(title='[B][COLOR white]HOSTINGCUP[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
return self.unresolvable(code=0, msg=e)
def get_url(self, host, media_id):
return 'http://vidpe.com/%s' % media_id
def get_host_and_id(self, url):
r = re.search('//(.+?)/([0-9A-Za-z]+)', url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return re.match(self.pattern, url) or 'hostingcup' in host
|
For rooms up to 430 sq. ft.
The Boneco S250 steam humidifier offers hygienic air humidification that is ideal for families with children. Thanks to the hand-warm steam, the unit ensures a safe and pleasant atmosphere in households with children and pets. The S250 automatically adjusts to your desired relative humidity level and immediately reduces output once that value is reached. Easy handling, precise controllability and simple cleaning in Cleaning mode embody the peak of Swiss quality. The S250 also has a fragrance container for use with essential oils.
Dimensions (L x W x H) 12.4 in. x 6.7 in. x 11 in. |
#!/usr/bin/env python3
from ev3dev.ev3 import *
from navigation import speak, abort_on_button, sleep
from setup_sample_data import people
import logging
import requests
import json
LOGGER = logging.getLogger(__name__)
def setup_color_sensor(mode='COL-REFLECT'):
    # Connect the color sensor and check that it is connected.
cl = ColorSensor()
assert cl.connected
cl.mode = mode
return cl
@abort_on_button
def get_color(url="http://127.0.0.1:5000", fake_data=False):
"""
gets numerical color value from color sensor
sends value to directory api to retrieve person details
:param url: host:port url of api server
:return: set of coordinates for desk
"""
cl = setup_color_sensor('COL-COLOR')
color = cl.value()
while color < 1:
LOGGER.debug("Waiting to read color")
color = cl.value()
sleep(.1)
# change the sensor mode which makes it emit a red light so we know it's read something
    cl.mode = 'COL-REFLECT'
LOGGER.debug("looking for person with sid=%d", color)
if fake_data:
LOGGER.debug("using mock data")
person = people[color]
person_name = '{} {}'.format(person['first'], person['last'])
coordinates = (person['location_x'], person['location_y'])
else:
try:
filters = [dict(
name='sid',
op='==',
val=str(color),
)]
params = dict(q=json.dumps(dict(filters=filters, single=True)))
headers = {'Content-Type': 'application/json'}
LOGGER.debug("Making request [%s] params=[%s]", url, params)
result = requests.get(
url="{url}/api/person".format(url=url),
params=params,
headers=headers,
)
if result.status_code == 404:
LOGGER.error("Person [%s] not found", color)
raise Exception
elif result.status_code != 200:
LOGGER.error("Query error %s - %s", result.status_code, result.reason)
raise Exception
except:
LOGGER.exception("Exception making request")
raise
person = json.loads(result.content.decode('utf-8'))
person_name = '{} {}'.format(person['first'], person['last'])
coordinates = (person['desk']['location_x'], person['desk']['location_y'])
LOGGER.debug("Person=%s, x=%s, y=%s", person_name, coordinates[0], coordinates[1])
message = "Ah Taking you to {}".format(person_name)
speak(message)
return coordinates
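# Usage sketch (the host below is illustrative; fake_data=True uses the
# bundled sample people instead of querying the directory API):
#   coordinates = get_color(url='http://127.0.0.1:5000', fake_data=True)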
|
« It’s All About That “Baste” Thanksgiving Eve Party!
Come out Thanksgiving morning to mark the 41st running of Richmond’s Turkey Trot 10K and help Feed More!
Join Richmond Road Runners Club and the race’s sponsor Wegmans for the 10K road race and a kids fun run in and around the University of Richmond.
Runners and volunteers are encouraged to bring jars of peanut butter and/or cans of tuna on race day. All food donations will be delivered to Feed More and distributed through our network of nearly 300 agencies to our neighbors across Central Virginia in need. |
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import xml.etree.ElementTree as Et
import sys
import inspect
import ctypes
import os
import string
import dbus
import dbus.service
import dbus.mainloop.glib
try:
from . import cfg
except SystemError:
import cfg
STDOUT_TTY = os.isatty(sys.stdout.fileno())
def rtype(dbus_type):
"""
Decorator making sure that the decorated function returns a value of
specified type.
:param dbus_type: The specific dbus type to return value as
"""
def decorator(fn):
def decorated(*args, **kwargs):
return dbus_type(fn(*args, **kwargs))
return decorated
return decorator
# Field is expected to be a number, handle the corner cases when parsing
@rtype(dbus.UInt64)
def n(v):
if not v:
return 0
return int(float(v))
@rtype(dbus.UInt32)
def n32(v):
if not v:
return 0
return int(float(v))
# noinspection PyProtectedMember
def init_class_from_arguments(obj_instance):
for k, v in list(sys._getframe(1).f_locals.items()):
if k != 'self':
nt = k
# If the current attribute has a value, but the incoming does
# not, don't overwrite it. Otherwise the default values on the
# property decorator don't work as expected.
cur = getattr(obj_instance, nt, v)
# print 'Init class %s = %s' % (nt, str(v))
            if not (cur and len(str(cur)) and (v is None or len(str(v)) == 0)):
setattr(obj_instance, nt, v)
def get_properties(f):
"""
Walks through an object instance or it's parent class(es) and determines
which attributes are properties and if they were created to be used for
dbus.
:param f: Object to inspect
:return: A dictionary of tuples with each tuple being:
0 = An array of dicts with the keys being: p_t, p_name,
p_access(type, name, access)
1 = Hash of property names and current value
"""
interfaces = dict()
for c in inspect.getmro(f.__class__):
h = vars(c)
for p, value in h.items():
if isinstance(value, property):
# We found a property, see if it has a metadata type
key = attribute_type_name(p)
if key in h:
interface = h[key][1]
if interface not in interfaces:
interfaces[interface] = ([], {})
access = ''
if getattr(f.__class__, p).fget:
access += 'read'
if getattr(f.__class__, p).fset:
access += 'write'
interfaces[interface][0].append(
dict(
p_t=getattr(f, key)[0],
p_name=p,
p_access=access))
interfaces[interface][1][p] = getattr(f, p)
return interfaces
def get_object_property_diff(o_prop, n_prop):
"""
Walk through each object properties and report what has changed and with
the new values
:param o_prop: Old keys/values
:param n_prop: New keys/values
:return: hash of properties that have changed and their new value
"""
rc = {}
for intf_k, intf_v in o_prop.items():
for k, v in list(intf_v[1].items()):
# print('Comparing %s:%s to %s:%s' %
# (k, o_prop[intf_k][1][k], k, str(n_prop[intf_k][1][k])))
if o_prop[intf_k][1][k] != n_prop[intf_k][1][k]:
new_value = n_prop[intf_k][1][k]
if intf_k not in rc:
rc[intf_k] = dict()
rc[intf_k][k] = new_value
return rc
def add_properties(xml, interface, props):
"""
Given xml that describes the interface, add property values to the XML
for the specified interface.
:param xml: XML to edit
:param interface: Interface to add the properties too
:param props: Output from get_properties
:return: updated XML string
"""
root = Et.fromstring(xml)
if props:
for c in root:
# print c.attrib['name']
if c.attrib['name'] == interface:
for p in props:
temp = '<property type="%s" name="%s" access="%s"/>\n' % \
(p['p_t'], p['p_name'], p['p_access'])
c.append(Et.fromstring(temp))
return Et.tostring(root, encoding='utf8')
return xml
def attribute_type_name(name):
"""
Given the property name, return string of the attribute type
:param name:
:return:
"""
return "_%s_meta" % name
_type_map = dict(
s=dbus.String,
o=dbus.ObjectPath,
t=dbus.UInt64,
x=dbus.Int64,
u=dbus.UInt32,
i=dbus.Int32,
n=dbus.Int16,
q=dbus.UInt16,
d=dbus.Double,
y=dbus.Byte,
b=dbus.Boolean)
def _pass_through(v):
"""
If we have something which is not a simple type we return the original
value un-wrapped.
:param v:
:return:
"""
return v
def _dbus_type(t, value):
return _type_map.get(t, _pass_through)(value)
def dbus_property(interface_name, name, dbus_type, doc=None):
"""
Creates the get/set properties for the given name. It assumes that the
actual attribute is '_' + name and the attribute metadata is stuffed in
_name_type.
There is probably a better way todo this.
:param interface_name: Dbus interface this property is associated with
:param name: Name of property
:param dbus_type: dbus string type eg. s,t,i,x
:param doc: Python __doc__ for the property
:return:
"""
attribute_name = '_' + name
def getter(self):
t = getattr(self, attribute_name + '_meta')[0]
return _dbus_type(t, getattr(self.state, attribute_name[1:]))
prop = property(getter, None, None, doc)
def decorator(cls):
setattr(cls, attribute_name + '_meta', (dbus_type, interface_name))
setattr(cls, name, prop)
return cls
return decorator
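# Usage sketch for the decorator above (interface and property names are
# illustrative):
#
#   @dbus_property('com.example.Manager', 'Name', 's')
#   class Example(object):
#       def __init__(self, state):
#           self.state = state
#
# This adds a read-only dbus.String property `Name` backed by `state.Name`,
# plus the `_Name_meta` attribute that get_properties() consumes.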
def parse_tags(tags):
if len(tags):
if ',' in tags:
return tags.split(',')
return sorted([tags])
return dbus.Array([], signature='s')
def _common_log(msg, *attributes):
cfg.stdout_lock.acquire()
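    # 186 is the gettid syscall number on x86_64 Linux; it tags each log line
    # with the calling thread's id.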
tid = ctypes.CDLL('libc.so.6').syscall(186)
msg = "%d:%d - %s" % (os.getpid(), tid, msg)
if STDOUT_TTY and attributes:
print(color(msg, *attributes))
else:
print(msg)
cfg.stdout_lock.release()
sys.stdout.flush()
# Serializes access to stdout to prevent interleaved output
# @param msg Message to output to stdout
# @return None
def log_debug(msg, *attributes):
if cfg.DEBUG:
_common_log(msg, *attributes)
def log_error(msg, *attributes):
_common_log(msg, *attributes)
# noinspection PyUnusedLocal
def handler(signum, frame):
cfg.run.value = 0
log_debug('Signal handler called with signal %d' % signum)
if cfg.loop is not None:
cfg.loop.quit()
def pv_obj_path_generate():
return cfg.PV_OBJ_PATH + "/%d" % next(cfg.pv_id)
def vg_obj_path_generate():
return cfg.VG_OBJ_PATH + "/%d" % next(cfg.vg_id)
def lv_object_path_method(name, meta):
if name[0] == '[':
return _hidden_lv_obj_path_generate
elif meta[0][0] == 't':
return _thin_pool_obj_path_generate
elif meta[0][0] == 'C' and 'pool' in meta[1]:
return _cache_pool_obj_path_generate
return _lv_obj_path_generate
# Note: None of the individual LV path generate functions should be called
# directly, they should only be dispatched through lv_object_path_method
def _lv_obj_path_generate():
return cfg.LV_OBJ_PATH + "/%d" % next(cfg.lv_id)
def _thin_pool_obj_path_generate():
return cfg.THIN_POOL_PATH + "/%d" % next(cfg.thin_id)
def _cache_pool_obj_path_generate():
return cfg.CACHE_POOL_PATH + "/%d" % next(cfg.cache_pool_id)
def _hidden_lv_obj_path_generate():
return cfg.HIDDEN_LV_PATH + "/%d" % next(cfg.hidden_lv)
def job_obj_path_generate():
return cfg.JOB_OBJ_PATH + "/%d" % next(cfg.job_id)
def color(text, *user_styles):
styles = {
# styles
'reset': '\033[0m',
'bold': '\033[01m',
'disabled': '\033[02m',
'underline': '\033[04m',
'reverse': '\033[07m',
'strike_through': '\033[09m',
'invisible': '\033[08m',
# text colors
'fg_black': '\033[30m',
'fg_red': '\033[31m',
'fg_green': '\033[32m',
'fg_orange': '\033[33m',
'fg_blue': '\033[34m',
'fg_purple': '\033[35m',
'fg_cyan': '\033[36m',
'fg_light_grey': '\033[37m',
'fg_dark_grey': '\033[90m',
'fg_light_red': '\033[91m',
'fg_light_green': '\033[92m',
'fg_yellow': '\033[93m',
'fg_light_blue': '\033[94m',
'fg_pink': '\033[95m',
'fg_light_cyan': '\033[96m',
# background colors
'bg_black': '\033[40m',
'bg_red': '\033[41m',
'bg_green': '\033[42m',
'bg_orange': '\033[43m',
'bg_blue': '\033[44m',
'bg_purple': '\033[45m',
'bg_cyan': '\033[46m',
'bg_light_grey': '\033[47m'
}
color_text = ''
for style in user_styles:
try:
color_text += styles[style]
except KeyError:
return 'def color: parameter {} does not exist'.format(style)
color_text += text
return '\033[0m{0}\033[0m'.format(color_text)
def pv_range_append(cmd, device, start, end):
if (start, end) == (0, 0):
cmd.append(device)
else:
if start != 0 and end == 0:
cmd.append("%s:%d-" % (device, start))
else:
cmd.append(
"%s:%d-%d" %
(device, start, end))
def pv_dest_ranges(cmd, pv_dest_range_list):
if len(pv_dest_range_list):
for i in pv_dest_range_list:
pv_range_append(cmd, *i)
def round_size(size_bytes):
bs = 512
remainder = size_bytes % bs
if not remainder:
return size_bytes
return size_bytes + bs - remainder
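# e.g. round_size(1000) == 1024: sizes are rounded up to a whole number of
# 512-byte sectors.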
_ALLOWABLE_CH = string.ascii_letters + string.digits + '#+.:=@_\/%'
_ALLOWABLE_CH_SET = set(_ALLOWABLE_CH)
_ALLOWABLE_VG_LV_CH = string.ascii_letters + string.digits + '.-_+'
_ALLOWABLE_VG_LV_CH_SET = set(_ALLOWABLE_VG_LV_CH)
_LV_NAME_RESERVED = ("_cdata", "_cmeta", "_corig", "_mimage", "_mlog",
"_pmspare", "_rimage", "_rmeta", "_tdata", "_tmeta", "_vorigin")
# Tags can have the characters, based on the code
# a-zA-Z0-9._-+/=!:&#
_ALLOWABLE_TAG_CH = string.ascii_letters + string.digits + "._-+/=!:&#"
_ALLOWABLE_TAG_CH_SET = set(_ALLOWABLE_TAG_CH)
def _allowable_tag(tag_name):
# LVM should impose a length restriction
return set(tag_name) <= _ALLOWABLE_TAG_CH_SET
def _allowable_vg_name(vg_name):
if vg_name is None:
raise ValueError("VG name is None or empty")
vg_len = len(vg_name)
if vg_len == 0 or vg_len > 127:
raise ValueError("VG name (%s) length (%d) not in the domain 1..127" %
(vg_name, vg_len))
if not set(vg_name) <= _ALLOWABLE_VG_LV_CH_SET:
raise ValueError("VG name (%s) contains invalid character, "
"allowable set(%s)" % (vg_name, _ALLOWABLE_VG_LV_CH))
if vg_name == "." or vg_name == "..":
raise ValueError('VG name (%s) cannot be "." or ".."' % (vg_name))
def _allowable_lv_name(vg_name, lv_name):
if lv_name is None:
raise ValueError("LV name is None or empty")
lv_len = len(lv_name)
# This length is derived from empirical testing
if lv_len == 0 or (len(vg_name) + lv_len) > 125:
raise ValueError("LV name (%s) length (%d) + VG name length "
"not in the domain 1..125" % (lv_name, lv_len))
if not set(lv_name) <= _ALLOWABLE_VG_LV_CH_SET:
raise ValueError("LV name (%s) contains invalid character, "
"allowable (%s)" % (lv_name, _ALLOWABLE_VG_LV_CH))
if any(x in lv_name for x in _LV_NAME_RESERVED):
raise ValueError("LV name (%s) contains a reserved word, "
"reserved set(%s)" % (lv_name, str(_LV_NAME_RESERVED)))
if lv_name.startswith("snapshot") or lv_name.startswith("pvmove"):
raise ValueError("LV name (%s) starts with a reserved word, "
"reserved set(%s)" % (lv_name, str(["snapshot", "pvmove"])))
if lv_name[0] == '-':
raise ValueError("LV name (%s) cannot start with a '-' "
"character" % lv_name)
def validate_device_path(interface, device):
if not set(device) <= _ALLOWABLE_CH_SET:
raise dbus.exceptions.DBusException(
interface, 'Device path (%s) has invalid characters, '
'allowable (%s)' % (device, _ALLOWABLE_CH))
def validate_vg_name(interface, vg_name):
try:
_allowable_vg_name(vg_name)
except ValueError as ve:
raise dbus.exceptions.DBusException(
interface, str(ve))
def validate_lv_name(interface, vg_name, lv_name):
try:
_allowable_lv_name(vg_name, lv_name)
except ValueError as ve:
raise dbus.exceptions.DBusException(
interface, str(ve))
def validate_tag(interface, tag):
if not _allowable_tag(tag):
raise dbus.exceptions.DBusException(
interface, 'tag (%s) contains invalid character, allowable set(%s)'
% (tag, _ALLOWABLE_TAG_CH))
|
Crochet hot pads in various colors and sizes, made with Red Heart Super Saver acrylic yarn. Colors include light green to dark green, blue with brown and white, and light brown with dark brown. These hot pads would make a great addition to any house or kitchen and a wonderful gift for Christmas, birthdays, or housewarming parties. They are great for those "hot" items you have made for dinner, or just to set any hot plate or dish on. Made in a non-smoking environment. Care instructions: hand wash and lie flat to dry.
*Please be aware that hot pads are sold on an individual basis and not in a set*. |
# Parses arithmetic operations with modifications:
# +,-,*,/,(,) work as they should
# & is the AND probability
# | is the OR probability
# ^ is the XOR probability
# (the probability operators are commutative, so the left associativity used
# in the operator table below does not change the result)
# sorting: mine. Returns a dict mapping bracket nesting level to [start, end] index pairs (or [] if none/unbalanced).
# infix2rpn: infix to postfix (reverse polish notation) using the shunting-yard algorithm, taken and modified from http://andreinc.net/2010/10/05/converting-infix-to-rpn-shunting-yard-algorithm/
# parse: mine. Base function calling everything else.
# parse_pre: mine. Adds spaces around operators and normalizes AND/OR/XOR to &, |, ^.
# parse_hgcc: mine. Converts !-prefixed terms to numbers via HGCC.
# parse_data: mine. Equation parser.
# rpn2num: RPN evaluation taken and modified from https://rosettacode.org/wiki/Parsing/RPN_calculator_algorithm#Python
from Hyper_Calculator import *  # math helpers; provides HGCC used in parse_hgcc below
def sorting(data):
dict = {}
stack = []
test = data.find('(')
if test != -1:
level = 0
for i,c in enumerate(data):
#print(dict)
if c == '(':
level = level + 1
stack.append(i)
                if (level not in dict): dict[level] = list()  # initialize
elif c == ')':
if (not level) or (len(stack) != level): return [] #) found before (
dict[level].append([stack.pop(),i+1])
level = level - 1
#print(level)
if level != 0: return [] # no closing bracket
return dict
else:
return []
'''
Created on Oct 5, 2010
@author: nomemory
'''
#Associativity constants for operators
LEFT_ASSOC = 0
RIGHT_ASSOC = 1
#Supported operators
OPERATORS = {
'+' : (5, LEFT_ASSOC),
'-' : (5, LEFT_ASSOC),
'*' : (10, LEFT_ASSOC),
'/' : (10, LEFT_ASSOC),
'&' : (0, LEFT_ASSOC),
'|' : (0, LEFT_ASSOC),
'^' : (0, LEFT_ASSOC)
}
#Test if a certain token is operator
def isOperator(token):
return token in OPERATORS.keys()
#Test the associativity type of a certain token
def isAssociative(token, assoc):
if not isOperator(token):
raise ValueError('Invalid token: %s' % token)
return OPERATORS[token][1] == assoc
#Compare the precedence of two tokens
def cmpPrecedence(token1, token2):
if not isOperator(token1) or not isOperator(token2):
raise ValueError('Invalid tokens: %s %s' % (token1, token2))
return OPERATORS[token1][0] - OPERATORS[token2][0]
#Transforms an infix expression to RPN
def infix2rpn(tokens):
tokens = tokens.split()
out = []
stack = []
for token in tokens:
if isOperator(token):
while len(stack) != 0 and isOperator(stack[-1]):
if (isAssociative(token, LEFT_ASSOC) and cmpPrecedence(token, stack[-1]) <= 0) or (isAssociative(token, RIGHT_ASSOC) and cmpPrecedence(token, stack[-1]) < 0):
out.append(stack.pop())
continue
break
stack.append(token)
elif token == '(':
stack.append(token)
elif token == ')':
while len(stack) != 0 and stack[-1] != '(':
out.append(stack.pop())
stack.pop()
else:
out.append(token)
while len(stack) != 0:
out.append(stack.pop())
return out
def rpn2num(list):
a=[]
b={
'+': lambda x,y: y+x,
'-': lambda x,y: y-x,
'*': lambda x,y: y*x,
'/': lambda x,y: y/x,
'&': lambda x,y: y*x,
'|': lambda x,y: y+x-y*x,
'^': lambda x,y: x*(1-y)+(1-x)*y
}
for c in list:
if c in b: a.append(b[c](a.pop(),a.pop()))
else: a.append(float(c))
return a[0]
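# Worked example (probabilities are illustrative):
#   infix2rpn('0.5 & 0.25') -> ['0.5', '0.25', '&']
#   rpn2num(['0.5', '0.25', '&']) -> 0.125, i.e. P(A and B) for independent
#   events; '|' computes x + y - x*y and '^' the exclusive-or probability.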
def parse_pre(data):
data = data.replace("AND",'&')
data = data.replace("XOR",'^')
data = data.replace("OR",'|')
data = data.replace("and",'&')
data = data.replace("xor",'^')
data = data.replace("or",'|')
for i in ['&','|','^','+','-','*','/',')','(']: data = data.replace(i,' '+i+' ')
return data
def parse_hgcc(data):
while True:
s = data.find('!')
if s != -1:
e = data.find(" ",s+1)
if e == -1: v = data[s:] #reached end of equation
else: v = data[s:e]
t = v.split(',')
result = HGCC(int(t[2]),int(t[1]),int(t[0][1:]),int(t[3]),find=">=")
data = data.replace(v,str(float(result)))
else:
break
return data
def parse_data(input):
while True:
output = sorting(input)
if len(output) > 0:
i = output[len(output)][0]
tmp = infix2rpn(input[i[0]:i[1]])
tmp = rpn2num(tmp)
input = input.replace(input[i[0]:i[1]],str(tmp))
else: break
return rpn2num(infix2rpn(input))
def parse(data):
try:
data = parse_pre(data)
data = parse_hgcc(data)
data = parse_data(data)
except:
data = "ERROR"
return data |
A terminal currently under development to serve the Dominican Republic with clean, affordable fuels and a reliable power supply. It will also serve as a hub for the distribution and export of natural gas and NGLs to other parts of the central Caribbean and the Antilles. Project development is managed through SeaOne Caribbean, LLC and its affiliate SeaOne Dominicana, SRL. |
#!/usr/bin/env python
# Copyright (C) 2014-2018 Shea G Craig, 2018 Mosen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""distribution_point.py
Classes representing the various types of file storage available to
the JAMF Pro Server.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import shutil
import socket
import subprocess
import sys
import io
import math
import multiprocessing
import threading
sys.path.insert(0, "/Library/AutoPkg/JSSImporter")
import requests
try:
# Python 2.6-2.7
from HTMLParser import HTMLParser
except ImportError:
# Python 3
from html.parser import HTMLParser
# 2 and 3 compatible
try:
from urllib.parse import urlparse, urlencode, unquote, quote
from urllib.request import urlopen, Request
from urllib.error import HTTPError
except ImportError:
from urlparse import urlparse
from urllib import urlencode, unquote, quote
from urllib2 import urlopen, Request, HTTPError
from . import casper
from . import abstract
from .exceptions import JSSError
try:
from .contrib.mount_shares_better import mount_share
except ImportError:
# mount_shares_better uses PyObjC. If using non-system python,
# chances are good user has not set up PyObjC, so fall back to
# subprocess to mount. (See mount methods).
mount_share = None
from .tools import is_osx, is_linux, is_package
try:
import boto.s3
from boto.s3.connection import S3Connection, OrdinaryCallingFormat, S3ResponseError
from boto.s3.key import Key
BOTO_AVAILABLE = True
except ImportError:
print(
"boto is not available, you will not be able to use the AWS distribution point type"
)
BOTO_AVAILABLE = False
PKG_FILE_TYPE = "0"
EBOOK_FILE_TYPE = "1"
IN_HOUSE_APP_FILE_TYPE = "2"
def auto_mounter(original):
"""Decorator for automatically mounting, if needed."""
def mounter(*args):
"""If not mounted, mount."""
self = args[0]
if not self.is_mounted():
self.mount()
return original(*args)
return mounter
# pylint: disable=too-few-public-methods
class Repository(object):
"""Base class for file repositories.
This class is not usable on its own; however, it provides the base
init which all subclasses should use.
Attributes:
connection (dict): Dictionary for storing connection arguments.
required_attrs (Set): A set of the keys which must be supplied to the initializer,
otherwise a JSSError will be raised.
Raises:
JSSError: If mandatory arguments are not supplied to the initializer.
"""
required_attrs = set()
def __init__(self, **connection_args):
"""Store the connection information."""
if self.required_attrs.issubset(set(connection_args.keys())):
self.connection = connection_args
self._build_url()
else:
missing_attrs = self.required_attrs.difference(set(connection_args.keys()))
raise JSSError(
"Missing REQUIRED argument(s) %s to %s distribution point."
% (list(missing_attrs), self.__class__)
)
def __repr__(self):
"""Return string representation of connection arguments."""
output = ["Distribution Point: %s" % self.connection["url"]]
output.append("Type: %s" % type(self))
output.append("Connection Information:")
for key, val in self.connection.items():
output.append("\t%s: %s" % (key, val))
return "\n".join(output) + "\n"
def _build_url(self):
"""Private build url method."""
raise NotImplementedError
# pylint: enable=too-few-public-methods
class FileRepository(Repository):
"""Local file shares."""
def _build_url(self):
"""Build a connection URL."""
pass
def copy_pkg(self, filename, _):
"""Copy a package to the repo's Package subdirectory.
Args:
filename: Path for file to copy.
_: Ignored. Used for compatibility with JDS repos.
"""
basename = os.path.basename(filename)
self._copy(
filename, os.path.join(self.connection["mount_point"], "Packages", basename)
)
def _copy(self, filename, destination): # pylint: disable=no-self-use
"""Copy a file or folder to the repository.
Will mount if needed.
Args:
filename: Path to copy.
destination: Remote path to copy file to.
"""
full_filename = os.path.abspath(os.path.expanduser(filename))
if os.path.isdir(full_filename):
shutil.copytree(full_filename, destination)
elif os.path.isfile(full_filename):
shutil.copyfile(full_filename, destination)
def delete(self, filename):
"""Delete a file from the repository.
Args:
filename: String filename only (i.e. no path) of file to
delete.
"""
folder = "Packages"
path = os.path.join(self.connection["mount_point"], folder, filename)
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path):
os.remove(path)
def exists(self, filename):
"""Report whether a file exists on the distribution point.
Determines file type by extension.
Args:
filename: Filename you wish to check. (No path! e.g.:
"AdobeFlashPlayer-14.0.0.176.pkg")
"""
filepath = os.path.join(self.connection["mount_point"], "Packages", filename)
return os.path.exists(filepath)
def __contains__(self, filename):
"""Magic method to allow constructs similar to:
if 'abc.pkg' in dp:
"""
return self.exists(filename)
class LocalRepository(FileRepository):
"""JAMF Pro repo located on a local filesystem path."""
required_attrs = {"mount_point", "share_name"}
def __init__(self, **connection_args):
"""Set up Local file share.
Args:
connection_args: Dict with the following key/val pairs:
mount_point: Path to a valid mount point.
share_name: The fileshare's name.
"""
super(LocalRepository, self).__init__(**connection_args)
self.connection["url"] = "local://%s" % self.connection["mount_point"]
class MountedRepository(FileRepository):
"""Parent class for mountable file shares.
Attributes:
fs_type: Class attribute, string protocol type (currently AFP
or SMB).
"""
fs_type = "undefined"
def __init__(self, **connection_args):
"""Init a MountedRepository by calling super."""
super(MountedRepository, self).__init__(**connection_args)
def mount(self):
"""Mount the repository."""
if not self.is_mounted():
# ensure the mountpoint exists.
if not os.path.exists(self.connection["mount_point"]):
os.mkdir(self.connection["mount_point"])
self._mount()
def _mount(self):
"""Private mount method."""
raise NotImplementedError
def umount(self, forced=True):
"""Try to unmount our mount point.
Defaults to using forced method. If OS is Linux, it will not
delete the mount point.
Args:
forced: Bool whether to force the unmount. Default is True.
"""
if self.is_mounted():
if is_osx():
cmd = ["/usr/sbin/diskutil", "unmount", self.connection["mount_point"]]
if forced:
cmd.insert(2, "force")
subprocess.check_call(cmd)
else:
cmd = ["umount", self.connection["mount_point"]]
if forced:
cmd.insert(1, "-f")
subprocess.check_call(cmd)
def is_mounted(self):
"""Test for whether a mount point is mounted.
If it is currently mounted, determine the path where it's
mounted and update the connection's mount_point accordingly.
"""
mount_check = subprocess.check_output("mount").decode().splitlines()
# The mount command returns lines like this on OS X...
# //[email protected]/JSS%20REPO on /Users/Shared/JSS REPO
# (afpfs, nodev, nosuid, mounted by local_me)
# and like this on Linux...
# //pretendco.com/jamf on /mnt/jamf type cifs (rw,relatime,
# <options>...)
valid_mount_strings = self._get_valid_mount_strings()
was_mounted = False
if is_osx():
mount_string_regex = re.compile(r"\(([\w]*),*.*\)$")
mount_point_regex = re.compile(r"on ([\w/ -]*) \(.*$")
elif is_linux():
mount_string_regex = re.compile(r"type ([\w]*) \(.*\)$")
mount_point_regex = re.compile(r"on ([\w/ -]*) type .*$")
else:
raise JSSError("Unsupported OS.")
for mount in mount_check:
fs_match = re.search(mount_string_regex, mount)
fs_type = fs_match.group(1) if fs_match else None
# Automounts, non-network shares, and network shares
# all have a slightly different format, so it's easiest to
# just split.
mount_string = mount.split(" on ")[0]
# Does the mount_string match one of our valid_mount_strings?
if [
mstring for mstring in valid_mount_strings if mstring in mount_string
] and self.fs_type == fs_type:
                # Get the mount point string from the end back to the
                # last "on", but before the options (wrapped in
                # parentheses). Considers alphanumerics, / , _ , - and a
                # blank space as valid, but no crazy chars.
match = re.search(mount_point_regex, mount)
mount_point = match.group(1) if match else None
was_mounted = True
# Reset the connection's mount point to the discovered
# value.
if mount_point:
self.connection["mount_point"] = mount_point
if self.connection["jss"].verbose:
print(
"%s is already mounted at %s.\n"
% (self.connection["url"], mount_point)
)
# We found the share, no need to continue.
break
if not was_mounted:
# If the share is not mounted, check for another share
            # mounted to the same path and if found, increment the
# name to avoid conflicts.
count = 1
while os.path.ismount(self.connection["mount_point"]):
self.connection["mount_point"] = "%s-%s" % (
self.connection["mount_point"],
count,
)
count += 1
# Do an inexpensive double check...
return os.path.ismount(self.connection["mount_point"])
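    # Worked example (using the OS X sample line from the comments above): for
    #   "//[email protected]/JSS%20REPO on /Users/Shared/JSS REPO (afpfs, nodev, ...)"
    # mount_string_regex captures "afpfs" and mount_point_regex captures
    # "/Users/Shared/JSS REPO".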
def _get_valid_mount_strings(self):
"""Return a tuple of potential mount strings.
Casper Admin seems to mount in a number of ways:
- hostname/share
- fqdn/share
Plus, there's the possibility of:
- IPAddress/share
Then factor in the possibility that the port is included too!
This gives us a total of up to six valid addresses for mount
to report.
"""
results = set()
join = os.path.join
url = self.connection["url"]
share_name = quote(self.connection["share_name"], safe="~()*!.'$")
port = self.connection["port"]
# URL from python-jss form:
results.add(join(url, share_name))
results.add(join("%s:%s" % (url, port), share_name))
# IP Address form:
# socket.gethostbyname() will return an IP address whether
# an IP address, FQDN, or .local name is provided.
ip_address = socket.gethostbyname(url)
results.add(join(ip_address, share_name))
results.add(join("%s:%s" % (ip_address, port), share_name))
# Domain name only form:
domain_name = url.split(".")[0]
results.add(join(domain_name, share_name))
results.add(join("%s:%s" % (domain_name, port), share_name))
# FQDN form using getfqdn:
# socket.getfqdn() could just resolve back to the ip
# or be the same as the initial URL so only add it if it's
# different than both.
fqdn = socket.getfqdn(ip_address)
results.add(join(fqdn, share_name))
results.add(join("%s:%s" % (fqdn, port), share_name))
return tuple(results)
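    # Illustrative sketch of _get_valid_mount_strings (hypothetical values):
    # for url="repo.example.org", share_name="CasperShare" and port="548",
    # the returned tuple could contain "repo.example.org/CasperShare",
    # "repo.example.org:548/CasperShare", "repo/CasperShare",
    # "repo:548/CasperShare", plus the resolved IP and FQDN variants.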
@auto_mounter
def _copy(self, filename, destination):
"""Copy a file or folder to the repository.
Will mount if needed.
Args:
filename: Path to copy.
destination: Remote path to copy file to.
"""
super(MountedRepository, self)._copy(filename, destination)
@auto_mounter
def delete(self, filename):
"""Delete a file from the repository.
Args:
filename: String filename only (i.e. no path) of file to
delete.
"""
super(MountedRepository, self).delete(filename)
@auto_mounter
def exists(self, filename):
"""Report whether a file exists on the distribution point.
Determines file type by extension.
Args:
filename: Filename you wish to check. (No path! e.g.:
"AdobeFlashPlayer-14.0.0.176.pkg")
"""
return super(MountedRepository, self).exists(filename)
def __repr__(self):
"""Return a formatted string of connection info."""
# Do an "update" to get current mount points.
self.is_mounted()
output = super(MountedRepository, self).__repr__()
output += "Mounted: %s\n" % self.is_mounted()
return output
@property
def _encoded_password(self):
"""Returns the safely url-quoted password for this DP."""
return quote(self.connection["password"], safe="~()*!.'$")
class AFPDistributionPoint(MountedRepository):
"""Represents an AFP repository."""
protocol = "afp"
fs_type = "afpfs"
required_attrs = {"url", "mount_point", "username", "password", "share_name"}
def __init__(self, **connection_args):
"""Set up an AFP connection.
Args:
connection_args (dict): Dict with the following key/val pairs:
                url: URL to the mount point, including volume name, e.g.:
"my_repository.domain.org/jamf" (Do _not_ include
protocol or auth info.)
mount_point: Path to a valid mount point.
share_name: The fileshare's name.
username: Share R/W username.
password: Share R/W password.
"""
super(AFPDistributionPoint, self).__init__(**connection_args)
# Check to see if share is mounted, and update mount point
self.is_mounted()
def _build_url(self):
"""Build the URL string to mount this file share."""
if self.connection.get("username") and self.connection.get("password"):
auth = "%s:%s@" % (self.connection["username"], self._encoded_password)
else:
auth = ""
# Optional port number
port = self.connection.get("port")
port = ":" + port if port else ""
self.connection["mount_url"] = "%s://%s%s%s/%s" % (
self.protocol,
auth,
self.connection["url"],
port,
self.connection["share_name"],
)
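        # Illustrative result (hypothetical values): for username="casper",
        # url="repo.example.org" and share_name="CasperShare", mount_url
        # becomes "afp://casper:<encoded-pw>@repo.example.org/CasperShare".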
def _mount(self):
"""Mount based on which OS is running."""
# mount_afp "afp://scraig:<password>@address/share" <mnt_point>
if is_osx():
if self.connection["jss"].verbose:
print(self.connection["mount_url"])
if mount_share:
self.connection["mount_point"] = mount_share(
self.connection["mount_url"]
)
else:
# Non-Apple OS X python:
args = [
"mount",
"-t",
self.protocol,
self.connection["mount_url"],
self.connection["mount_point"],
]
if self.connection["jss"].verbose:
print(" ".join(args))
subprocess.check_call(args)
elif is_linux():
args = [
"mount_afp",
"-t",
self.protocol,
self.connection["mount_url"],
self.connection["mount_point"],
]
if self.connection["jss"].verbose:
print(" ".join(args))
subprocess.check_call(args)
else:
raise JSSError("Unsupported OS.")
class SMBDistributionPoint(MountedRepository):
"""Represents a SMB distribution point."""
protocol = "smbfs"
required_attrs = {
"url",
"share_name",
"mount_point",
"domain",
"username",
"password",
}
def __init__(self, **connection_args):
"""Set up a SMB connection.
Args:
connection_args: Dict with the following key/val pairs:
                url: URL to the mount point, including volume name, e.g.:
"my_repository.domain.org/jamf" (Do _not_ include
protocol or auth info.)
mount_point: Path to a valid mount point.
share_name: The fileshare's name.
domain: Specify the domain.
username: Share R/W username.
password: Share R/W password.
"""
super(SMBDistributionPoint, self).__init__(**connection_args)
if is_osx():
self.fs_type = "smbfs"
if is_linux():
self.fs_type = "cifs"
# Check to see if share is mounted, and update.
self.is_mounted()
def _build_url(self):
"""Build the URL string to mount this file share."""
if self.connection.get("username") and self.connection.get("password"):
auth = "%s:%s@" % (self.connection["username"], self._encoded_password)
if self.connection.get("domain"):
auth = r"%s;%s" % (self.connection["domain"], auth)
else:
auth = ""
# Optional port number
port = self.connection.get("port")
port = ":" + port if port else ""
# Construct mount_url
self.connection["mount_url"] = "//%s%s%s/%s" % (
auth,
self.connection["url"],
port,
self.connection["share_name"],
)
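        # Illustrative result (hypothetical values): with domain="CORP",
        # username="casper", url="repo.example.org" and
        # share_name="CasperShare", mount_url becomes
        # "//CORP;casper:<encoded-pw>@repo.example.org/CasperShare".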
def _mount(self):
"""Mount based on which OS is running."""
# mount -t cifs -o \
# username=<user>,password=<password>,domain=<domain>,port=445 \
# //server/share /mnt/<mountpoint>
if is_osx():
if mount_share:
mount_url = "smb:%s" % self.connection["mount_url"]
if self.connection["jss"].verbose:
print(mount_url)
self.connection["mount_point"] = mount_share(mount_url)
else:
# Non-Apple OS X python:
args = [
"mount",
"-t",
self.protocol,
self.connection["mount_url"],
self.connection["mount_point"],
]
if self.connection["jss"].verbose:
print(" ".join(args))
subprocess.check_call(args)
elif is_linux():
args = [
"mount",
"-t",
"cifs",
"-o",
"username=%s,password=%s,domain=%s,port=%s"
% (
self.connection["username"],
self.connection["password"],
self.connection["domain"],
self.connection["port"],
),
"//%s/%s" % (self.connection["url"], self.connection["share_name"]),
self.connection["mount_point"],
]
if self.connection["jss"].verbose:
print(" ".join(args))
subprocess.check_call(args)
else:
raise JSSError("Unsupported OS.")
class DistributionServer(Repository):
"""Abstract class for representing JDS and CDP type repos.
The JSS has a folder to which packages are uploaded via a private
API call to dbfileupload. From there, the JSS handles the
distribution to its Cloud and JDS points.
There are caveats to its exists() method which you should be
aware of, along with a private API exists_with_casper method, which
probably works more like what one would expect. Please see those
methods for more information.
"""
required_attrs = {"jss"}
destination = "0"
def __init__(self, **connection_args):
"""Set up a connection to a distribution server.
Args:
connection_args: Dict, with required key:
jss: A JSS Object.
"""
super(DistributionServer, self).__init__(**connection_args)
self.connection["url"] = self.connection["jss"].base_url
def _build_url(self):
"""Build the URL for POSTing files. 10.2 and earlier.
This actually still works in some scenarios, but it seems like it will be deprecated soon.
"""
self.connection["upload_url"] = "%s/%s" % (
self.connection["jss"].base_url,
"dbfileupload",
)
self.connection["delete_url"] = "%s/%s" % (
self.connection["jss"].base_url,
"casperAdminSave.jxml",
)
def _build_url_modern(self):
"""Build the URL for POSTing files.
This uses the UploadServlet that has been used to handle most file uploads into JAMF Pro.
"""
self.connection["upload_url"] = "%s/%s" % (
self.connection["jss"].base_url,
"upload",
)
self.connection["delete_url"] = "%s/%s" % (
self.connection["jss"].base_url,
"casperAdminSave.jxml",
)
def copy_pkg(self, filename, id_=-1):
"""Copy a package to the distribution server.
Bundle-style packages must be zipped prior to copying.
Args:
filename: Full path to file to upload.
id_: ID of Package object to associate with, or -1 for new
packages (default).
"""
self._copy(filename, id_=id_, file_type=PKG_FILE_TYPE)
def _copy(self, filename, id_=-1, file_type=0):
"""Upload a file to the distribution server. 10.2 and earlier
Directories/bundle-style packages must be zipped prior to
copying.
"""
if os.path.isdir(filename):
raise TypeError(
"Distribution Server type repos do not permit directory "
"uploads. You are probably trying to upload a non-flat "
"package. Please zip or create a flat package."
)
basefname = os.path.basename(filename)
resource = open(filename, "rb")
headers = {
"DESTINATION": self.destination,
"OBJECT_ID": str(id_),
"FILE_TYPE": file_type,
"FILE_NAME": basefname,
}
response = self.connection["jss"].session.post(
url=self.connection["upload_url"], data=resource, headers=headers
)
if self.connection["jss"].verbose:
print(response)
def _copy_new(self, filename, id_=-1, file_type=0):
"""Upload a file to the distribution server.
Directories/bundle-style packages must be zipped prior to
copying.
"""
if os.path.isdir(filename):
raise TypeError(
"Distribution Server type repos do not permit directory "
"uploads. You are probably trying to upload a non-flat "
"package. Please zip or create a flat package."
)
basefname = os.path.basename(filename)
resource = open(filename, "rb")
headers = {
"sessionIdentifier": "com.jamfsoftware.jss.objects.packages.Package:%s"
% str(id_),
"fileIdentifier": "FIELD_FILE_NAME_FOR_DIST_POINTS",
}
response = self.connection["jss"].session.post(
url=self.connection["upload_url"], data=resource, headers=headers
)
print(response)
if self.connection["jss"].verbose:
print(response)
def delete_with_casper_admin_save(self, pkg):
"""Delete a pkg from the distribution server.
Args:
pkg: Can be a jss.Package object, an int ID of a package, or
a filename.
"""
# The POST needs the package ID.
if pkg.__class__.__name__ == "Package":
package_to_delete = pkg.id
elif isinstance(pkg, int):
package_to_delete = pkg
elif isinstance(pkg, str):
package_to_delete = self.connection["jss"].Package(pkg).id
else:
raise TypeError
data_dict = {
"username": self.connection["jss"].user,
"password": self.connection["jss"].password,
"deletedPackageID": package_to_delete,
}
self.connection["jss"].session.post(
url=self.connection["delete_url"], data=data_dict
)
# There's no response if it works.
def delete(self, filename):
"""Delete a package distribution server.
This method simply finds the Package object from the database
with the API GET call and then deletes it. This will remove the
file from the database blob.
For setups which have file share distribution points, you will
need to delete the files on the shares also.
Args:
filename: Filename (no path) to delete.
"""
if is_package(filename):
self.connection["jss"].Package(filename).delete()
def exists(self, filename):
"""Check for the existence of a package.
Unlike other DistributionPoint types, JDS and CDP types have no
documented interface for checking whether the server and its
children have a complete copy of a file. The best we can do is
check for an object using the API /packages URL--JSS.Package()
and look for matches on the filename.
If this is not enough, please use the alternate
exists_with_casper method. For example, it's possible to create
a Package object but never upload a package file, and this
method will still return "True".
Also, this may be slow, as it needs to retrieve the complete
list of packages from the server.
"""
# Technically, the results of the casper.jxml page list the
# package files on the server. This is an undocumented
# interface, however.
result = False
if is_package(filename):
packages = self.connection["jss"].Package().retrieve_all()
for package in packages:
if package.findtext("filename") == filename:
result = True
break
return result
def exists_using_casper(self, filename):
"""Check for the existence of a package file.
Unlike other DistributionPoint types, JDS and CDP types have no
documented interface for checking whether the server and its
children have a complete copy of a file. The best we can do is
check for an object using the API /packages URL--JSS.Package()
and look for matches on the filename.
If this is not enough, this method uses the results of the
casper.jxml page to determine if a package exists. This is an
undocumented feature and as such should probably not be relied
upon.
It will test for whether the file exists on ALL configured
distribution servers. This may register False if the JDS is busy
syncing them.
"""
casper_results = casper.Casper(self.connection["jss"])
distribution_servers = casper_results.find("distributionservers")
# Step one: Build a list of sets of all package names.
all_packages = []
for distribution_server in distribution_servers:
packages = set()
for package in distribution_server.findall("packages/package"):
packages.add(os.path.basename(package.find("fileURL").text))
all_packages.append(packages)
# Step two: Intersect the sets.
base_set = all_packages.pop()
for packages in all_packages:
base_set = base_set.intersection(packages)
# Step three: Check for membership.
return filename in base_set
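# Hedged usage sketch (hypothetical 'jss_instance'; JDS is defined below):
#   jds = JDS(jss=jss_instance)
#   jds.copy_pkg("/tmp/Firefox-60.0.pkg")   # POSTs the file to .../dbfileupload
#   jds.exists("Firefox-60.0.pkg")          # True once a Package object matches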
class JDS(DistributionServer):
"""Class for representing a JDS and its controlling JSS.
The JSS has a folder to which packages are uploaded. From there, the
JSS handles the distribution to its Cloud and JDS points.
This class should be considered experimental!
- There are caveats to its .exists() method
- It is unclear at the moment what the interaction is in systems
that have both a JDS and a CDP, especially depending on which is the
master.
"""
required_attrs = {"jss"}
destination = "1"
class CDP(DistributionServer):
"""Class for representing a CDP and its controlling JSS.
The JSS has a folder to which packages are uploaded. From there, the
JSS handles the distribution to its Cloud and JDS points.
This class should be considered experimental!
- There are caveats to its .exists() method
- It is unclear at the moment what the interaction is in systems
that have both a JDS and a CDP, especially depending on which is the
master.
"""
required_attrs = {"jss"}
destination = "2"
class CloudDistributionServer(Repository):
"""Abstract class for representing JCDS type repos.
"""
def package_index_using_casper(self):
"""Get a list of packages on the JCDS
Similar to JDS and CDP, JCDS types have no
documented interface for checking whether the server and its
children have a complete copy of a file. The best we can do is
check for an object using the API /packages URL--JSS.Package()
and look for matches on the filename.
If this is not enough, this method uses the results of the
casper.jxml page to determine if a package exists. This is an
undocumented feature and as such should probably not be relied
upon.
        It tests whether the file exists on cloud distribution points only.
"""
casper_results = casper.Casper(self.connection["jss"])
cloud_distribution_points = casper_results.find("cloudDistributionPoints")
# Step one: Build a list of sets of all package names.
all_packages = []
for distribution_point in cloud_distribution_points:
if distribution_point.findtext("name") != "Jamf Cloud":
continue # type 4 might be reserved for JCDS?
for package in distribution_point.findall("packages/package"):
package_obj = casper_results.find(
"./packages/package[id='%s']" % (package.findtext("id"),)
)
all_packages.append(
{
"id": package.findtext("id"),
"checksum": package.findtext("checksum"),
"size": package.findtext("size"),
"lastModified": package.findtext("lastModified"),
"fileURL": unquote(package.findtext("fileURL")),
"name": package_obj.findtext("name"),
"filename": package_obj.findtext("filename"),
}
)
return all_packages
def _jcds_upload_chunk(
filename, base_url, upload_token, chunk_index, chunk_size, total_chunks
):
"""Upload a single chunk of a file to JCDS.
Args:
filename (str): The full path to the file being uploaded.
base_url (str): The JCDS base URL which includes the regional hostname and the tenant id.
upload_token (str): The upload token, scraped from legacy/packages.html
chunk_index (int): The zero-based index of the chunk being uploaded.
        chunk_size (int): The size of each chunk, in bytes.
        total_chunks (int): The total count of chunks to upload.
Returns:
dict: JSON Response from JCDS
"""
print("Working on Chunk [{}/{}]".format(chunk_index + 1, total_chunks))
resource = open(filename, "rb")
resource.seek(chunk_index * chunk_size)
chunk_data = resource.read(chunk_size)
basefname = os.path.basename(filename)
chunk_url = "{}/{}/part?chunk={}&chunks={}".format(
base_url, basefname, chunk_index, total_chunks
)
chunk_reader = io.BytesIO(chunk_data)
headers = {"X-Auth-Token": upload_token}
response = requests.post(
url=chunk_url, headers=headers, files={"file": chunk_reader},
)
return response.json()
# Semaphore controlling max workers for chunked uploads
jcds_semaphore = threading.BoundedSemaphore(value=3)
class JCDSChunkUploadThread(threading.Thread):
def __init__(self, *args, **kwargs):
self.filename = kwargs["filename"]
self.base_url = kwargs["base_url"]
self.upload_token = kwargs["upload_token"]
self.chunk_index = kwargs["chunk_index"]
self.chunk_size = kwargs["chunk_size"]
self.total_chunks = kwargs["total_chunks"]
super_kwargs = dict(kwargs)
del super_kwargs["filename"]
del super_kwargs["base_url"]
del super_kwargs["upload_token"]
del super_kwargs["chunk_index"]
del super_kwargs["chunk_size"]
del super_kwargs["total_chunks"]
super(JCDSChunkUploadThread, self).__init__(*args, **super_kwargs)
def run(self):
jcds_semaphore.acquire()
try:
print(
"Working on Chunk [{}/{}]".format(
self.chunk_index + 1, self.total_chunks
)
)
resource = open(self.filename, "rb")
resource.seek(self.chunk_index * self.chunk_size)
chunk_data = resource.read(self.chunk_size)
basefname = os.path.basename(self.filename)
chunk_url = "{}/{}/part?chunk={}&chunks={}".format(
self.base_url, basefname, self.chunk_index, self.total_chunks
)
chunk_reader = io.BytesIO(chunk_data)
headers = {"X-Auth-Token": self.upload_token}
response = requests.post(
url=chunk_url, headers=headers, files={"file": chunk_reader},
)
return response.json()
        except Exception:
            # Swallow per-chunk errors so that the semaphore is always
            # released in the finally clause below.
            pass
finally:
jcds_semaphore.release()
class AWS(CloudDistributionServer, abstract.AbstractRepository):
"""Class for representing an AWS Cloud Distribution Point and its controlling JSS.
"""
required_attrs = {"jss", "bucket"}
def __init__(self, **connection_args):
"""Set up a connection to an AWS S3 bucket.
It is more secure to use the following environment variables provided by boto:
AWS_ACCESS_KEY_ID - The access key id to the jamf bucket
AWS_SECRET_ACCESS_KEY - The secret access key to the jamf bucket
You may also use the file ~/.boto as described in the boto documentation.
Args:
connection_args: Dict, with required keys:
jss: A JSS Object.
bucket: Name of the JAMF bucket.
aws_access_key_id (optional): The access key id
                aws_secret_access_key (optional): The secret access key; prefer the environment variable instead.
host (optional): A bucket host. Seems to be needed if your bucket is not in the default location
                    e.g. ap-southeast-2.
chunk_size (optional): The chunk size for large objects >50mb
Throws:
S3ResponseError if the bucket does not exist
"""
super(AWS, self).__init__(**connection_args)
self.s3 = S3Connection(
aws_access_key_id=connection_args.get("aws_access_key_id", None),
aws_secret_access_key=connection_args.get("aws_secret_access_key", None),
host=connection_args.get("host", boto.s3.connection.NoHostProvided),
)
try:
self.bucket = self.s3.get_bucket(connection_args["bucket"])
except S3ResponseError as e:
            raise JSSError(
                "Error getting bucket '%s' (it may not exist): %s"
                % (connection_args["bucket"], e)
            )
self.connection["url"] = self.bucket
self.chunk_size = connection_args.get("chunk_size", 52428800) # 50 mb default
def _build_url(self):
"""Build a connection URL."""
pass
def copy_pkg(self, filename, id_=-1):
"""Copy a package to the repo's Package subdirectory.
Args:
filename: Path for file to copy.
id_: Unused
"""
self._copy(filename, id_=id_)
def _copy(self, filename, id_=-1): # type: (str, int) -> None
"""Copy a file or folder to the bucket.
Does not yet support chunking.
Args:
filename: Path to copy.
            id_: Package object ID, stored as 'jamf-package-id' metadata on the key.
"""
bucket_key = os.path.basename(filename)
exists = self.bucket.get_key(bucket_key)
if exists:
print("Already exists")
else:
k = Key(self.bucket)
k.key = bucket_key
k.set_metadata("jamf-package-id", id_)
k.set_contents_from_filename(filename)
def delete(self, filename): # type: (str) -> None
bucket_key = os.path.basename(filename)
self.bucket.delete_key(bucket_key)
def exists(self, filename): # type: (str) -> bool
"""Check whether a package already exists by checking for a bucket item with the same filename.
Args:
filename: full path to filename. Only the name itself will be checked.
Returns:
True if the package exists, else false
"""
k = self.bucket.get_key(os.path.basename(filename))
return k is not None
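# Hedged usage sketch (hypothetical names; requires boto plus
# AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY in the environment):
#   aws_dp = AWS(jss=jss_instance, bucket="my-jamf-bucket")
#   aws_dp.copy_pkg("/tmp/Firefox-60.0.pkg")
#   aws_dp.exists("Firefox-60.0.pkg")  # True after the upload completes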
class JCDS(CloudDistributionServer):
"""Class for representing a JCDS and its controlling jamfcloud JSS.
The JSS allows direct upload to the JCDS by exposing the access token from the package upload page.
This class should be considered experimental!
"""
required_attrs = {"jss"}
destination = "3"
workers = 3
chunk_size = 1048768
def __init__(self, **connection_args):
"""Set up a connection to a distribution server.
Args:
connection_args (dict):
jss (JSS): The associated JAMF Pro Server instance
"""
super(JCDS, self).__init__(**connection_args)
self.connection["url"] = "JCDS"
def _scrape_tokens(self):
"""Scrape JCDS upload URL and upload access token from the jamfcloud instance."""
jss = self.connection["jss"]
response = jss.scrape("legacy/packages.html?id=-1&o=c")
matches = re.search(
r'data-base-url="([^"]*)"', response.content.decode("utf-8")
)
if matches is None:
raise JSSError(
"Did not find the JCDS base URL on the packages page. Is this actually Jamfcloud?"
)
jcds_base_url = matches.group(1)
matches = re.search(
r'data-upload-token="([^"]*)"', response.content.decode("utf-8")
)
if matches is None:
raise JSSError(
"Did not find the JCDS upload token on the packages page. Is this actually Jamfcloud?"
)
jcds_upload_token = matches.group(1)
h = HTMLParser()
jcds_base_url = h.unescape(jcds_base_url)
self.connection["jcds_base_url"] = jcds_base_url
self.connection["jcds_upload_token"] = jcds_upload_token
self.connection[
"url"
] = jcds_base_url # This is to make JSSImporter happy because it accesses .connection
def _build_url(self):
"""Build a connection URL."""
pass
def copy_pkg(self, filename, id_=-1):
"""Copy a package to the JAMF Cloud distribution server.
Bundle-style packages must be zipped prior to copying.
Args:
filename: Full path to file to upload.
id_: ID of Package object to associate with, or -1 for new
packages (default).
"""
self._copy(filename, id_=id_)
def _build_chunk_url(self, filename, chunk, chunk_total):
"""Build the path to the chunk being uploaded to the JCDS."""
return "{}/{}/part?chunk={}&chunks={}".format(
self.connection["jcds_base_url"], filename, chunk, chunk_total
)
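    # Illustrative result:
    #   _build_chunk_url("MyPkg.pkg", 0, 35) ->
    #   "<jcds_base_url>/MyPkg.pkg/part?chunk=0&chunks=35"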
def _copy_multiprocess(self, filename, upload_token, id_=-1):
"""Upload a file to the distribution server using multiple processes to upload several chunks in parallel.
Directories/bundle-style packages must be zipped prior to copying.
"""
fsize = os.stat(filename).st_size
total_chunks = int(math.ceil(fsize / JCDS.chunk_size))
p = multiprocessing.Pool(3)
def _chunk_args(chunk_index):
return [
filename,
self.connection["jcds_base_url"],
upload_token,
chunk_index,
JCDS.chunk_size,
total_chunks,
]
        for chunk in range(0, total_chunks):
res = p.apply_async(_jcds_upload_chunk, _chunk_args(chunk))
data = res.get(timeout=10)
print(
"id: {0}, version: {1}, size: {2}, filename: {3}, lastModified: {4}, created: {5}".format(
data["id"],
data["version"],
data["size"],
data["filename"],
data["lastModified"],
data["created"],
)
)
def _copy_threaded(self, filename, upload_token, id_=-1):
"""Upload a file to the distribution server using multiple threads to upload several chunks in parallel."""
fsize = os.stat(filename).st_size
total_chunks = int(math.ceil(fsize / JCDS.chunk_size))
        for chunk in range(0, total_chunks):
t = JCDSChunkUploadThread(
filename=filename,
base_url=self.connection["jcds_base_url"],
upload_token=upload_token,
chunk_index=chunk,
chunk_size=JCDS.chunk_size,
total_chunks=total_chunks,
)
t.start()
def _copy_sequential(self, filename, upload_token, id_=-1):
"""Upload a file to the distribution server using the same process as python-jss.
Directories/bundle-style packages must be zipped prior to copying.
"""
fsize = os.stat(filename).st_size
total_chunks = int(math.ceil(fsize / JCDS.chunk_size))
basefname = os.path.basename(filename)
resource = open(filename, "rb")
headers = {
"X-Auth-Token": self.connection["jcds_upload_token"],
# "Content-Type": "application/octet-steam",
}
        for chunk in range(0, total_chunks):
resource.seek(chunk * JCDS.chunk_size)
chunk_data = resource.read(JCDS.chunk_size)
chunk_reader = io.BytesIO(chunk_data)
chunk_url = self._build_chunk_url(basefname, chunk, total_chunks)
response = self.connection["jss"].session.post(
url=chunk_url, headers=headers, files={"file": chunk_reader},
)
if self.connection["jss"].verbose:
print(response.json())
resource.close()
def _copy(self, filename, id_=-1, file_type=0):
"""Upload a file to the distribution server. 10.2 and earlier
Directories/bundle-style packages must be zipped prior to
copying.
JCDS returns a JSON structure like this::
{
u'status': u'PENDING',
u'created': u'2018-07-10T03:21:17.000Z',
u'lastModified': u'2018-07-11T03:55:32.000Z',
u'filename': u'SkypeForBusinessInstaller-16.18.0.51.pkg',
u'version': 6,
u'md5': u'',
u'sha512': u'',
u'id': u'3a7e6a7479fc4000bf53a9693d906b11',
u'size': 35658112
}
"""
if os.path.isdir(filename):
raise TypeError(
"JCDS Server type repos do not permit directory "
"uploads. You are probably trying to upload a non-flat "
"package. Please zip or create a flat package."
)
if "jcds_upload_token" not in self.connection:
self._scrape_tokens()
self._copy_threaded(filename, self.connection["jcds_upload_token"])
# if False:
# self._copy_sequential(filename, self.connection['jcds_upload_token'])
# else:
# self._copy_threaded(filename, self.connection['jcds_upload_token'])
def exists(self, filename):
"""Check whether a package file already exists."""
packages = self.package_index_using_casper()
for p in packages:
            # fileURL may or may not carry a query-string token; keep only the path.
            url = p["fileURL"].split("?", 1)[0]
            urlparts = url.split("/")
if urlparts[-1] == filename:
return True
return False
def __repr__(self):
"""Return string representation of connection arguments."""
output = [
"JAMF Cloud Distribution Server: %s" % self.connection["jss"].base_url
]
output.append("Type: %s" % type(self))
output.append("Connection Information:")
for key, val in self.connection.items():
output.append("\t%s: %s" % (key, val))
return "\n".join(output) + "\n"
God sees, he knows and he cares.
I wait for you Lord God. While I wait, I follow you. I see my own heart is bent and broken and sinful. Forgive me Lord and help me to serve you by loving and caring for those you put in my path today. Even if they hate me Lord, you love them. Maybe they just haven’t heard how much you love them. Help me share the story of your love for this lost world of which I am a sojourner.
import logging
from csirtg_indicator import Indicator
from pprint import pprint
from cif.utils import resolve_ns
import arrow
CONFIDENCE = 9
PROVIDER = 'spamhaus.org'
CODES = {
'127.0.1.2': {
'tags': 'suspicious',
'description': 'spammed domain',
},
'127.0.1.3': {
'tags': 'suspicious',
'description': 'spammed redirector / url shortener',
},
'127.0.1.4': {
'tags': 'phishing',
'description': 'phishing domain',
},
'127.0.1.5': {
'tags': 'malware',
'description': 'malware domain',
},
'127.0.1.6': {
'tags': 'botnet',
'description': 'Botnet C&C domain',
},
'127.0.1.102': {
'tags': 'suspicious',
'description': 'abused legit spam',
},
'127.0.1.103': {
'tags': 'suspicious',
'description': 'abused legit spammed redirector',
},
'127.0.1.104': {
'tags': 'phishing',
'description': 'abused legit phish',
},
'127.0.1.105': {
'tags': 'malware',
'description': 'abused legit malware',
},
'127.0.1.106': {
'tags': 'botnet',
'description': 'abused legit botnet',
},
'127.0.1.255': {
'description': 'BANNED',
},
}
class SpamhausFqdn(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
self.is_advanced = True
def _resolve(self, data):
data = '{}.dbl.spamhaus.org'.format(data)
data = resolve_ns(data)
if data and data[0]:
return data[0]
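    # Illustrative flow (hypothetical domain): _resolve('malware.example')
    # queries 'malware.example.dbl.spamhaus.org'; a listed domain resolves
    # to a code such as '127.0.1.5', which CODES maps to tags/description.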
def process(self, i, router):
if 'search' in i.tags:
return
        if i.itype == 'fqdn' and i.provider != PROVIDER:
try:
r = self._resolve(i.indicator)
try:
r = CODES.get(str(r), None)
except Exception as e:
# https://www.spamhaus.org/faq/section/DNSBL%20Usage
self.logger.error(e)
self.logger.info('check spamhaus return codes')
r = None
if r:
confidence = CONFIDENCE
if ' legit ' in r['description']:
confidence = 6
f = Indicator(**i.__dict__())
f.tags = [r['tags']]
if 'hunter' not in f.tags:
f.tags.append('hunter')
f.description = r['description']
f.confidence = confidence
f.provider = PROVIDER
f.reference_tlp = 'white'
f.reference = 'http://www.spamhaus.org/query/dbl?domain={}'.format(f.indicator)
f.lasttime = f.reporttime = arrow.utcnow()
x = router.indicators_create(f)
self.logger.debug('Spamhaus FQDN: {}'.format(x))
except KeyError as e:
self.logger.error(e)
except Exception as e:
self.logger.error('[Hunter: SpamhausFqdn] {}: giving up on indicator {}'.format(e, i))
Plugin = SpamhausFqdn
PicsArt Photo Studio v11.7.4 Unlocked APK is Here!
Creativity is more than just a photo filter—PicsArt is your best choice for making amazing photo edits and photo collages, capturing images with the camera, creating digital drawings, and communicating with a global community of creative people who have joined our mission to beautify the world.
250 million people have already downloaded this free app for powerful image editing and photo montages, using hundreds of tools, filters, and effects that you can find only in professional photo editing programs. PicsArt’s photo editor, collage maker, and drawing tools are the best way to keep your inspiration flowing and allow you to create beautiful images anytime, anywhere. Transform your photos into works of art and let the world discover them!
An easy-to-use photo editor, providing a wide array of photo manipulation instruments such as a clone tool, crop tool, photo blending and enhancement tool, photo filters, live camera, and hundreds of customizable brush filters, masks, shape masks, text tool, and more.
Instant sharing on PicsArt, Facebook, Twitter, Instagram, Dropbox, or via email.
A collage maker to create photo collages in grids, collage frames, free-form, or with photos in the background. It's easy to make collages and share them on Facebook and Instagram.
A photo camera, with multiple live filters.
Trending this month: Get festive with Holiday clip art and Christmas clip art.
Winter is coming! We’ve got you covered with Winter clip art too. It will transform your creations into a winter wonderland.
Make Christmas collages using Christmas clip art and Christmas emojis. Decorate your photos like you would a Christmas tree!
See how the world celebrates with holiday photo and editing challenges.
Discover pictures you love and learn to make them.
We release new tutorials every week. We got you!
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import jinja2
import yaml
import server
def append_path(filename, file_path):
''' If a filename starts with './' then it is relative to the config.yml path.
'''
if filename.startswith('./'):
filename = os.path.join(file_path, filename)
else:
filename = os.path.join(SDK_PATH, filename)
return filename
def load_config(config_filename):
''' Get the config dictionary from the file 'config.yml' '''
with open(config_filename) as f:
config = yaml.safe_load(f)
return config
def parse_brackets(string):
''' ex: 'pwm', '4' = parse_brackets('pwm[4]') '''
start, end = map(lambda char : string.find(char), ('[', ']'))
if start >= 0 and end >= 0:
return string[0 : start], string[start + 1 : end]
else:
return string, '1'
def read_parameters(string, parameters):
string, parameter = parse_brackets(string)
if parameter.isdigit():
parameter = int(parameter)
else:
assert parameter in parameters
parameter = parameters[parameter]
return string, parameter
def build_memory(memory, parameters):
    ''' Expand 'name[param]' memory entries and set each region's mmap protection flag. '''
for address in memory:
address['name'], address['n_blocks'] = read_parameters(address['name'], parameters)
assert (address['n_blocks'] > 0)
# Protection
        if 'protection' not in address:
address['prot_flag'] = 'PROT_READ|PROT_WRITE'
elif address['protection'] == 'read':
address['prot_flag'] = 'PROT_READ'
elif address['protection'] == 'write':
address['prot_flag'] = 'PROT_WRITE'
return memory
def build_registers(registers, parameters):
new_registers = []
for register in registers:
register, parameter = read_parameters(register, parameters)
if parameter == 1:
new_registers.append(register)
else:
for i in range(parameter):
new_registers.append(register+str(i))
registers = new_registers
return registers
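# Illustrative expansion (hypothetical names): with parameters={'n_pwm': 4},
# build_registers(['rst', 'pwm[n_pwm]'], parameters) returns
# ['rst', 'pwm0', 'pwm1', 'pwm2', 'pwm3'].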
def append_memory_to_config(config):
parameters = config.get('parameters', {})
config['memory'] = build_memory(config.get('memory', {}), parameters)
config['control_registers'] = build_registers(config.get('control_registers', {}), parameters)
config['ps_control_registers'] = build_registers(config.get('ps_control_registers', {}), parameters)
config['status_registers'] = build_registers(config.get('status_registers', {}), parameters)
config['ps_status_registers'] = build_registers(config.get('ps_status_registers', {}), parameters)
return config
def build_json(data):
    ''' Dump a dict to JSON with quotes escaped for embedding in generated source. '''
    data_json = json.dumps(data, separators=(',', ':')).replace('"', '\\"')
    return data_json
def dump_if_changed(filename, new_dict):
changed = False
if os.path.isfile(filename):
with open(filename, 'r') as yml_file:
old_dict = yaml.safe_load(yml_file)
if old_dict != new_dict:
changed = True
if not os.path.isfile(filename) or changed:
with open(filename, 'w') as yml_file:
yaml.dump(new_dict, yml_file)
#########################
# Jinja2 template engine
#########################
def get_renderer():
renderer = jinja2.Environment(
block_start_string = '{%',
block_end_string = '%}',
variable_start_string = '{{',
variable_end_string = '}}',
loader = jinja2.FileSystemLoader([os.path.join(SDK_PATH, 'fpga'), os.path.join(SDK_PATH, 'server/templates')])
)
def quote(list_):
return ['"%s"' % element for element in list_]
def remove_extension(filename):
toks = filename.split('.')
return toks[0]
def replace_KMG(string):
return string.replace('K', '*1024').replace('M', '*1024*1024').replace('G', '*1024*1024*1024')
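    # e.g. replace_KMG('16M') -> '16*1024*1024'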
renderer.filters['quote'] = quote
renderer.filters['remove_extension'] = remove_extension
renderer.filters['replace_KMG'] = replace_KMG
return renderer
def fill_template(config, template_filename, output_filename):
template = get_renderer().get_template(template_filename)
with open(output_filename, 'w') as output:
output.write(template.render(config=config))
###################
# Main
###################
SDK_PATH = os.getenv('SDK_PATH', '')
if __name__ == "__main__":
cmd = sys.argv[1]
config_filename = sys.argv[2]
output_filename = sys.argv[3]
output_dirname = os.path.dirname(output_filename)
if not os.path.exists(output_dirname):
os.makedirs(output_dirname)
config = load_config(config_filename)
config_path = os.path.dirname(config_filename)
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding('utf-8')
if cmd == '--name':
with open(output_filename, 'w') as f:
f.write(config['name'])
elif cmd == '--memory_yml':
for field in ['drivers', 'web', 'cores', 'modules', 'name', 'board', 'version']:
config.pop(field, None)
dump_if_changed(output_filename, config)
elif cmd == '--config_tcl':
fill_template(append_memory_to_config(config), 'config.tcl', output_filename)
elif cmd == '--cores':
for module in config.get('modules', []):
module_path = os.path.dirname(module)
module = append_path(module, module_path)
module_config = load_config(module)
module_cores = module_config.get('cores')
if module_cores is not None:
config['cores'].extend(module_cores)
config['cores'] = list(set(config['cores']))
for i in range(len(config['cores'])):
config['cores'][i] = append_path(config['cores'][i], config_path)
with open(output_filename, 'w') as f:
f.write(' '.join(config.get('cores', [])))
elif cmd == '--board':
config['board'] = append_path(config['board'], config_path)
with open(output_filename, 'w') as f:
f.write(config['board'])
elif cmd == '--drivers':
for i, path in enumerate(config.get('drivers', [])):
config['drivers'][i] = append_path(path, config_path)
with open(output_filename, 'w') as f:
f.write(' '.join(config.get('drivers', [])))
elif cmd == '--xdc':
for i, path in enumerate(config.get('xdc', [])):
config['xdc'][i] = append_path(path, config_path)
with open(output_filename, 'w') as f:
f.write(' '.join(config.get('xdc', [])))
elif cmd == '--memory_hpp':
config = append_memory_to_config(config)
config['json'] = build_json(config)
fill_template(config, 'memory.hpp', output_filename)
elif cmd == '--render_template':
template_filename = sys.argv[4]
for i in range(len(config['drivers'])):
config['drivers'][i] = append_path(config['drivers'][i], config_path)
server.render_template(template_filename, output_filename, server.get_drivers(config['drivers']))
elif cmd == '--render_interface':
driver_filename_hpp = sys.argv[4]
id_ = server.get_driver_id(config['drivers'], driver_filename_hpp)
server.render_driver(server.get_driver(driver_filename_hpp, id_), output_filename)
elif cmd == '--web':
for i, path in enumerate(config.get('web', [])):
config['web'][i] = append_path(path, config_path)
with open(output_filename, 'w') as f:
f.write(' '.join(config.get('web', [])))
elif cmd == '--version':
config['version'] = config.get('version', '0.0.0')
with open(output_filename, 'w') as f:
f.write(config['version'])
else:
        raise ValueError('Unknown command: {}'.format(cmd))
Are You Sure You Want to Wear the Kilt to Work?
1. Kilts Are Cooler Than Pants.
While we all agree kilts are cool in the style sense, we actually mean temperature when we say wearing one is cooler than wearing pants. If you work outside or in a hot, stuffy setting, a kilt will help you feel more comfortable through the day. Kilts allow your legs to feel the breeze and let air circulate underneath them. You'll find that you sweat less and simply feel better on the job in a kilt, which can improve your productivity.
2. Kilts Offer Plenty of Storage.
When you need a lot of supplies and tools on the job, a work kilt is ideal for you. Kilts designed specifically for work are packed with pockets for carrying small items and loops for stashing tools like hammers and screwdrivers. You will have everything you need to complete tasks right at your fingertips, meaning less bending to reach into your toolbox and fewer trips back and forth to your work truck or workbench.
3. Kilts Are Durable.
Work kilts are made from sturdy fabric that resists rips and tears. They are every bit as rugged, if not more durable, than high-quality work pants and shorts. Work kilts are often made by hand, so they offer a level of quality you simply can't get with off-the-shelf work pants.
4. Kilts Can Set Your Brand Apart.
If you are self-employed or own a small business, wearing a kilt on the job or having your entire staff wear them can be a branding opportunity. You can be certain that everyone will remember the mechanic in the kilt or the plumber in the kilt!
For all of these benefits, a work-wear kilt has one disadvantage you need to consider before you choose to wear one on the job: it leaves your legs exposed. On many job sites this is not a problem, but if you have to wear special protective leg wear, you may not be able to safely wear a kilt at work. Men who have to get on their knees often during the day may also want to invest in a pair of knee pads to wear along with their kilts.
#!/usr/bin/python
#TODO: create a torrent file for reconstruction purposes!
import sys
import argparse
import os
import hashlib
from layer_block import BlockStore
output_dir = "/mnt/shared_volume/dskrs_field"
def input_check(arg):
#check if file exists
if os.path.isfile(arg) is False:
raise argparse.ArgumentTypeError('Input file does not exist')
return arg
def block_check(arg):
value = int(arg)
if value <= 0:
raise argparse.ArgumentTypeError('Block size should be a positive value')
if ((value % 4096) != 0):
raise argparse.ArgumentTypeError('Block size should be 4K aligned')
return value
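# e.g. block_check('8192') -> 8192, while block_check('1000') raises an
# ArgumentTypeError because 1000 is not a multiple of 4096.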
def init_vars():
parser = argparse.ArgumentParser(description='dioskr input parameters.')
parser.add_argument('input_file', default='', type=input_check, help='input file path')
parser.add_argument('block_size', default='4096', type=block_check, help='size of block deduplication in bytes')
#parser.add_argument('output_dir', help='output directory')
args = parser.parse_args()
return args.input_file, args.block_size
if __name__ == "__main__":
print '=======dioskrS v0.1========'
input_file, block_size = init_vars()
block_eng = BlockStore(input_file, block_size, output_dir)
blocks_nr = 0
    while True:
        block = block_eng.get_sync()
        if block == "":
            # get_sync() returns an empty string once the input is exhausted.
            break
        blocks_nr += 1
        # Fingerprint the block; a dedup index would key on this digest.
        hash_object = hashlib.sha512(block)
        hex_dig = hash_object.hexdigest()
    print 'Finished reading file! Number of %d-byte blocks: %d' % (block_size, blocks_nr)
The time to enroll for coverage through the 2019 Health Insurance Marketplace is here. Open enrollment runs through Dec. 15, and it’s a critical time of year for Americans who depend on the marketplace (also known as the health insurance exchange) for healthcare coverage.
The enrollment process will be familiar to anyone who’s previously bought insurance through the exchange. But there are some important changes to the Affordable Care Act for 2019, including the end of the individual mandate that once required people to purchase insurance or face a fine. Depending on where you live, you could also see changes to the specifics of your plan’s coverage or the price of your premiums.
Whether you’re keeping your plan from last year or looking to enroll through the exchange for the first time, we’ve put together a guide to what you need to know about open enrollment.
The 2018 nationwide open enrollment period runs from Nov. 1 until Dec. 15, with insurance plans going into effect on Jan. 1, 2019. A handful of states have extended their deadlines, allowing residents to enroll after Dec. 15 for insurance plans that take effect as late as February or March.
The marketplace open enrollment window may coincide with some employers’ open enrollment periods for health insurance. If you have access to insurance through your job or through your spouse, check with your employer or your partner’s employer for enrollment dates, plan options and other details.
Do I qualify for coverage on the marketplace?
To be eligible for coverage through the marketplace, you must be a U.S. citizen or national who’s living in the U.S., isn’t currently incarcerated and isn’t already insured through your job, a spouse or a parent.
If you or your dependents qualify for Medicaid, Medicare or the Children’s Health Insurance Program (CHIP), you aren’t eligible for marketplace coverage.
One exception: If your employer charges more than 9.86 percent of your yearly household income for health insurance, you can choose to purchase insurance through the marketplace instead.
You can enroll for coverage online, over the phone, with in-person help from an assister, through an agent or broker, or by mail.
If you previously enrolled with the help of an in-person navigator, it might be more difficult to do that this year. The Trump administration cut funding for navigators to $10 million for 2019, down from $36.8 million the previous year and $63 million the year before that.
“Those living in rural areas will be the most affected by these cuts, as these programs may not have the budget to conduct outreach there,” said Rosemarie Day, founder and CEO of Day Health Strategies and previously the founding chief operating officer of the Massachusetts health exchange.
What if I don’t want health insurance? Will I have to pay a fine?
New this year, you can choose to forego coverage without facing a penalty. At the end of 2017, Congress passed a tax bill that repealed the individual mandate, which required individuals to buy health insurance. Up until now, you could be hit with a fine equal to 2.5 percent of your household income or $695 per person — whichever was higher — for opting out of health insurance.
Without the individual mandate, people who are generally healthy might be more likely to skip coverage, says Harry Nelson, founder of the healthcare law firm Nelson Hardiman and co-author of the book From Obamacare to Trumpcare: Why You Should Care. The repeal could also have an impact on people who remain insured.
Are premiums going up in 2019?
The average increase in premiums across the country is only around 3 percent, Day says, which is on par with inflation. But that average doesn’t account for differences across state lines.
In states where premiums are going up, the cost for insurance companies to treat sick enrollees is part of the equation.
What do I need to prepare before I enroll?
You’ll need to provide basic information about your household size, as well as employer and income information for every person in your household. Walk through this checklist from the Department of Health and Human Services to get ready for enrollment.
As you compare plans, you’ll see options in four categories: Bronze, Silver, Gold and Platinum. In general, Bronze plans have the lowest premiums, which increase as you progress to Silver, Gold and Platinum.
The tradeoff for plans with lower premiums is that they generally cover fewer medical expenses. Likewise, if you pay a higher premium for a higher-tier plan, you’ll usually receive better coverage for the procedures, prescriptions and services you need. But the categories aren’t indicators of the quality of care — they only represent the scope of each plan’s coverage.
Although Gold and Platinum plans come with higher monthly premiums, these plans are usually a good option for people who need frequent medical care or prescriptions. If you don’t expect to make regular visits to the doctor, you might be comfortable with a Bronze plan.
If you’re under 30, you may be eligible for a catastrophic plan, which comes with a low premium and a high deductible. Coverage doesn’t kick in until you meet the $7,900 deductible, so these plans are intended to provide coverage only in the case of an unexpected disaster.
EPO, HMO, POS or PPO: Which is right for me?
Some plans let you choose your own doctors, but others only cover services from a pool of in-network providers.
Exclusive provider organization (EPO) plans and health maintenance organization (HMO) plans only cover providers within your plan’s network. Point of service (POS) and preferred provider organization (PPO) plans cover visits to doctors whether or not they’re in the network, but in-network providers are still usually less expensive, and some plans may require a referral for out-of-network doctors.
If you have a favorite doctor or you want to confirm coverage for a specific procedure or prescription, read the plan’s details as you shop around to make sure you’ll be covered before you enroll.
Will my current plan’s coverage change in 2019?
The Affordable Care Act guarantees 10 essential health benefits, which include preventive services, prescriptions, pregnancy and childbirth care, and mental health services, among others.
But when the Trump administration released its final rule for ACA implementation earlier this year, it gave states new leeway to define their own “benchmark” plans.
The 10 essential health benefits are still safe, Day says, but states could take advantage of the leeway to cut back on coverage in certain areas. This new flexibility allowed Alabama to reduce the number of prescription drugs it would cover, while Illinois took the opposite approach, expanding its benchmark plan to cover a wider range of chronic pain treatments and mental health services, according to Modern Healthcare.
Do I qualify for a subsidy to help cover costs?
Your eligibility for a subsidy or premium tax credit depends on your income and the number of people in your household. You can use this calculator to see if you qualify for any savings. If you don’t qualify for savings, you may still be able to purchase a plan through the marketplace at full price.
How likely is a repeal-and-replace of the ACA?
When Donald Trump was elected president in 2016, talk of efforts to repeal and replace the ACA (also known as Obamacare) quickly picked up steam, but attempts from Republicans in Congress have thus far been unsuccessful.
Nevertheless, the administration has succeeded in dismantling key components of the ACA by eliminating the individual mandate and granting more flexibility to states. For now, Nelson doubts that a full repeal-and-replace effort will come to fruition.
If you miss the Dec. 15 deadline, you can still enroll if you qualify for a Special Enrollment Period. Certain life events, like getting married, losing coverage or having a baby, qualify you to enroll outside of the open enrollment period. This questionnaire will help you determine if you qualify.
#! /usr/bin/python
"""
Twython is a library for Python that wraps the Twitter API.
It aims to abstract away all the API endpoints, so that additions to the library and/or the Twitter API won't cause any overall problems.
Questions, comments? [email protected]
"""
__author__ = "Ryan McGrath <[email protected]>"
__version__ = "1.4.1"
import urllib.request, urllib.parse, urllib.error
import http.client
import httplib2
import mimetypes
from email.generator import _make_boundary
import re
import oauth2 as oauth
# Twython maps keyword based arguments to Twitter API endpoints. The endpoints
# table is a file with a dictionary of every API endpoint that Twython supports.
from .twitter_endpoints import base_url, api_table
from urllib.error import HTTPError
import json as simplejson
class TwythonError(AttributeError):
"""
Generic error class, catch-all for most Twython issues.
Special cases are handled by APILimit and AuthError.
Note: To use these, the syntax has changed as of Twython 1.3. \
To catch these, you need to explicitly import them into your code,
e.g:
from twython import TwythonError, APILimit, AuthError
"""
def __init__(self, msg, error_code=None):
self.msg = msg
if error_code == 400:
raise APILimit(msg)
def __str__(self):
return repr(self.msg)
class APILimit(TwythonError):
"""
Raised when you've hit an API limit. Try to avoid these, read the API
docs if you're running into issues here, Twython does not concern \
itself with this matter beyond telling you that you've done goofed.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class AuthError(TwythonError):
"""
Raised when you try to access a protected resource and it fails due to \
some issue with your authentication.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class Twython(object):
def __init__(self, twitter_token = None, twitter_secret = None,\
oauth_token = None, oauth_token_secret = None, headers=None):
"""setup(self, oauth_token = None, headers = None)
Instantiates an instance of Twython. Takes optional parameters for \
authentication and such (see below).
Parameters:
twitter_token - Given to you when you register your application\
with Twitter.
twitter_secret - Given to you when you register your \
application with Twitter.
oauth_token - If you've gone through the authentication process\
and have a token for this user,pass it in and \
it'll be used for all requests going forward.
oauth_token_secret - see oauth_token; it's the other half.
headers - User agent header, dictionary style aka \
{'User-Agent': 'Bert'}
** Note: versioning is not currently used by search.twitter \
functions; when Twitter moves their junk, it'll be supported.
"""
# Needed for hitting that there API.
self.request_token_url = 'http://twitter.com/oauth/request_token'
self.access_token_url = 'http://twitter.com/oauth/access_token'
self.authorize_url = 'http://twitter.com/oauth/authorize'
self.authenticate_url = 'http://twitter.com/oauth/authenticate'
self.twitter_token = twitter_token
self.twitter_secret = twitter_secret
self.oauth_token = oauth_token
self.oauth_secret = oauth_token_secret
# If there are headers, set them; otherwise be an embarrassing parent for
# their own good.
self.headers = headers
if self.headers is None:
self.headers = {'User-agent': 'Twython Python Twitter Library v1.4.1'}
consumer = None
token = None
if self.twitter_token is not None and self.twitter_secret is not None:
consumer = oauth.Consumer(self.twitter_token, self.twitter_secret)
if self.oauth_token is not None and self.oauth_secret is not None:
token = oauth.Token(self.oauth_token, self.oauth_secret)
# Filter down through the possibilities here - if they have a token, \
# if they're first stage, etc.
if consumer is not None and token is not None:
self.client = oauth.Client(consumer, token)
elif consumer is not None:
self.client = oauth.Client(consumer)
else:
# If they don't do authentication, but still want to request \
# unprotected resources, we need an opener.
self.client = httplib2.Http()
def __getattr__(self, api_call):
"""
The most magically awesome block of code you'll see in 2010.
Rather than list out 9 million damn methods for this API, \
we just keep a table (see above) of every API endpoint and their \
corresponding function id for this library. This pretty much gives
unlimited flexibility in API support - there's a slight chance of a\
performance hit here, but if this is going to be your bottleneck...\
well, don't use Python. ;P
For those who don't get what's going on here, Python classes have \
this great feature known as __getattr__().
It's called when an attribute that was called on an object doesn't \
seem to exist - since it doesn't exist,we can take over and find \
the API method in our table. We then return a function that \
downloads and parses what we're looking for, based on the keywords \
passed in.
I'll hate myself for saying this, but this is heavily inspired by \
Ruby's "method_missing".
"""
def get(self, **kwargs):
# Go through and replace any mustaches that are in our API url.
fn = api_table[api_call]
base = re.sub(
r'\{\{(?P<m>[a-zA-Z_]+)\}\}',
lambda m: "%s" % kwargs.get(m.group(1), '1'),
# The '1' here catches the API version. Slightly hilarious.
base_url + fn['url']
)
# Then open and load that shiiit, yo.
# TODO: check HTTP method and junk, handle errors/authentication
if fn['method'] == 'POST':
resp, content = self.client.request(base, fn['method'], \
urllib.parse.urlencode(dict([k, v.encode('utf-8')] \
for k, v in list(kwargs.items()))))
else:
url = base + "?" + "&".join(["%s=%s" %(key, value) \
for (key, value) in list(kwargs.items())])
resp, content = self.client.request(url, fn['method'])
return simplejson.loads(content.decode('utf-8'))
if api_call in api_table:
return get.__get__(self)
else:
raise TwythonError(api_call)
def get_authentication_tokens(self):
"""
get_auth_url(self)
Returns an authorization URL for a user to hit.
"""
resp, content = self.client.request(self.request_token_url, "GET")
if resp['status'] != '200':
raise AuthError("Seems something couldn't be verified with your OAuth junk. Error: %s, Message: %s" % (resp['status'], content))
request_tokens = dict(urllib.parse.parse_qsl(content))
request_tokens['auth_url'] = "%s?oauth_token=%s" % \
(self.authenticate_url, request_tokens['oauth_token'])
return request_tokens
def get_authorized_tokens(self):
"""
get_authorized_tokens
Returns authorized tokens after they go through the auth_url phase.
"""
resp, content = self.client.request(self.access_token_url, "GET")
return dict(urllib.parse.parse_qsl(content))
# --------------------------------------------------------------------------
# The following methods are all different in some manner or require special\
# attention with regards to the Twitter API.
# Because of this, we keep them separate from all the other endpoint \
# definitions - ideally this should be change-able,but it's not high on \
# the priority list at the moment.
# --------------------------------------------------------------------------
@staticmethod
def constructApiURL(base_url, params):
return base_url + "?" + "&".join(["%s=%s" %(Twython.unicode2utf8(key),\
urllib.parse.quote_plus(Twython.unicode2utf8(value))) \
for (key, value) in list(params.items())])
@staticmethod
def shortenURL(url_to_shorten, shortener = "http://is.gd/api.php", \
query = "longurl"):
"""shortenURL(url_to_shorten, shortener = "http://is.gd/api.php", \
query = "longurl")
Shortens url specified by url_to_shorten.
Parameters:
url_to_shorten - URL to shorten.
shortener - In case you want to use a url shortening service \
other than is.gd.
"""
try:
content = urllib.request.urlopen(shortener + "?" + \
urllib.parse.urlencode(\
{query: Twython.unicode2utf8(url_to_shorten)})).read()
return content
except HTTPError as e:
raise TwythonError("shortenURL() failed with a %s error code." % \
repr(e.code))
def bulkUserLookup(self, ids = None, screen_names = None, version = None):
""" bulkUserLookup(self, ids = None, screen_names = None, \
version = None)
A method to do bulk user lookups against the Twitter API. \
Arguments (ids (numbers) / screen_names (strings)) should be flat \
Arrays that contain their respective data sets.
Statuses for the users in question will be returned inline if \
they exist. Requires authentication!
"""
apiURL = "http://api.twitter.com/1/users/lookup.json?lol=1"
if ids is not None:
apiURL += "&user_id=" + ",".join(repr(user_id) for user_id in ids)
if screen_names is not None:
apiURL += "&screen_name=" + ",".join(screen_names)
try:
resp, content = self.client.request(apiURL, "GET")
return simplejson.loads(content)
except HTTPError as e:
raise TwythonError("bulkUserLookup() failed with a %s error code." \
% repr(e.code), e.code)
def search(self, **kwargs):
"""search(search_query, **kwargs)
Returns tweets that match a specified query.
Parameters:
See the documentation at http://dev.twitter.com/doc/get/search.\
Pass in the API supported arguments as named parameters.
e.g x.search(q = "python")
"""
searchURL = Twython.constructApiURL(\
"http://search.twitter.com/search.json", kwargs)
try:
resp, content = self.client.request(searchURL, "GET")
return simplejson.loads(content)
except HTTPError as e:
raise TwythonError("getSearchTimeline() failed with a %s error \
code." % repr(e.code), e.code)
def searchTwitter(self, **kwargs):
"""Deprecated alias for search(), e.g. searchTwitter(q="python", page="2")"""
return self.search(**kwargs)
def searchGen(self, search_query, **kwargs):
"""searchGen(search_query, **kwargs)
Returns a generator of tweets that match a specified query.
Parameters:
See the documentation at http://dev.twitter.com/doc/get/search.\
Pass in the API supported arguments as named parameters.
e.g x.search(search_query="python", page="2")
"""
searchURL = Twython.constructApiURL(\
"http://search.twitter.com/search.json?q=%s" % Twython.unicode2utf8(\
search_query), kwargs)
try:
resp, content = self.client.request(searchURL, "GET")
data = simplejson.loads(content)
except HTTPError as e:
raise TwythonError("searchGen() failed with a %s error code." % repr(e.code), e.code)
if not data['results']:
return
for tweet in data['results']:
yield tweet
if 'page' not in kwargs:
kwargs['page'] = '2'
else:
try:
kwargs['page'] = str(int(kwargs['page']) + 1)
except (TypeError, ValueError):
raise TwythonError("searchGen() exited because page takes str")
for tweet in self.searchGen(search_query, **kwargs):
yield tweet
def isListMember(self, list_id, id, username, version = 1):
""" isListMember(self, list_id, id, version)
Check if a specified user (id) is a member of the list in question \
(list_id).
**Note: This method may not work for private/protected lists, \
unless you're authenticated and have access to those lists.
Parameters:
list_id - Required. The slug of the list to check against.
id - Required. The ID of the user being checked in the list.
username - User who owns the list you're checking against \
(username)
version (number) - Optional. API version to request.
Entire Twython class defaults to 1, but you can override on a
function-by-function or class basis - (version=2), etc.
"""
try:
resp, content = self.client.request("http://api.twitter.com/%d/%s/%s/members/%s.json" % (version, username, list_id, repr(id)))
return simplejson.loads(content)
except HTTPError as e:
raise TwythonError("isListMember() failed with a %s error code." % repr(e.code), e.code)
def isListSubscriber(self, username, list_id, id, version = 1):
""" isListSubscriber(self, list_id, id, version)
Check if a specified user (id) is a subscriber of the list in
question (list_id).
**Note: This method may not work for private/protected lists,
unless you're authenticated and have access to those lists.
Parameters:
list_id - Required. The slug of the list to check against.
id - Required. The ID of the user being checked in the list.
username - Required. The username of the owner of the list \
that you're seeing if someone is subscribed to.
version (number) - Optional. API version to request.\
Entire Twython class defaults to 1, but you can override on a \
function-by-function or class basis - (version=2), etc.
"""
try:
resp, content = self.client.request("http://api.twitter.com/%d/%s/%s/following/%s.json" % (version, username, list_id, repr(id)), "GET")
return simplejson.loads(content)
except HTTPError as e:
raise TwythonError("isListSubscriber() failed with a %s error code." % repr(e.code), e.code)
# The following methods are apart from the other Account methods, \
# because they rely on a whole multipart-data posting function set.
def updateProfileBackgroundImage(self, filename, tile="true", version = 1):
""" updateProfileBackgroundImage(filename, tile="true")
Updates the authenticating user's profile background image.
Parameters:
image - Required. Must be a valid GIF, JPG, or PNG image of \
less than 800 kilobytes in size. Images with width larger than \
2048 pixels will be forceably scaled down.
tile - Optional (defaults to true). If set to true the \
background image will be displayed tiled. The image will not \
be tiled otherwise.
** Note: It's sad, but when using this method, pass the tile \
value as a string, e.g tile="false"
version (number) - Optional. API version to request.
Entire Twython class defaults to 1, but you can override on a \
function-by-function or class basis - (version=2), etc.
"""
try:
files = [("image", filename, open(filename, 'rb').read())]
fields = []
content_type, body = Twython.encode_multipart_formdata(fields, \
files)
headers = {'Content-Type': content_type, 'Content-Length': \
str(len(body))}
r = urllib.request.Request("http://api.twitter.com/%d/account/update_profile_background_image.json?tile=%s" % (version, tile), body, headers)
return urllib.request.urlopen(r).read()
except HTTPError as e:
raise TwythonError("updateProfileBackgroundImage() failed with a %s error code." % repr(e.code), e.code)
def updateProfileImage(self, filename, version = 1):
""" updateProfileImage(filename)
Updates the authenticating user's profile image (avatar).
Parameters:
image - Required. Must be a valid GIF, JPG, or PNG image of \
less than 700 kilobytes in size. Images with width larger than \
500 pixels will be scaled down.
version (number) - Optional. API version to request. \
Entire Twython class defaults to 1, but you can override on a \
function-by-function or class basis - (version=2), etc.
"""
try:
files = [("image", filename, open(filename, 'rb').read())]
fields = []
content_type, body = Twython.encode_multipart_formdata(fields, \
files)
headers = {'Content-Type': content_type, 'Content-Length': \
str(len(body))}
r = urllib.request.Request("http://api.twitter.com/%d/account/update_profile_image.json" % version, body, headers)
return urllib.request.urlopen(r).read()
except HTTPError as e:
raise TwythonError("updateProfileImage() failed with a %s error code." % repr(e.code), e.code)
@staticmethod
def encode_multipart_formdata(fields, files):
BOUNDARY = _make_boundary()
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"'\
% (key, filename))
L.append('Content-Type: %s' % (mimetypes.guess_type(filename)[0] or 'application/octet-stream'))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
@staticmethod
def unicode2utf8(text):
try:
if isinstance(text, str):
text = text.encode('utf-8')
except UnicodeError:
pass
return text
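# A minimal usage sketch (unauthenticated; the endpoint behavior is assumed,
# not part of the original file). Endpoints listed in api_table become
# callable methods via __getattr__ above; search() is also defined directly.
if __name__ == '__main__':
    twitter = Twython()
    results = twitter.search(q='python')
    for tweet in results.get('results', []):
        print(tweet.get('text'))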
|
"""The serialization logic"""
import cPickle
import copy
class InvalidFile(Exception): """The file used to thaw an item was not a valid serialized file"""
class Bag(object):
"""Bag to hold properties"""
def __init__(self, **kw):
"""Initialise the bag"""
for name, value in kw.iteritems():
setattr(self, name, value)
# Types
class Int(int):
"""An int"""
class Float(float):
"""An float"""
class String(str):
"""A str"""
class List(list):
"""A list"""
class Dict(dict):
"""A dict"""
class Bool(int):
"""A boolean"""
class Obj(object):
"""An object"""
def initType(item, name, description=None):
"""Initialize the type"""
item.name = name
item.description = description if description else name
def I(name, value, description=None):
v = Int(value)
initType(v, name, description)
return v
def F(name, value, description=None):
v = Float(value)
initType(v, name, description)
return v
def S(name, value, description=None):
v = String(value)
initType(v, name, description)
return v
def L(name, value, description=None):
v = List(value)
initType(v, name, description)
return v
def D(name, value, description=None):
v = Dict(value)
initType(v, name, description)
return v
def B(name, value, description=None):
v = Bool(value)
initType(v, name, description)
return v
def O(name, value, description=None):
v = Obj()  # note: value is unused here; Obj is a bare property container
initType(v, name, description)
return v
class Serializable(object):
"""A mixing class to help serialize and deserialize objects"""
# This is where you put the properties that your object has
# This should be a list of tuples
# name, default value, type, description
my_properties = ()
@classmethod
def createInstance(cls):
"""Return an instance of the class with all default properties set"""
instance = cls()
instance.__setstate__()
return instance
@classmethod
def _getProperties(cls):
"""Get the properties all the way up the inheritance tree"""
props = dict([(obj.name, obj) for obj in cls.my_properties])
for the_cls in cls.__bases__:
if issubclass(the_cls, Serializable):
for key, value in the_cls._getProperties():
if key not in props:
props[key] = value
return props.iteritems()
def __getstate__(self):
"""Return the live properties suitable for pickling"""
values = []
for name, _ in self.__class__._getProperties():
values.append((name, getattr(self, name)))
return values
def __setstate__(self, state=None):
"""Initialize the object to the given state for unpickling"""
self.initial_properties = Bag()
#
# Initialize first from the defaults and then from the live state
for this_state in (self.__class__._getProperties(), state):
if this_state:
for name, value in this_state:
setattr(self, name, value)
setattr(self.initial_properties, name, value)
def asString(self):
"""Return the properties of this object as a string"""
return cPickle.dumps(self, protocol=2)
def toFile(self, filename):
"""Store this object in a file"""
with file(filename, 'wb') as f:
f.write(self.asString())
@classmethod
def fromString(cls, text):
"""Return a new instance from a string"""
obj = cPickle.loads(text)
obj.init()
return obj
@classmethod
def fromFile(cls, filename):
"""Return a new instance from a file"""
with file(filename, 'rb') as f:
try:
return cls.fromString(f.read())
except Exception, err:
raise InvalidFile('Failed to load data from file "%s": %s' % (filename, err))
def init(self):
"""Implement this method to do any object initialization after unpickling"""
pass
def copy(self):
"""Return another copy of this item"""
return self.fromString(self.asString())
class SerializedBag(object):
"""A bag that can be serialized"""
def __init__(self, **kw):
"""Initialise the bag"""
for name, value in kw.iteritems():
setattr(self, name, value)
def init(self):
"""Initialise - here to meet the Serialized protocol"""
pass
def copy(self):
"""Return a copy"""
return copy.deepcopy(self)
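# A minimal usage sketch (the Score class below is hypothetical, not part of
# this module): declare typed properties via the helpers above, then round-trip
# an instance through its pickled string form.
class Score(Serializable):
    my_properties = (
        I('points', 0, 'Points scored so far'),
        S('player', 'anonymous', 'Name of the player'),
    )

def _demo():
    score = Score.createInstance()
    score.points += 10
    restored = Score.fromString(score.asString())
    assert restored.points == 10 and restored.player == 'anonymous'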
|
A new study, conducted by Dr. Jerry L. Hall, examines a technique that could create stem cells that could turn into nerve cells without the requirement for human fetuses.
The Johns Hopkins Volleyball team concluded another successful season this week in a disappointing fashion, losing 3-1 against Grove City in the first round of the ECAC South Women's Volleyball Championship. However, the team finished with its eighth consecutive winning season, compiling a 16-15 record for the year.
Lani Guinier, a celebrated Harvard University law professor and civil rights activist, detailed the degenerating quality of democracy in the modern United States and proposed several solutions to reform the institution in a speech last Thursday at Shriver Hall as part of the Milton S. Eisenhower Symposium.
Remember when you were little and falling asleep in your dark bedroom could be pure agony as you anticipated the arrival of the barrage of monsters from the shadowy recesses in the corners, under your bed, and of course, out of your closet? Somehow your parents never saw them, but you knew they were there and you knew they were scary. What you perhaps did not know was that the monsters were only doing their job.
Welcome, students of Johns Hopkins, to the dawn of the Information Age. Actually, it's less a dawn than the flicker of an almost-dead light bulb which sparks on for a second before burning out. Yes, once again we students have been handed, in typically Hopkins fashion, an "improvement" that, when all is said and done, actually causes more headaches than it relieves.
Charles Cross starts his fourth rock 'n' roll book (and second on Nirvana), Heavier than Heaven: A Biography of Kurt Cobain, very much in medias res, a mere seven hours after the end of perhaps the most important day in alternative rock history, Jan. 11, 1992. Nevermind had just officially dethroned Michael Jackson for the number one album spot early that morning; backstage at NBC Studios, Weird Al Yankovic secured permission to parody "Smells Like Teen Spirit" just before Kurt Cobain's Nirvana became the first alternative band to be seen live by tens of millions of households across America that night.
Big Mac goes out in style - There's no mention of Derek, Tino or Paulie - shocking, huh?
When Mark McGwire made the announcement of his retirement this week, it was a perfectly fitting way to go out. Not in the sense that he was forced into it by mounting injuries just 16 home runs shy of 600, but fitting in that it was a simple, undramatized event.
Researchers from Johns Hopkins and the State University of New York at Binghamton have been examining seawater trapped inside million year old crystals, in order to study how the chemistry of oceans has changed from the Phanerozoic eon (540 million years ago) to the present.
Every November, for at least the last six, our campus community has been blessed by the color, pageantry and warmth of the Hindu New Year celebration called Diwali. This annual event has become a tradition for me as well as for my family to support and to attend. Over the years, we have enjoyed the music, the lights and the dancing; sometimes we have even joined in. We have also consumed more samosas than I care to divulge. Each time we are thrilled to be there and to once again have the opportunity to witness such an extraordinary thing. We, like many others who are not Hindu ourselves, are happily drawn to it and we count on it happening every year. This year was no different, with one exception. This year there was an air of relief and release about many of us who were in attendance on Nov. 3. We were relieved to be thinking of something other than Sept. 11, and we were content to at long last participate, even in the smallest of ways, in the remarkable act of releasing our joy. It is the latter that I wish to spend a little time with here.
The Johns Hopkins football team, its chances of winning a conference championship having been extinguished, would not quit. Nor would the Blue Jays be intimidated by the conference-leading, ninth-ranked team in the nation, as the Jays stunned the Western Maryland Green Terror, beating them 21-14. The story of the game: senior wide receiver Zach Baylin caught 14 passes for 120 yards and all three of the Blue Jays' touchdowns.
Johns Hopkins Men's Fencing kicked off their season perfectly by sweeping their first three opponents. They easily defeated Middle Atlantic Conference (MAC) opponents Virginia Tech, Yeshiva and St. John's. These were their first conference matches since capturing the conference title last season.
It is never too late to learn something new. In fact, keeping the brain alert with new challenging activities can prevent mental aging. People have great fears of losing their mental capabilities during the aging process; however, instead of worrying about losing their brain, people should continually use it.
Who among us has not pondered purchasing something from an infomercial? Probably a lot, but nonetheless a well-executed late night infomercial can be both entertaining and thought provoking. When I am up late, watching these paid programs, I am forced to wonder, how much was that person's dignity worth to them, and do they still receive royalties for mortgaging it?
Baltimore has a Hispanic population estimated at 50,000 by some experts, and yet local healthcare providers cannot supply this large portion of the population with proper healthcare services due to language and cultural barriers.
Among the many under-utilized new resources in the Mattin Center is the Digital Media Center. It affords students the opportunity to use cutting-edge digital technology for both academic and nonacademic pursuits.
Waking Life is an original and ambitious film about ideas and thoughts rather than plot and characters. It is a semi-filmed, semi-computer animated feature that is so heavy with words it is absolutely impossible to grasp it all the first time around. It is a must-see for anyone who enjoys using his or her brain.
Implementing National Missile Defense (NMD) is sort of like buying a wrecked car and putting it up on blocks in your front yard: it doesn't work and it really pisses off the neighbors.
If the Johns Hopkins football team was angry about dropping a tough overtime loss to Dickinson the week before, they showed it by trouncing the Franklin & Marshall Diplomats 41-0. The Blue Jays' defense allowed only 138 yards in offense while the Jays' offense exploded for a season high 511 yards and five touchdowns.
The Johns Hopkins Volleyball team lost a tough semifinal match against Muhlenberg in the Centennial Conference championship tournament last Saturday. However, the Jays qualified as a selection in the ECAC Division III South Volleyball Championships for the third straight year. |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
from sqlalchemy.orm import exc
import quantum.db.api as db
import quantum.db.models as models
import ovs_models
def get_vlans():
session = db.get_session()
try:
bindings = session.query(ovs_models.VlanBinding).all()
except exc.NoResultFound:
return []
res = []
for x in bindings:
res.append((x.vlan_id, x.network_id))
return res
def add_vlan_binding(vlanid, netid):
session = db.get_session()
binding = ovs_models.VlanBinding(vlanid, netid)
session.add(binding)
session.flush()
return binding.vlan_id
def remove_vlan_binding(netid):
session = db.get_session()
try:
binding = session.query(ovs_models.VlanBinding).\
filter_by(network_id=netid).\
one()
session.delete(binding)
except exc.NoResultFound:
pass
session.flush()
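# A minimal usage sketch (the ids below are hypothetical, not part of this
# module):
#
#   add_vlan_binding(100, 'net-uuid')   # persists the binding, returns 100
#   get_vlans()                         # -> [(100, 'net-uuid'), ...]
#   remove_vlan_binding('net-uuid')     # no-op if the binding is absent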
|
Dealing with scalpers at Marlins Ballpark?
Safety at Marlins Ballpark?
Best giveaways at Marlins Ballpark?
Artifacts to look for at Marlins Ballpark?
Best insider tip for Marlins Ballpark?
Traffic around Marlins Ballpark?
Stores to hit up at Marlins Ballpark?
Best food and drink at Marlins Ballpark?
Things to check out around Marlins Ballpark?
Directions to Marlins Ballpark?
Best restaurants near Marlins Ballpark? |
import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.web import GOOGLE, YAHOO, BING, sort
# The pattern.web module includes an interesting sort() algorithm.
# It classifies search terms according to a search engine's total results count.
# When a context is defined, it sorts according to relevancy to the context:
# sort(terms=["black", "green", "red"], context="Darth Vader") =>
# yields "black" as the best candidate,
# because "black Darth Vader" yields more search results.
results = sort(
terms = [
"arnold schwarzenegger",
"chuck norris",
"dolph lundgren",
"steven seagal",
"sylvester stallone",
"mickey mouse",
],
context = "dangerous", # Term used for sorting.
service = BING, # GOOGLE, YAHOO, BING, ...
license = None, # You should supply your own API license key for the given service.
strict = True, # Wraps the query in quotes, i.e. 'mac sweet'.
reverse = True, # Reverses term and context: 'sweet mac' instead of 'mac sweet'.
cached = True)
for weight, term in results:
print "%5.2f" % (weight * 100) + "%", term |
A spokesman for Tillerson said he had not been planning to leave and wasn't sure why he was being ousted now.
President Donald Trump said Tuesday he is removing Rex Tillerson after a rocky tenure and nominating CIA Director Mike Pompeo in his place, a shake-up that appeared to take the secretary of state by surprise.
"I've worked with Mike Pompeo now for quite some time," Trump told reporters outside the White House. "Tremendous energy. Tremendous intellect. We're always on the same wavelength. The relationship has been very good, and that's what I need as secretary of state."
Trump said he and Tillerson did not see eye-to-eye on major foreign policy issues, naming as an example the Iran nuclear deal. The president said he thought it was a bad agreement, but he said Tillerson supported it.
"I wish Rex Tillerson well," the president said. "I'm really at a point where we're getting very close to having the Cabinet and other things that I want."
"I think Rex will be much happier now," he said.
Trump and Tillerson have been frequently at odds since the former ExxonMobil CEO took over at Foggy Bottom. But a spokesman for Tillerson said Tuesday he had not been planning to leave and wasn't sure why he was being ousted now.
"The secretary had every intention of remaining because of the tangible progress made on critical national security issues," said Steve Goldstein, under secretary of state for public affairs. "The secretary did not speak to the president this morning and is unaware of the reason, but he is grateful for the opportunity to serve, and still believes strongly that public service is a noble calling and not to be regretted."
Later on Tuesday, the State Department said Goldstein, too, would leave.
Tillerson, in farewell remarks from the State Department briefing room, said he would hand off his responsibilities to Deputy Secretary of State John Sullivan but remain officially in his post through the remainder of the month.
"What is most important is to ensure an orderly and smooth transition during a time that the country continues to face significant policy and national security challenges," a visibly emotional Tillerson, who did not take questions, told reporters.
Tillerson said he would encourage other State Department officials to remain in their jobs. He touted the administration's campaign to increase pressure on North Korea and warned that Russia's current trajectory is likely to lead to "greater isolation on their part, a situation which is not in anyone's interests."
Tillerson's exit removes a source of frustration to Trump and elevates an official, Pompeo, who more closely shares his worldview. A senior White House official said Trump wanted Tillerson out so he could have his new team in place before upcoming talks with North Korea. The president agreed last week to sit down with North Korean leader Kim Jong Un. Tillerson had said just hours before Trump's announcement that such negotiations were a long way off.
“I don’t think they see eye-to-eye on anything,” said one senior administration official.
The strength of the secretary’s relationship with Trump has been a regular source of speculation. Tillerson reportedly referred to the president as a "moron" last year, and while the secretary held an impromptu press conference in the wake of that report, he did not outright deny having used the term.
Tillerson also went further than the president in condemning the Russian government for its alleged role in the attempted assassination of a former Russian spy in the United Kingdom earlier this month. Tillerson said Monday that the attempted murder “clearly came from Russia” and “certainly will trigger a response." White House spokeswoman Sarah Huckabee Sanders declined to explicitly blame Russia at her briefing that day, though Trump on Tuesday said, "It sounds to me like they believe it was Russia, and I would certainly take that finding as fact."
Trump has fired aides without warning before. Then-chief of staff Reince Priebus found out last summer that he had been replaced when Trump announced that John Kelly would be his new chief. Former FBI Director James Comey was alerted to his own firing last year by media reports, since he was not in Washington when a White House aide delivered Comey's termination letter to the bureau's Pennsylvania Avenue headquarters.
Pompeo's favored status in the West Wing for months led administration officials to view him as Tillerson's likely replacement. Trump was at one point expected to tap Sen. Tom Cotton (R-Ark.) to run the CIA, but the White House hesitated to put another Senate seat on the table ahead of the 2018 midterms, according to a person close to Cotton. The potential offer also became less attractive to the senator, who would have been giving up a safe Senate seat to run the CIA for just two and a half years if Trump loses reelection.
"I respect his intellect. I respect the process that we've all gone through together. We have a very good relationship, for whatever reason," Trump said of Pompeo. "I actually got along well with Rex, but really, it was a different mind set. It was a different thinking."
"I am deeply grateful to President Trump for permitting me to serve as Director of the Central Intelligence Agency and for this opportunity to serve as Secretary of State," Pompeo said in a statement. "His leadership has made America safer and I look forward to representing him and the American people to the rest of the world to further America's prosperity."
The Senate Foreign Relations Committee said Tuesday it expects to hold a hearing on Pompeo's nomination in April.
Sen. Bob Menendez (D-N.J.), the ranking member on the Senate Foreign Relations Committee, warned that instability at the State Department would create challenges for U.S. foreign policy, especially amid preparations for Trump's announced meeting with North Korea's leader.
"The problem is not the question of having talks," Menendez said. "The problem is the preparation that has to go into it in order to try to have a successful result in such talks."
Senate Armed Services Committee member Claire McCaskill (D-Mo.) cautioned that "being impulsive might work in a real estate deal. I'm not sure it works in foreign policy." She conceded though that she had "not thought about Pompeo seriously in the context of being secretary of state, and that's the work I need to do now."
Tillerson's ouster and the president's selection of Pompeo to replace him earned mixed reviews from Obama administration veterans who, as a group, have been widely critical of Tillerson's leadership at the State Department. Tommy Vietor, a former National Security Council spokesman under Obama who now co-hosts a popular anti-Trump podcast, wrote on Twitter that Tillerson was "probably the worst Secretary of State in modern history. He gutted and demoralized the department and delivered nothing for this country."
He cautioned that he fears "what comes next but am glad he's finally gone."
Inside Tillerson's State Department, rank-and-file opinion of the secretary was reportedly not much better. Current and former department officials said last November, amid a swirl of rumors that Tillerson might have been on his way out, that a shift to Pompeo would be welcomed in the hopes that his closer relationship to Trump might enliven the department's role in the government. Brett Bruen, a former State Department official, said Tuesday that “there is strong sense of relief at State. The last year has been traumatic to put it mildly."
Before being nominated by Trump to run the CIA, Pompeo was a three-term Republican congressman from Kansas. He has degrees from Harvard's law school and the U.S. Military Academy, from which he graduated first in his class.
Pompeo has previously expressed skepticism towards climate change science, remarking in 2013 that “there are scientists that think lots of different things about climate change. There’s some who think we’re warming, there’s some who think we’re cooling.” That stance has raised red flags among some climate advocates.
Gina Haspel, who Trump said would replace Pompeo and who was named the CIA’s deputy director last year, is an agency veteran who in 2002 ran one of its detention sites in Thailand, where she oversaw the torture of two terrorism suspects, according to a New York Times report. Tapes of those interrogations, which included waterboarding and smashing one suspect’s head into a wall, were ordered destroyed in 2005, the Times reported.
While the CIA’s tactics in the initial years of the war on terrorism have been widely controversial, Trump himself has expressed support for such techniques. On the 2016 campaign trail, Trump said he supported the use of waterboarding, a technique by which a subject is made to feel like they are drowning, and said he would resurrect its use. After his inauguration, the president said he would defer on the question of waterboarding to Pompeo and Defense Secretary James Mattis.
Eliana Johnson, Matthew Nussbaum, Emily Holden, Connor O'Brien and Daniel Lippman contributed to this report. |
from PIL import Image
import numpy as np
class Images(object):
RESIZE_METHODS = {
'bilinear': Image.BILINEAR,
'nearest': Image.NEAREST,
'lanczos': Image.LANCZOS,
'bicubic': Image.BICUBIC
}
NORMALIZATION_ZERO_ONE = 'zero_one'
NORMALIZATION_MINUS_PLUS_ONE = 'minus_plus_one'
NORMALIZATION_METHODS = [
NORMALIZATION_ZERO_ONE, NORMALIZATION_MINUS_PLUS_ONE
]
def load(self, filename):
"""
Load an image into PIL format
"""
self.img = Image.open(filename)
self.img = self.img.convert('RGB')
return self
def save(self, filename='/tmp/out.jpg'):
"""
Saves this image under the given filename. The format to use is determined from the filename extension.
"""
self.img.save(filename)
return self
def describe(self):
"""
Print some useful information for debugging
"""
print("Size: {}".format(self.img.size))
return self
def to_array(self, normalization=False, mean_normalized=False):
"""
Return a NumpyArray with (height, width, channel) format.
normalization: If False/None/Empty no normalization will be applied. Otherwise, the method should be passed.
normalization methods:
- zero_one: All the values will be normalized between [0, 1]
- minus_plus_one: All the values will be normalized between [-1, 1]
mean_normalized: Mean normalized perform a channel normalization. E.g: (123.68, 116.779, 103.939).
"""
# Validate the normalization method
if normalization and normalization not in self.NORMALIZATION_METHODS:
raise ValueError("Invalid normalization method. Valid values: {}".format(self.NORMALIZATION_METHODS))
# Numpy array x has format (height, width, channel)
# but original PIL image has format (width, height, channel)
the_image_array = np.asarray(self.img, dtype='int16')
if normalization or mean_normalized:
the_image_array = the_image_array.astype('float16')
if mean_normalized:
if len(mean_normalized) != 3:
raise ValueError("mean_normalized should have shape 3 for (r,g,b)")
the_image_array[:, :, 0] -= mean_normalized[0]
the_image_array[:, :, 1] -= mean_normalized[1]
the_image_array[:, :, 2] -= mean_normalized[2]
if normalization == self.NORMALIZATION_ZERO_ONE:
# All values are between 0 and 1
the_image_array /= 255.
if normalization == self.NORMALIZATION_MINUS_PLUS_ONE:
# All values are between -1 and 1
the_image_array /= 255.
the_image_array -= 0.5
the_image_array *= 2.
return the_image_array
def resize(self, width=224, height=224, method='bilinear'):
"""
Resize this image to the given size using the defined method.
"""
self.img = self.img.resize(size=(width, height), resample=self.RESIZE_METHODS.get(method, Image.BILINEAR))
return self
def central_crop(self, central_fraction=0.50):
"""
Crop the central region of the image.
Remove the outer parts of an image but retain the central region of the image along each dimension.
If we specify central_fraction = 0.5, this function returns the region marked with "X" in the below diagram.
--------
| |
| XXXX |
| XXXX |
| | where "X" is the central 50% of the image.
--------
"""
w, h = self.img.size
nw, nh = w * central_fraction, h * central_fraction
left = np.ceil((w - nw) / 2.)
top = np.ceil((h - nh) / 2.)
right = np.floor((w + nw) / 2)
bottom = np.floor((h + nh) / 2)
self.img = self.img.crop((left, top, right, bottom))
return self
def centered_crop(self, width, height):
"""
Crop the image to the new size keeping the content in the center.
Remove the outer parts of an image but retain the central region of the image along each dimension.
--------
| |
| XXXX |
| XXXX | where "X" has (width, height) size
| |
--------
"""
w, h = self.img.size
nw, nh = width, height
if width > w:
width = w
if height > h:
height = h
left = np.ceil((w - nw) / 2.)
top = np.ceil((h - nh) / 2.)
right = np.floor((w + nw) / 2)
bottom = np.floor((h + nh) / 2)
self.img = self.img.crop((left, top, right, bottom))
return self
def pad_to_square(self):
"""
Creates a padding in the shorter side with 0 (black) until the image is squared.
The image size will be (longer_side_size, longer_side_size, 3)
"""
longer_side = max(self.img.size)
horizontal_padding = (longer_side - self.img.size[0]) / 2
vertical_padding = (longer_side - self.img.size[1]) / 2
self.img = self.img.crop(
(
-horizontal_padding,
-vertical_padding,
self.img.size[0] + horizontal_padding,
self.img.size[1] + vertical_padding
)
)
return self
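# A minimal usage sketch (the file path is hypothetical): chain the fluent
# API above to load, square-pad, resize and normalize an image into a numpy
# array with values in [0, 1].
if __name__ == '__main__':
    arr = (Images()
           .load('/tmp/example.jpg')
           .pad_to_square()
           .resize(width=224, height=224)
           .to_array(normalization=Images.NORMALIZATION_ZERO_ONE))
    print(arr.shape)  # expected: (224, 224, 3)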
|
Inspiration can come in many ways, and mine for this tag was a peacock feather, featuring the Life of the Party Unmounted Rubber Stamp from Red Rubber Designs!
I used Distress Spray Stains to color the background of this manila tag, and to color the Life of the Party Stamp embossed with glitter onto white cardstock. After adding sequins I popped it up for dimension.
Then I added a border along the bottom with the Fanciful Vintage Unmounted Stamp Set, also embossed with glitter onto a strip of teal cardstock.
To really make the colors pop, I finished all of the edges with black embossed glitter.
Thanks for your visit, and be sure to visit Red Rubber Designs for these stamp sets and more! |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from bilean.common import exception
from bilean.common import utils
from bilean.db import api as db_api
from bilean.rules import base as rule_base
class Policy(object):
"""Policy object contains all policy operations"""
def __init__(self, name, **kwargs):
self.name = name
self.id = kwargs.get('id', None)
self.is_default = kwargs.get('is_default', False)
# rules schema like [{'id': 'xxx', 'type': 'os.nova.server'}]
self.rules = kwargs.get('rules', [])
self.metadata = kwargs.get('metadata', None)
self.created_at = kwargs.get('created_at', None)
self.updated_at = kwargs.get('updated_at', None)
self.deleted_at = kwargs.get('deleted_at', None)
def store(self, context):
"""Store the policy record into database table."""
values = {
'name': self.name,
'rules': self.rules,
'is_default': self.is_default,
'meta_data': self.metadata,
'created_at': self.created_at,
'updated_at': self.updated_at,
'deleted_at': self.deleted_at,
}
if self.id:
db_api.policy_update(context, self.id, values)
else:
policy = db_api.policy_create(context, values)
self.id = policy.id
return self.id
@classmethod
def _from_db_record(cls, record):
'''Construct a policy object from database record.
:param record: a DB policy object that contains all fields;
'''
kwargs = {
'id': record.id,
'rules': record.rules,
'is_default': record.is_default,
'metadata': record.meta_data,
'created_at': record.created_at,
'updated_at': record.updated_at,
'deleted_at': record.deleted_at,
}
return cls(record.name, **kwargs)
@classmethod
def load(cls, context, policy_id=None, policy=None, show_deleted=False):
'''Retrieve a policy from database.'''
if policy is None:
policy = db_api.policy_get(context, policy_id,
show_deleted=show_deleted)
if policy is None:
raise exception.PolicyNotFound(policy=policy_id)
return cls._from_db_record(policy)
@classmethod
def load_all(cls, context, limit=None, marker=None,
sort_keys=None, sort_dir=None,
filters=None, show_deleted=False):
'''Retrieve all policies from the database.'''
records = db_api.policy_get_all(context,
limit=limit, marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir,
filters=filters,
show_deleted=show_deleted)
return [cls._from_db_record(record) for record in records]
def find_rule(self, context, rtype):
'''Find the exact rule from self.rules by rtype'''
for rule in self.rules:
if rtype == rule['type']:
return rule_base.Rule.load(context, rule_id=rule['id'])
return None
def to_dict(self):
policy_dict = {
'id': self.id,
'name': self.name,
'rules': self.rules,
'is_default': self.is_default,
'metadata': self.metadata,
'created_at': utils.format_time(self.created_at),
'updated_at': utils.format_time(self.updated_at),
'deleted_at': utils.format_time(self.deleted_at),
}
return policy_dict
def do_delete(self, context):
db_api.policy_delete(context, self.id)
return True
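# A minimal usage sketch (the request context `ctx` and the rule id are
# hypothetical, not part of this module):
#
#   policy = Policy('gold', rules=[{'id': 'RULE_ID', 'type': 'os.nova.server'}])
#   policy_id = policy.store(ctx)
#   loaded = Policy.load(ctx, policy_id=policy_id)
#   rule = loaded.find_rule(ctx, 'os.nova.server')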
|
This Leather Card Holder is made of super smooth and silky bright blue leather. This is the perfect business card holder for client meetings or interviews and is the best wallet for men or women with lots of cards. Hold multiple credit cards or business cards at once in this handmade leather business card holder. Our credit card holders are unlined so you can enjoy the richness of the leather both inside and out. With a 2.5" top opening you can easily fit all of your major credit cards and even some cash inside. |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Learned Acccept/Reject Sampling (Bauer & Mnih, 2018)."""
from __future__ import absolute_import
import functools
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from eim.models import base
tfd = tfp.distributions
class LARS(object):
"""Learned Accept/Reject Sampling model."""
def __init__(self,
K,
T,
data_dim,
accept_fn_layers,
proposal=None,
data_mean=None,
ema_decay=0.99,
dtype=tf.float32,
is_eval=False):
self.k = K
self.T = T # pylint: disable=invalid-name
self.data_dim = data_dim
self.ema_decay = ema_decay
self.dtype = dtype
if data_mean is not None:
self.data_mean = data_mean
else:
self.data_mean = tf.zeros((), dtype=dtype)
self.accept_fn = functools.partial(
base.mlp,
layer_sizes=accept_fn_layers + [1],
final_activation=tf.math.log_sigmoid,
name="a")
if proposal is None:
self.proposal = base.get_independent_normal(data_dim)
else:
self.proposal = proposal
self.is_eval = is_eval
if is_eval:
self.Z_estimate = tf.placeholder(tf.float32, shape=[]) # pylint: disable=invalid-name
with tf.variable_scope("LARS_Z_ema", reuse=tf.AUTO_REUSE):
self.Z_ema = tf.get_variable( # pylint: disable=invalid-name
name="LARS_Z_ema",
shape=[],
dtype=dtype,
initializer=tf.constant_initializer(0.5),
trainable=False)
def log_prob(self, data, log_q_data=None, num_samples=1):
"""Compute log likelihood estimate."""
# Compute log a(z), log pi(z), and log q(z)
log_a_z_r = tf.squeeze(self.accept_fn(data - self.data_mean),
axis=-1) # [batch_size]
# [batch_size]
try:
# Try giving the proposal lower bound num_samples if it can use it.
log_pi_z_r = self.proposal.log_prob(data, num_samples=num_samples)
except TypeError:
log_pi_z_r = self.proposal.log_prob(data)
tf.summary.histogram("log_energy_data", log_a_z_r)
if not self.is_eval:
# Sample zs from proposal to estimate Z
z_s = self.proposal.sample(self.k) # [K, data_dim]
# Compute log a(z) for zs sampled from proposal
log_a_z_s = tf.squeeze(self.accept_fn(z_s - self.data_mean),
axis=-1) # [K]
tf.summary.histogram("log_energy_proposal", log_a_z_s)
# pylint: disable=invalid-name
log_ZS = tf.reduce_logsumexp(log_a_z_s) # []
log_Z_curr_avg = log_ZS - tf.log(tf.to_float(self.k))
if log_q_data is not None:
# This may only be valid when log pi is exact (i.e., not a lower bound).
Z_curr_avg = (1. / (self.k + 1.)) * (
tf.exp(log_ZS) +
tf.exp(log_a_z_r + tf.stop_gradient(log_pi_z_r - log_q_data)))
else:
Z_curr_avg = tf.exp(log_Z_curr_avg)
self.Z_smooth = (
self.ema_decay * self.Z_ema + (1 - self.ema_decay) * Z_curr_avg)
# In forward pass, log Z is log of the smoothed ema version of Z
# In backward pass it is the current estimate of log Z, log_Z_curr_avg
Z = Z_curr_avg + tf.stop_gradient(self.Z_smooth - Z_curr_avg)
tf.summary.scalar("Z", tf.reduce_mean(Z))
else:
Z = self.Z_estimate # pylint: disable=invalid-name
# pylint: enable=invalid-name
alpha = tf.pow(1. - Z, self.T - 1)
log_prob = log_pi_z_r + tf.log(tf.exp(log_a_z_r) * (1. - alpha) / Z + alpha)
return log_prob
def post_train_op(self):
# Set up EMA of Z (EMA is updated after gradient step).
return tf.assign(self.Z_ema, tf.reduce_mean(self.Z_smooth))
def compute_Z(self, num_samples): # pylint: disable=invalid-name
r"""Returns log(\sum_i a(z_i) / num_samples)."""
z_s = self.proposal.sample(num_samples) # [num_samples, data_dim]
# Compute log a(z) for zs sampled from proposal
log_a_z_s = tf.squeeze(self.accept_fn(z_s - self.data_mean),
axis=-1) # [num_samples]
log_Z = tf.reduce_logsumexp(log_a_z_s) - tf.log( # pylint: disable=invalid-name
tf.to_float(num_samples)) # []
return log_Z
def sample(self, num_samples=1):
"""Sample from the model."""
def while_body(t, z, accept):
"""Truncated rejection sampling."""
new_z = self.proposal.sample(num_samples)
accept_prob = tf.squeeze(tf.exp(self.accept_fn(new_z - self.data_mean)),
axis=-1)
new_accept = tf.math.less_equal(
tf.random_uniform(shape=[num_samples], minval=0., maxval=1.),
accept_prob)
force_accept = tf.math.greater_equal(
tf.to_float(t),
tf.to_float(self.T) - 1.)
new_accept = tf.math.logical_or(new_accept, force_accept)
accepted = tf.logical_or(accept, new_accept)
swap = tf.math.logical_and(tf.math.logical_not(accept), new_accept)
z = tf.where(swap, new_z, z)
return t + 1, z, accepted
def while_cond(unused_t, unused_z, accept):
return tf.reduce_any(tf.logical_not(accept))
shape = [num_samples] + self.data_dim
z0 = tf.zeros(shape, dtype=self.dtype)
accept0 = tf.constant(False, shape=[num_samples])
_, zs, _ = tf.while_loop(while_cond, while_body, loop_vars=(0, z0, accept0))
return zs
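# A minimal construction sketch (all sizes below are assumptions, not taken
# from the original file); it shows the intended train-time call pattern of
# building the graph, scoring data, and refreshing the Z EMA after each step.
if __name__ == "__main__":
    model = LARS(K=128, T=10, data_dim=[2], accept_fn_layers=[32, 32])
    data = tf.zeros([4, 2])
    log_p = model.log_prob(data)    # [4] per-example log-likelihood estimate
    ema_op = model.post_train_op()  # run after each gradient step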
|
import asyncio
import logging
from aiohttp import web
from sensomatic.sources import defaults
from sensomatic.ui.data_stream import DataStreamHandler
from sensomatic.ui.static_utils import static_handler
class Server:
def __init__(self):
self.app = self.create_app()
self.handler = self.app.make_handler()
self.srv = self.create_http_server()
def create_app(self):
app = web.Application()
app.router.add_route('GET', '/updates', DataStreamHandler)
app.router.add_route('GET', '/ui/{path:.*}', static_handler)
return app
def create_http_server(self):
loop = asyncio.get_event_loop()
srv = loop.run_until_complete(
loop.create_server(self.handler, '0.0.0.0', 8081)
)
logging.info('will be serving on {}'.format(srv.sockets[0].getsockname()))
return srv
def finalize(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self.handler.finish_connections(1.0))
self.srv.close()
loop.run_until_complete(self.srv.wait_closed())
loop.run_until_complete(self.app.finish())
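# Illustrative entry point (an assumption; the original module does not ship
# one). Constructing Server() already starts the listener via
# create_http_server(), so only run_forever() and cleanup remain here.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    server = Server()
    loop = asyncio.get_event_loop()
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        server.finalize()
        loop.close()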
This regiment, Col. Dudley Donnelly, was accepted by the State May 18, 1861; organized at Albany, and there mustered in the service of the United States for two years May 22, 1861. Its three years' men were, in May, 1863, transferred to the 60th Infantry.
The companies were recruited principally: A, B, C and K at Lockport; D at Medina; E at Canandaigua; F at Batavia; G at Albion; H at Monticello, and I at Niagara Falls.
The regiment left the State June 24, 1861; served at Washington, D. C., from June 27, 1861; in Butterfield's Brigade, Keim's Division, Patterson's Army, District of Pennsylvania, from July 8, 1861; in Stiles' Brigade, Banks' Division, Army of the Potomac, from October 15, 1861; in 1st Brigade, 1st, Williams', Division, 5th Corps, Army of the Potomac, from March 13, 1862; in same brigade and division, Department of the Shenandoah, from May, 1862; in 1st Brigade, 1st Division, 2d Corps, Army of Virginia, from June 26, 1862; in the 1st Brigade, 1st Division, 12th Corps, Army of the Potomac, from September 12, 1862; and was honorably discharged and mustered out under Col. Edwin F. Brown, June 2, 1863, at Albany.
During its service, the regiment lost by death, killed in action, 1 officer, 50 enlisted men; of wounds received in action, 1 officer, 10 enlisted men; of disease and other causes, 47 enlisted men; total, 2 officers, 107 enlisted men; aggregate, 109; of whom 2 enlisted men died in the hands of the enemy.
Twenty-eighth Infantry.—Cols., Dudley Donnelly, Edwin F. Brown; Lieut.-Cols., Edwin F. Brown, Elliott W. Cook; Majs., James R. Mitchell, Elliott W. Cook, Theophilus Fitzgerald. The 28th, the "Niagara Rifles," was composed of five companies from Niagara county, two from Orleans county, one from Ontario, one from Genesee and one from Sullivan, and was mustered into the U. S. service for two years on May 22, 1861, at Albany. A month was spent in camp at Camp Morgan and on June 25 the regiment left the state for Washington. It was assigned on July 7 to Butterfield's brigade, Keim's division of Gen. Patterson's force, which it joined at Martinsburg, W. Va. Camp was occupied at Berlin until Aug. 20, when the force moved to Darnestown and remained there until Oct. 20, when it was ordered to Ball's bluff but did not arrive in time to take part in the battle. From Dec. 5, 1861, to Jan. 6, 1862, the regiment encamped at Frederick; was then at Hancock until March 1, and then moved to Winchester with the 1st brigade, 1st division, 5th corps, Army of the Potomac. Co. E participated in the engagement near Columbia Furnace, Co. I in an encounter near Montevideo, and the entire regiment was transferred to the Department of the Shenandoah in May. It marched to Front Royal, Middletown, Newtown, Winchester and Bunker Hill in May; to Williamsport and Front Royal in June, and to Culpeper Court House and Cedar mountain in July. In the battle of Cedar mountain the loss of the 28th was 213 killed, wounded and missing out of 339 engaged, and of these 41 men were mortally wounded. On Aug. 21 the regiment was again in action at Rappahannock Station. On June 26 it had been assigned to the 1st brigade, 1st division, 2nd corps, Army of Virginia, and on Sept. 12 to the same brigade and division of the 12th corps, Army of the Potomac. During the second battle of Bull Run the command was posted at Manassas Junction and was then withdrawn to Centerville and Alexandria, leaving there Sept. 3 for Maryland. At Antietam the command was closely engaged and the commander of the corps, Gen. Mansfield, was mortally wounded. Gen. Williams succeeded him in command and the corps went into camp at Harper's Ferry. On Dec. 10 the regiment marched toward Dumfries, from there to Fairfax Station, then to Stafford Court House, where it established winter quarters. The last battle of the 28th was at Chancellorsville, in which the regiment lost 78 members killed, wounded or missing. Soon after, it returned to New York and was mustered out at Albany on June 2, 1863. The total loss of the regiment during its term of service was 68 members killed or died of wounds and 49 died from other causes.
Boyce, Charles William. A brief history of the Twenty-eighth regiment, New York state volunteers, First brigade, First division, Twelfth corps, Army of the Potomac, from the author's diary and official reports, with the muster-roll of the Regiment, and many pictures, articles and letters from surviving members and friends, with the report of the proceedings of the thirty-fifth annual reunion held at Albion, New York, May 22, 1896 [by] C. W. Boyce. Buffalo [Matthews-Northrup co., 1896].
Boyce, Charles William. "The story of our flag, how lost, found, and restored." Maine bugle, campaign V (Rockland 1898) 182-9.
Boyce, Charles William. "The story of our flag, how lost, found, and restored. The first reunion of Blue and Gray." BandG I (1893) 377-82.
Boyce, Charles William. "A story of the Shenandoah valley in 1862. The first Provost-Marshal [Erwin A, Bowen] of Harrisonburg, Va." BandG III (1894) 243-8.
Brown, Benjamin Balmer. "Civil war letters." North Dakota historical quarterly I:3 (1926/27) 60-71; no. 4, 61-8.
King, Horatio Collins. Dedication of the monument to the 28th New York volunteers, Culpeper, Va., August 8, 1902.
Report of proceedings of the thirty-fourth annual reunion of the 28th reg't New York volunteers, held at Niagara Falls, New York, Wednesday, May 22d, 1895. [Buffalo, Peter Paul book co., printers, 1895].
Report of proceedings of the thirty-third annual reunion 28th regiment New York volunteers, held at Lockport, New York, Tuesday, May 22d, 1894. [Buffalo, Press of Peter Paul book co., 1894].
Thirty-sixth annual reunion Twenty-eighth regiment New York state volunteers, held at Eldorado, on Grand island, near Buffalo, N. Y., August 26th, 1897. [Buffalo, Paul's press, 1897].
Rowley, William W. "The Signal corps of the army during the rebellion, by Captain W. W. Rowley, 1893." MOLLUS-Wis II 220-9.
Squires, Charles H. The 28th regiment, N.Y. vols., 1st brig., 1st div., 12th army corps, at the battle of Cedar mountain, Va., Aug. 9th, 1862. Lockport, J. H. Murphy, printer [n.d.] broadside, 25 1/2 x 29cm.
Waller, John. "With Banks from Strasburg to the Potomac." BandG III (1894) 194-6.
import sys
import numpy
from overrides import overrides
from keras.layers import Embedding, Dropout, LSTM, Bidirectional
from onto_attention import OntoAttentionLSTM
from embedding import OntoAwareEmbedding
class Encoder(object):
'''
Encoder is an abstract class that defines a get_encoded_phrase method.
'''
def __init__(self, data_processor=None, embed_dim=50, bidirectional=False, tune_embedding=False,
return_sequences=True, **kwargs):
self.embed_dim = embed_dim
self.data_processor = data_processor
self.bidirectional = bidirectional
self.tune_embedding = tune_embedding
self.return_sequences = return_sequences
self.embedding_layer = None
self.encoder_layer = None
def get_encoded_phrase(self, phrase_input_layer, dropout={}, embedding=None):
'''
Takes a Keras input layer and dropout settings, and returns the output of the encoder as a Keras tensor.
Arguments:
phrase_input_layer (Input): Keras Input layer of the appropriate shape.
dropout (dict [str -> float]): Dict containing dropout values applied after
`embedding` and `encoder`.
embedding (str): Optional path to a gzipped embedding file to use as
initialization for the embedding layer.
'''
embedding_layer = self._get_embedding_layer(embedding)
embedded_phrase = embedding_layer(phrase_input_layer)
embedding_dropout = dropout.pop("embedding", 0.0)
if embedding_dropout > 0:
embedded_phrase = Dropout(embedding_dropout)(embedded_phrase)
encoder = self._get_encoder_layer()
encoded_phrase = encoder(embedded_phrase)
encoder_dropout = dropout.pop("encoder", 0.0)
if encoder_dropout > 0:
encoded_phrase = Dropout(encoder_dropout)(encoded_phrase)
return encoded_phrase
def _get_embedding_layer(self, embedding_file=None):
'''
Checks if an embedding layer is defined. If so, returns it. Or else, makes one.
'''
raise NotImplementedError
def _get_encoder_layer(self):
'''
Checks if an encoder layer is defined. If so, returns it. Or else, makes one.
'''
raise NotImplementedError
@staticmethod
def get_custom_objects():
return {}
class LSTMEncoder(Encoder):
@overrides
def _get_embedding_layer(self, embedding_file=None):
if self.embedding_layer is None:
if embedding_file is None:
if not self.tune_embedding:
print >>sys.stderr, "Pretrained embedding is not given. Setting tune_embedding to True."
self.tune_embedding = True
embedding = None
else:
# Put the embedding in a list for Keras to treat it as initial weights of the embedding
# layer.
embedding = [self.data_processor.get_embedding_matrix(embedding_file, onto_aware=False)]
vocab_size = self.data_processor.get_vocab_size(onto_aware=False)
self.embedding_layer = Embedding(input_dim=vocab_size, output_dim=self.embed_dim,
weights=embedding, trainable=self.tune_embedding,
mask_zero=True, name="embedding")
return self.embedding_layer
@overrides
def _get_encoder_layer(self):
if self.encoder_layer is None:
self.encoder_layer = LSTM(input_dim=self.embed_dim, output_dim=self.embed_dim,
return_sequences=self.return_sequences, name="encoder")
if self.bidirectional:
self.encoder_layer = Bidirectional(self.encoder_layer, name="encoder")
return self.encoder_layer
class OntoLSTMEncoder(Encoder):
def __init__(self, num_senses, num_hyps, use_attention, set_sense_priors, **kwargs):
self.num_senses = num_senses
self.num_hyps = num_hyps
self.use_attention = use_attention
self.set_sense_priors = set_sense_priors
super(OntoLSTMEncoder, self).__init__(**kwargs)
@overrides
def _get_embedding_layer(self, embedding_file=None):
if self.embedding_layer is None:
word_vocab_size = self.data_processor.get_vocab_size(onto_aware=False)
synset_vocab_size = self.data_processor.get_vocab_size(onto_aware=True)
if embedding_file is None:
if not self.tune_embedding:
print >>sys.stderr, "Pretrained embedding is not given. Setting tune_embedding to True."
self.tune_embedding = True
embedding_weights = None
else:
# TODO: Other sources for prior initialization
embedding = self.data_processor.get_embedding_matrix(embedding_file, onto_aware=True)
# Put the embedding in a list for Keras to treat it as weights of the embedding layer.
embedding_weights = [embedding]
if self.set_sense_priors:
initial_sense_prior_parameters = numpy.random.uniform(low=0.01, high=0.99,
size=(word_vocab_size, 1))
# While setting weights, Keras wants trainable weights first, and then the non trainable
# weights. If we are not tuning the embedding, we need to keep the sense priors first.
if not self.tune_embedding:
embedding_weights = [initial_sense_prior_parameters] + embedding_weights
else:
embedding_weights.append(initial_sense_prior_parameters)
self.embedding_layer = OntoAwareEmbedding(word_vocab_size, synset_vocab_size, self.embed_dim,
weights=embedding_weights, mask_zero=True,
set_sense_priors=self.set_sense_priors,
tune_embedding=self.tune_embedding,
name="embedding")
return self.embedding_layer
@overrides
def _get_encoder_layer(self):
if self.encoder_layer is None:
self.encoder_layer = OntoAttentionLSTM(input_dim=self.embed_dim, output_dim=self.embed_dim,
num_senses=self.num_senses, num_hyps=self.num_hyps,
use_attention=self.use_attention, consume_less="gpu",
return_sequences=self.return_sequences, name="onto_lstm")
if self.bidirectional:
self.encoder_layer = Bidirectional(self.encoder_layer, name="onto_lstm")
return self.encoder_layer
@staticmethod
def get_custom_objects():
return {"OntoAttentionLSTM": OntoAttentionLSTM,
"OntoAwareEmbedding": OntoAwareEmbedding}
These memberships are designed to give everyone, from a sole operator to a multi-national company, the opportunity to have a voice and to be heard in the medical technology market, regardless of size or turnover.
Many smaller companies with niche products and services are not consulted when government starts to implement policy and pricing reform. These memberships offer everyone the opportunity to have a say in a collective forum at affordable rates.
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Shortcut management"""
# Standard library imports
from __future__ import print_function
import os
import os.path as osp
import sys
# Third party imports
from qtpy.QtWidgets import (QButtonGroup, QGroupBox, QInputDialog, QLabel,
QLineEdit, QMessageBox, QPushButton, QVBoxLayout)
# Local imports
from spyder.config.base import _
from spyder.plugins.configdialog import GeneralConfigPage
from spyder.py3compat import PY2, is_text_string, to_text_string
from spyder.utils import icon_manager as ima
from spyder.utils.misc import get_python_executable
from spyder.utils import programs
class MainInterpreterConfigPage(GeneralConfigPage):
CONF_SECTION = "main_interpreter"
NAME = _("Python interpreter")
ICON = ima.icon('python')
def __init__(self, parent, main):
GeneralConfigPage.__init__(self, parent, main)
self.cus_exec_radio = None
self.pyexec_edit = None
# Python executable selection (initializing default values as well)
executable = self.get_option('executable', get_python_executable())
if self.get_option('default'):
executable = get_python_executable()
if not osp.isfile(executable):
# This is absolutely necessary, in case the Python interpreter
# executable has been moved since last Spyder execution (following
# a Python distribution upgrade for example)
self.set_option('executable', get_python_executable())
elif executable.endswith('pythonw.exe'):
# That should not be necessary because this case is already taken
# care of by the `get_python_executable` function but, this was
# implemented too late, so we have to fix it here too, in case
# the Python executable has already been set with pythonw.exe:
self.set_option('executable',
executable.replace("pythonw.exe", "python.exe"))
def initialize(self):
GeneralConfigPage.initialize(self)
self.pyexec_edit.textChanged.connect(self.python_executable_changed)
self.cus_exec_radio.toggled.connect(self.python_executable_switched)
def setup_page(self):
newcb = self.create_checkbox
# Python executable Group
pyexec_group = QGroupBox(_("Python interpreter"))
pyexec_bg = QButtonGroup(pyexec_group)
pyexec_label = QLabel(_("Select the Python interpreter for all Spyder "
"consoles"))
def_exec_radio = self.create_radiobutton(
_("Default (i.e. the same as Spyder's)"),
'default', button_group=pyexec_bg)
self.cus_exec_radio = self.create_radiobutton(
_("Use the following Python interpreter:"),
'custom', button_group=pyexec_bg)
if os.name == 'nt':
filters = _("Executables")+" (*.exe)"
else:
filters = None
pyexec_file = self.create_browsefile('', 'executable', filters=filters)
for le in self.lineedits:
if self.lineedits[le][0] == 'executable':
self.pyexec_edit = le
def_exec_radio.toggled.connect(pyexec_file.setDisabled)
self.cus_exec_radio.toggled.connect(pyexec_file.setEnabled)
pyexec_layout = QVBoxLayout()
pyexec_layout.addWidget(pyexec_label)
pyexec_layout.addWidget(def_exec_radio)
pyexec_layout.addWidget(self.cus_exec_radio)
pyexec_layout.addWidget(pyexec_file)
pyexec_group.setLayout(pyexec_layout)
# UMR Group
umr_group = QGroupBox(_("User Module Reloader (UMR)"))
umr_label = QLabel(_("UMR forces Python to reload modules which were "
"imported when executing a file in a Python or "
"IPython console with the <i>runfile</i> "
"function."))
umr_label.setWordWrap(True)
umr_enabled_box = newcb(_("Enable UMR"), 'umr/enabled',
msg_if_enabled=True, msg_warning=_(
"This option will enable the User Module Reloader (UMR) "
"in Python/IPython consoles. UMR forces Python to "
"reload deeply modules during import when running a "
"Python script using the Spyder's builtin function "
"<b>runfile</b>."
"<br><br><b>1.</b> UMR may require to restart the "
"console in which it will be called "
"(otherwise only newly imported modules will be "
"reloaded when executing files)."
"<br><br><b>2.</b> If errors occur when re-running a "
"PyQt-based program, please check that the Qt objects "
"are properly destroyed (e.g. you may have to use the "
"attribute <b>Qt.WA_DeleteOnClose</b> on your main "
"window, using the <b>setAttribute</b> method)"),
)
umr_verbose_box = newcb(_("Show reloaded modules list"),
'umr/verbose', msg_info=_(
"Please note that these changes will "
"be applied only to new consoles"))
umr_namelist_btn = QPushButton(
_("Set UMR excluded (not reloaded) modules"))
umr_namelist_btn.clicked.connect(self.set_umr_namelist)
umr_layout = QVBoxLayout()
umr_layout.addWidget(umr_label)
umr_layout.addWidget(umr_enabled_box)
umr_layout.addWidget(umr_verbose_box)
umr_layout.addWidget(umr_namelist_btn)
umr_group.setLayout(umr_layout)
vlayout = QVBoxLayout()
vlayout.addWidget(pyexec_group)
vlayout.addWidget(umr_group)
vlayout.addStretch(1)
self.setLayout(vlayout)
def python_executable_changed(self, pyexec):
"""Custom Python executable value has been changed"""
if not self.cus_exec_radio.isChecked():
return
if not is_text_string(pyexec):
pyexec = to_text_string(pyexec.toUtf8(), 'utf-8')
if programs.is_python_interpreter(pyexec):
self.warn_python_compatibility(pyexec)
else:
QMessageBox.warning(self, _('Warning'),
_("You selected an invalid Python interpreter for the "
"console so the previous interpreter will stay. Please "
"make sure to select a valid one."), QMessageBox.Ok)
self.pyexec_edit.setText(get_python_executable())
return
def python_executable_switched(self, custom):
"""Python executable default/custom radio button has been toggled"""
def_pyexec = get_python_executable()
cust_pyexec = self.pyexec_edit.text()
if not is_text_string(cust_pyexec):
cust_pyexec = to_text_string(cust_pyexec.toUtf8(), 'utf-8')
if def_pyexec != cust_pyexec:
if custom:
self.warn_python_compatibility(cust_pyexec)
def warn_python_compatibility(self, pyexec):
if not osp.isfile(pyexec):
return
spyder_version = sys.version_info[0]
try:
args = ["-c", "import sys; print(sys.version_info[0])"]
proc = programs.run_program(pyexec, args)
console_version = int(proc.communicate()[0])
except IOError:
console_version = spyder_version
if spyder_version != console_version:
QMessageBox.warning(self, _('Warning'),
_("You selected a <b>Python %d</b> interpreter for the console "
"but Spyder is running on <b>Python %d</b>!.<br><br>"
"Although this is possible, we recommend you to install and "
"run Spyder directly with your selected interpreter, to avoid "
"seeing false warnings and errors due to the incompatible "
"syntax between these two Python versions."
) % (console_version, spyder_version), QMessageBox.Ok)
def set_umr_namelist(self):
"""Set UMR excluded modules name list"""
arguments, valid = QInputDialog.getText(self, _('UMR'),
_("Set the list of excluded modules as "
"this: <i>numpy, scipy</i>"),
QLineEdit.Normal,
", ".join(self.get_option('umr/namelist')))
if valid:
arguments = to_text_string(arguments)
if arguments:
namelist = arguments.replace(' ', '').split(',')
fixed_namelist = []
non_ascii_namelist = []
for module_name in namelist:
if PY2:
if all(ord(c) < 128 for c in module_name):
if programs.is_module_installed(module_name):
fixed_namelist.append(module_name)
else:
QMessageBox.warning(self, _('Warning'),
_("You are working with Python 2, this means that "
"you can not import a module that contains non-"
"ascii characters."), QMessageBox.Ok)
non_ascii_namelist.append(module_name)
elif programs.is_module_installed(module_name):
fixed_namelist.append(module_name)
invalid = ", ".join(set(namelist)-set(fixed_namelist)-
set(non_ascii_namelist))
if invalid:
QMessageBox.warning(self, _('UMR'),
_("The following modules are not "
"installed on your machine:\n%s"
) % invalid, QMessageBox.Ok)
QMessageBox.information(self, _('UMR'),
_("Please note that these changes will "
"be applied only to new Python/IPython "
"consoles"), QMessageBox.Ok)
else:
fixed_namelist = []
self.set_option('umr/namelist', fixed_namelist)
def apply_settings(self, options):
self.main.apply_settings()
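# Illustrative, standard-library-only version of the check performed in
# warn_python_compatibility above (programs.run_program is Spyder-specific;
# this sketch is an assumption, not part of Spyder's API):
def _interpreter_major_version_sketch(pyexec):
    import subprocess
    # Ask the selected interpreter itself which major version it runs.
    out = subprocess.check_output(
        [pyexec, "-c", "import sys; print(sys.version_info[0])"])
    return int(out)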
With one of the sleekest compact dome designs available, the Channel Vision 1.3 Megapixel Mini Dome IP Camera with POE offers discreet indoor or outdoor surveillance, viewable from any web-enabled device, anywhere in the world. Real-time video is accessible through an Internet browser or with the free Channel Vision smartphone app, available for iOS and Android phones. The camera also has an SD card slot for local recording (SD card not included).
Multi-streaming allows this camera to capture and display up to three IP video streams simultaneously. Essentially, that means you can use a single camera to capture three different views at once.
Featuring a 1.3-megapixel CMOS sensor, the camera captures HD video at resolutions up to 1280 x 800 pixels. Because the camera also supports H.264 compression, high-resolution video is easily delivered at maximum frame rate (30fps) over the Internet, providing clear images near or far. Digital Wide Dynamic Range (WDR) offers greater scene detail in low lighting, while noise reduction helps produce even clearer images.
The camera can be powered over Ethernet (PoE), or with a 12VDC power supply (not included). Its vandal-proof design helps prevent unauthorized tampering.
from django.shortcuts import redirect, render
from django.core.urlresolvers import reverse
import os
import json
from functools import wraps
from django.http.response import JsonResponse
from django.conf import settings
from django.template import Context, Template
from rest_framework import status
from django.db.models.query_utils import Q
import subprocess
from scandir import scandir
import re
import datetime
from bioshareX.file_utils import istext
from os import path
class JSONDecorator(object):
def __init__(self, orig_func):
self.orig_func = orig_func
def __call__(self, *args, **kwargs):
# `json` is already imported at module level.
json_arg = args[0].POST.get('json',args[0].GET.get('json',None))
if json_arg is not None:
kwargs['json'] = json.loads(json_arg)
elif hasattr(args[0], 'data'):
kwargs['json'] = args[0].data
return self.orig_func(*args, **kwargs)
def share_access_decorator_old(perms,share_param='share'):
def wrap(f):
def wrapped_f(*args,**kwargs):
from bioshareX.models import Share
share = Share.objects.get(id=kwargs[share_param])
kwargs[share_param]=share
f(*args,**kwargs)
return wrapped_f
return wrap
def ajax_login_required(view_func):
@wraps(view_func)
def wrapper(request, *args, **kwargs):
if request.user.is_authenticated():
return view_func(request, *args, **kwargs)
return JsonResponse({'status':'error','unauthenticated':True,'errors':['You do not have access to this resource.']},status=status.HTTP_401_UNAUTHORIZED)
return wrapper
class share_access_decorator(object):
def __init__(self, perms,share_param='share'):
"""
If there are decorator arguments, the function
to be decorated is not passed to the constructor!
"""
self.perms = perms
self.share_param = share_param
def __call__(self, f):
"""
If there are decorator arguments, __call__() is only called
once, as part of the decoration process! You can only give
it a single argument, which is the function object.
"""
def wrapped_f(*args,**kwargs):
from bioshareX.models import Share
try:
share = Share.get_by_slug_or_id(kwargs[self.share_param])
except Share.DoesNotExist:
return render(args[0],'errors/message.html', {'message':'No share with that ID exists.'},status=500)
kwargs[self.share_param]=share
request = args[0]
user_permissions = share.get_user_permissions(request.user)
for perm in self.perms:
if not share.secure and perm in ['view_share_files','download_share_files']:
continue
if not perm in user_permissions:
if request.is_ajax():
if not request.user.is_authenticated():
return JsonResponse({'status':'error','unauthenticated':True,'errors':['You do not have access to this resource.']},status=status.HTTP_401_UNAUTHORIZED)
return json_error({'status':'error','unauthenticated':True,'errors':['You do not have access to this resource.']})
else:
return json_error(['You do not have access to this resource.'])
else:
if not request.user.is_authenticated():
url = reverse('login') + '?next=%s' % request.get_full_path()
return redirect(url)
return redirect('forbidden')
return f(*args,**kwargs)
return wrapped_f
class safe_path_decorator(object):
def __init__(self, share_param='share',path_param='subpath'):
"""
If there are decorator arguments, the function
to be decorated is not passed to the constructor!
"""
self.share_param = share_param
self.path_param = path_param
def __call__(self, f):
"""
If there are decorator arguments, __call__() is only called
once, as part of the decoration process! You can only give
it a single argument, which is the function object.
"""
def wrapped_f(*args,**kwargs):
from bioshareX.models import Share
share = kwargs.get(self.share_param,None)
if share:
if not isinstance(kwargs[self.share_param], Share):
try:
share = Share.get_by_slug_or_id(share)
except Share.DoesNotExist:
return render(args[0],'errors/message.html', {'message':'No share with that ID exists.'},status=500)
if not paths_contain(settings.DIRECTORY_WHITELIST,share.get_realpath()):
raise Exception('Share has an invalid root path: %s'%share.get_realpath())
path = kwargs.get(self.path_param,None)
if path is not None:
test_path(path)
if share:
full_path = os.path.join(share.get_path(),path)
if not paths_contain(settings.DIRECTORY_WHITELIST,full_path):
raise Exception('Illegal path encountered, %s, %s'%(share.get_path(),path))
return f(*args,**kwargs)
return wrapped_f
class safe_path_decorator_old(object):
def __init__(self, path_param='subpath'):
"""
If there are decorator arguments, the function
to be decorated is not passed to the constructor!
"""
self.path_param = path_param
def __call__(self, f):
"""
If there are decorator arguments, __call__() is only called
once, as part of the decoration process! You can only give
it a single argument, which is the function object.
"""
def wrapped_f(*args,**kwargs):
path = kwargs[self.path_param]
if path is not None:
test_path(path)
return f(*args,**kwargs)
return wrapped_f
def get_setting(key, default=None):
return getattr(settings, key, default)
def test_path(path,allow_absolute=False,share=None):
illegals = ['..','*']
for illegal in illegals:
if illegal in path:
raise Exception('Illegal path encountered')
if path.startswith('/') and not allow_absolute:
raise Exception('Subpath may not start with slash')
if path.startswith('~') and not allow_absolute:
raise Exception('Subpath may not start with a "~"')
if share:
full_path = os.path.join(share.get_path(),path)
if not paths_contain(settings.DIRECTORY_WHITELIST,full_path):
raise Exception('Illegal path encountered, %s, %s'%(share.get_path(),path))
def path_contains(parent_path,child_path,real_path=True):
if real_path:
return os.path.join(os.path.realpath(child_path),'').startswith(os.path.join(os.path.realpath(parent_path),''))
else:
return os.path.join(child_path,'').startswith(os.path.join(parent_path,''))
def paths_contain(paths,child_path, get_path=False):
for path in paths:
if path_contains(path, child_path):
return path if get_path else True
return False
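# Illustrative behavior of the helpers above (hypothetical paths): the
# trailing separator added via os.path.join(..., '') ensures that
# '/srv/data2' is not mistaken for a child of '/srv/data':
#   path_contains('/srv/data', '/srv/data/shares/x', real_path=False)  -> True
#   path_contains('/srv/data', '/srv/data2/shares/x', real_path=False) -> False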
def json_response(dict):
from django.http.response import HttpResponse
import json
return HttpResponse(json.dumps(dict), content_type='application/json')
def json_error(messages,http_status=None):
http_status = http_status or status.HTTP_400_BAD_REQUEST
return JsonResponse({'status':'error','errors':messages},status=http_status)
# return json_response({'status':'error','errors':messages})
def dictfetchall(sql,args=[]):
from django.db import connection
cursor = connection.cursor()
cursor.execute(sql, args)
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def fetchall(sql,args=[]):
from django.db import connection
cursor = connection.cursor()
cursor.execute(sql, args)
return cursor.fetchall()
def find_python(pattern, path):
import fnmatch
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
def find_in_shares(shares, pattern):
import subprocess
paths = [share.get_path() for share in shares]
output = subprocess.check_output(['find']+paths+['-name',pattern])
return output.split('\n')
def find(share, pattern, subdir=None,prepend_share_id=True):
import subprocess, os
path = share.get_path() if subdir is None else os.path.join(share.get_path(),subdir)
base_path = os.path.realpath(path)
output = subprocess.Popen(['find',base_path,'-name',pattern], stdout=subprocess.PIPE).communicate()[0]
# output = subprocess.check_output(['find',path,'-name',pattern])
paths = output.split('\n')
# return paths
results=[]
for path in paths:
result = path.split(base_path)
if len(result) == 2:
# print os.path.join(share.id,result[1])
if prepend_share_id:
results.append('/'+share.id+result[1])
else:
results.append(result[1][1:])
return results
def validate_email( email ):
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
try:
validate_email( email )
return True
except ValidationError:
return False
def email_users(users, subject_template=None, body_template=None, ctx_dict={},subject=None,body=None, from_email=settings.DEFAULT_FROM_EMAIL,content_subtype = "html"):
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
if subject:
t = Template(subject)
subject = t.render(Context(ctx_dict))
else:
subject = render_to_string(subject_template,ctx_dict)
subject = ''.join(subject.splitlines())
if body:
t = Template(body)
message = t.render(Context(ctx_dict))
else:
message = render_to_string(body_template, ctx_dict)
msg = EmailMessage(subject, message, from_email, [u.email for u in users])
msg.content_subtype = content_subtype # Main content is now text/html
msg.send(fail_silently=False)
#
# def get_file_info(path):
# from os.path import basename
# from os import stat
# import datetime
# (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = stat(path)
# return {'name':file.name,'size':size, 'modified':datetime.datetime.fromtimestamp(mtime).strftime("%b %d, %Y %H:%M")}
def sizeof_fmt(num):
num /= 1024.0 #function takes bytes, convert to KB
for x in ['KB','MB','GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
def zipdir(base, path, zip):
from os.path import relpath
for root, dirs, files in os.walk(path):
for file in files:
file_path = os.path.join(root, file)
rel_path = relpath(path=file_path, start=base)
zip.write(file_path,arcname=rel_path)
def get_size(path):
total_size = 0
if os.path.isfile(path):
return os.path.getsize(path)
elif os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def get_share_stats(share):
path = os.path.abspath(share.get_path())
total_size = 0
if not share.parent: # don't count subshares
ZFS_PATH = share.get_zfs_path()
if ZFS_PATH:
# `zfs get` also accepts a mountpoint path, so presumably the dataset name
# above is only used to detect whether the share lives on ZFS.
ZFS_PATH = share.get_path()
total_size = subprocess.check_output(['zfs', 'get', '-H', '-o', 'value', '-p', 'used', ZFS_PATH])
else:
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return {'size':int(total_size)}
def get_total_size(paths=[]):
total_size = 0
for path in paths:
total_size += get_size(path)
return total_size
def du(path):
"""disk usage in human readable format (e.g. '2,1GB')"""
return subprocess.check_output(['du','-shL', path]).split()[0].decode('utf-8')
def list_share_dir(share,subdir=None,ajax=False):
from bioshareX.models import MetaData
PATH = share.get_path()
if subdir is not None:
PATH = os.path.join(PATH,subdir)
file_list=[]
directories={}
# Parenthesize the conditional so the whole expression feeds the format
# string; without parentheses `%` binds first and the subdir branch would
# lose its `^...[^/]+/?` anchor.
regex = r'^%s[^/]+/?' % ('' if subdir is None else re.escape(os.path.normpath(subdir)) + '/')
metadatas = {}
for md in MetaData.objects.prefetch_related('tags').filter(share=share,subpath__regex=regex):
metadatas[md.subpath]= md if not ajax else md.json()
for entry in scandir(PATH):
subpath= entry.name if subdir is None else os.path.join(subdir,entry.name)
metadata = metadatas[subpath] if subpath in metadatas else {}
if entry.is_file():
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = entry.stat()
file={'name':entry.name,'extension':entry.name.split('.').pop() if '.' in entry.name else None,'size':sizeof_fmt(size),'bytes':size,'modified':datetime.datetime.fromtimestamp(mtime).strftime("%m/%d/%Y %H:%M"),'metadata':metadata,'isText':True}
file_list.append(file)
else:
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = entry.stat()
dir={'name':entry.name,'size':None,'metadata':metadata,'modified':datetime.datetime.fromtimestamp(mtime).strftime("%m/%d/%Y %H:%M")}
directories[os.path.realpath(entry.path)]=dir
return (file_list,directories)
def md5sum(path):
output = subprocess.check_output([settings.MD5SUM_COMMAND,path]) #Much more efficient than reading file contents into python and using hashlib
#IE: output = 4968966191e485885a0ed8854c591720 /tmp/Project/Undetermined_S0_L002_R2_001.fastq.gz
return re.findall(r'([0-9a-fA-F]{32})',output)[0]
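# Illustrative view (an assumption, not part of this module) showing how the
# decorators above are meant to stack: share_access_decorator resolves the
# share and enforces permissions first, then safe_path_decorator validates
# the requested subpath against the directory whitelist.
@share_access_decorator(['view_share_files'])
@safe_path_decorator()
def _list_directory_sketch(request, share=None, subpath=None):
    file_list, directories = list_share_dir(share, subdir=subpath, ajax=True)
    return json_response({'files': file_list,
                          'directories': list(directories.values())})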
India's economic graph has taken a southward turn because of the demonetization adventure undertaken about a year ago, a move that was required neither economically nor technically, said former Prime Minister of India Manmohan Singh, who is considered the architect of the early-'90s reforms.
Eminent economist and Nobel laureate Amartya Sen recently remarked that Prime Minister Narendra Modi's demonetization decision undermines trust in the entire economy.
As originally claimed by the PM, demonetization was meant to weed out black money from the country, but the bold decision backfired and the country's largely cash-based economy was disrupted. Moreover, people with undocumented money opened multiple bank accounts to convert their cash illegally. The objective was not served, though the government is investigating such illegal transactions; it will take years before such individuals are booked.
The economic slowdown that began just before demonetization worsened after the note ban. Announcements of new projects declined and, in the words of the Centre for Monitoring Indian Economy (CMIE), India's capex cycle was hit hard.
According to Anarock Property Consultants, project launches across the country's top seven cities declined 60 percent in the quarter ended September 30 compared with the last three months of last year, when the 1,000- and 500-rupee notes were banned overnight.
The chairman and managing director of property consultant Knight Frank India, Shishir Baijal, said such regulations have made the property market even more sluggish, and that the sector will need roughly another 18 months of observation.
Critics point out that the Modi government has also failed to create new jobs. About 10 million jobs are needed every year, but only 230,000 were added in the last fiscal year.
import asyncio
import os
import sys
import traceback
import uuid
from distutils.version import StrictVersion
from typing import Any, Dict
import pytest
from aiodocker.docker import Docker
from aiodocker.exceptions import DockerError
API_VERSIONS = {
"17.06": "v1.30",
"17.09": "v1.32",
"17.12": "v1.35",
"18.02": "v1.36",
"18.03": "v1.37",
"18.06": "v1.38",
"18.09": "v1.39",
}
if sys.platform == "win32":
if sys.version_info < (3, 7):
# Python 3.6 has no WindowsProactorEventLoopPolicy class
from asyncio import events
class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
_loop_factory = asyncio.ProactorEventLoop
else:
WindowsProactorEventLoopPolicy = asyncio.WindowsProactorEventLoopPolicy
@pytest.fixture
def event_loop(request):
"""Create an instance of the default event loop for each test case."""
if sys.platform == "win32":
asyncio.set_event_loop_policy(WindowsProactorEventLoopPolicy())
loop = asyncio.new_event_loop()
yield loop
loop.close()
def _random_name():
return "aiodocker-" + uuid.uuid4().hex[:7]
@pytest.fixture(scope="session")
def random_name():
yield _random_name
# If some test cases have used randomly-named temporary images,
# we need to clean up them!
if os.environ.get("CI", "") == "true":
# But inside the CI server, we don't need clean up!
return
event_loop = asyncio.get_event_loop()
async def _clean():
docker = Docker()
images = await docker.images.list()
for img in images:
if img["RepoTags"] is None:
continue
try:
if img["RepoTags"][0].startswith("aiodocker-"):
print("Deleting image id: {0}".format(img["Id"]))
await docker.images.delete(img["Id"], force=True)
except DockerError:
traceback.print_exc()
await docker.close()
event_loop.run_until_complete(_clean())
@pytest.fixture(scope="session")
def image_name() -> str:
if sys.platform == "win32":
return "python:latest"
else:
return "python:alpine"
@pytest.fixture(scope="session")
def testing_images(image_name: str) -> None:
# Prepare a small Linux image shared by most test cases.
event_loop = asyncio.get_event_loop()
async def _pull():
docker = Docker()
required_images = [image_name]
if image_name != "python:latest":
required_images.append("python:latest")
for img in required_images:
try:
await docker.images.inspect(img)
except DockerError as e:
assert e.status == 404
print('Pulling "{img}" for the testing session...'.format(img=img))
await docker.pull(img)
await docker.close()
event_loop.run_until_complete(_pull())
@pytest.fixture
def docker(event_loop, testing_images):
kwargs = {}
version = os.environ.get("DOCKER_VERSION")
if version:
for k, v in API_VERSIONS.items():
if version.startswith(k):
kwargs["api_version"] = v
break
else:
raise RuntimeError(f"Cannot find docker API version for {version}")
async def _make_docker():
return Docker(**kwargs)
docker = event_loop.run_until_complete(_make_docker())
yield docker
async def _finalize():
await docker.close()
event_loop.run_until_complete(_finalize())
@pytest.fixture
async def requires_api_version(docker):
# Update version info from auto to the real value
await docker.version()
def check(version, reason):
if StrictVersion(docker.api_version[1:]) < StrictVersion(version[1:]):
pytest.skip(reason)
yield check
@pytest.fixture
def swarm(event_loop, docker):
if sys.platform == "win32":
pytest.skip("swarm commands dont work on Windows")
assert event_loop.run_until_complete(docker.swarm.init())
yield docker
assert event_loop.run_until_complete(docker.swarm.leave(force=True))
@pytest.fixture
def make_container(event_loop, docker):
container = None
async def _spawn(config: Dict[str, Any], name=None):
nonlocal container
container = await docker.containers.create_or_replace(config=config, name=name)
await container.start()
return container
yield _spawn
async def _delete():
nonlocal container
if container is not None:
await container.delete(force=True)
event_loop.run_until_complete(_delete())
@pytest.fixture
async def shell_container(event_loop, docker, make_container, image_name):
config = {
"Cmd": ["python"],
"Image": image_name,
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"Tty": True,
"OpenStdin": True,
}
return await make_container(config, name="aiodocker-testing-shell")
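# Illustrative test (an assumption; it would normally live in a test module,
# not in conftest). It exercises the docker/shell_container fixtures above
# and assumes the pytest-asyncio plugin for async test support.
@pytest.mark.asyncio
async def test_shell_container_is_running(docker, shell_container):
    info = await shell_container.show()
    assert info["State"]["Running"]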
According to a report by The Wall Street Journal, the government-affiliated Shanghai Consumer Council claims it has received eight reports from users saying their iPhone 6 handsets spontaneously caught fire. This marks the third battery-related complaint against Apple from Chinese consumer groups over the past thirty days. Apple has, however, said that it analyzed the affected phones and found that the fires followed "external physical damage".
The company encouraged customers with issues to visit an Apple store or contact company support. “We appreciate that customers are more concerned than ever about the performance and safety of batteries in their mobile devices,” Apple said in a statement.
Apple said a company investigation found that the problem was limited to iPhone 6s devices made in September and October 2015 whose battery components were exposed to “ambient air” longer than they should have been during assembly. Apple said it would replace the faulty batteries world-wide.
Apple has also recently said that it will soon update iOS with a “diagnostic capability” to gather more information about battery-related issues, adding that it could use the information to “improve the algorithms used to manage battery performance and shutdowns”. The company is also encouraging customers to contact customer service and use their battery replacement program website to see if their device is eligible for free replacement service.
The complaints from the consumer groups follow a separate Chinese government move to shut down Apple's online book and movie services because they violated local media guidelines.
import math
import numpy as np
import matplotlib.pyplot as plt
from RandomForest import RandomForest
from DecisionTree import Tree
from NN import NN
number_of_points = 100 #number of data points per class
number_of_classes = 3 #number of classes in dataset
#data generation: creates a spiral dataset with number_of_classes classes and number_of_points samples per class
def generateData(number_of_points, number_of_classes):
data = np.empty([3, number_of_classes * number_of_points])
for i in range(0, number_of_classes):
data[0, i*number_of_points : (i+1) * number_of_points] = np.float(i)
radius = np.linspace(0.05, 0.9, number_of_points)
theta = np.linspace(i*2*math.pi/number_of_classes, \
i*2*math.pi/number_of_classes + 3*math.pi/2, number_of_points) +\
np.random.normal(0, .1, number_of_points)
x = radius * np.cos(theta)
y = radius * np.sin(theta)
datum = np.matrix(np.transpose(np.column_stack((x, y))))
data[1:, i*number_of_points:(i+1)*number_of_points] = datum
return data
def display(data, hists):
display_decision_boundary(hists)
display_training_data(data)
plt.show()
#displays training data for classification
def display_training_data(data):
colors = ['green', 'blue', 'red', 'yellow', 'orange']
for i in range(0, number_of_classes):
plt.scatter(data[1, i*number_of_points:(i+1)*number_of_points], data[2, i*number_of_points:(i+1)*number_of_points], c=colors[i], s=40)
def display_decision_boundary(hists):
plt.imshow(hists, interpolation='nearest', extent=[-1,1,-1,1])
#returns histograms in range -1,1 -1,1
def train_random_forest(data, size):
return RandomForest(data, size, 7, number_of_classes)
#creates a decision boundary represented as a size x size x 3 matrix of class histograms
def create_decision_boundary(forest, size):
def scale_to_grid(i, size):
return -1 + 2 * (i / size)
hists = np.zeros([size, size, 3])
for i in range(0, size):
for j in range(0, size):
hists[i, j] = forest.test_point(np.transpose(np.matrix([scale_to_grid(i, size), scale_to_grid(j, size)])))
return hists
def train_nn(data):
# print(data.T, np.shape(data))
print(data.T[:, 1:3].shape)
# print(data.T[range(400), 0].shape)
nn = NN(data.T[: ,1:], data.T[:, 0])
nn.train()
nn.display()
if __name__ == '__main__':
data = generateData(number_of_points, number_of_classes)
# train_softmax(data)
# train_nn(data)
print('creating forest')
forest = train_random_forest(data, 200)
print('forest created')
print('creating decision boundary')
hists = create_decision_boundary(forest, 25)
print('decision boundary created')
print('displaying data and decision boundary')
display(data, hists)
Note: These are only the heads of families, my brick walls.
Elizabeth Hickson was born 1864 in Mold. I lived there many years later!
Walter Hickson, born 1886 in Boston, married ca 1934 Katherine Mitchell, born 1908 in Belfast. Their family moved to Co Durham and to Wales.
If I have misspelled any of these place names, please let me know.