Dataset schema (column : dtype : observed range or classes):

- repo_name : string : lengths 6 to 61
- path : string : lengths 4 to 230
- copies : string : lengths 1 to 3
- size : string : lengths 4 to 6
- text : string : lengths 1.01k to 850k
- license : string : 15 classes
- hash : int64 : -9,220,477,234,079,998,000 to 9,219,060,020B
- line_mean : float64 : 11.6 to 96.6
- line_max : int64 : 32 to 939
- alpha_frac : float64 : 0.26 to 0.9
- autogenerated : bool : 1 class
- ratio : float64 : 1.62 to 6.1
- config_test : bool : 2 classes
- has_no_keywords : bool : 2 classes
- few_assignments : bool : 1 class

repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Akrog/cinder | cinder/tests/scheduler/test_host_filters.py | 4 | 38357 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import mock
from oslo_serialization import jsonutils
from requests import exceptions as request_exceptions
from cinder.compute import nova
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common.scheduler import filters
from cinder import test
from cinder.tests.scheduler import fakes
from cinder.tests import utils
class HostFiltersTestCase(test.TestCase):
"""Test case for host filters."""
def setUp(self):
super(HostFiltersTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
# This has a side effect of testing 'get_filter_classes'
# when specifying a method (in this case, our standard filters)
filter_handler = filters.HostFilterHandler('cinder.scheduler.filters')
classes = filter_handler.get_all_classes()
self.class_map = {}
for cls in classes:
self.class_map[cls.__name__] = cls
class CapacityFilterTestCase(HostFiltersTestCase):
def setUp(self):
super(CapacityFilterTestCase, self).setUp()
self.json_query = jsonutils.dumps(
['and',
['>=', '$free_capacity_gb', 1024],
['>=', '$total_capacity_gb', 10 * 1024]])
@mock.patch('cinder.utils.service_is_up')
def test_filter_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 200,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_current_host_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100, 'vol_exists_on': 'host1'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 100,
'free_capacity_gb': 10,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 200,
'free_capacity_gb': 120,
'reserved_percentage': 20,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_passes_infinite(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 'infinite',
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_passes_unknown(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 'unknown',
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_thin_true_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> False'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 200,
'provisioned_capacity_gb': 500,
'max_over_subscription_ratio': 2.0,
'reserved_percentage': 5,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_thin_false_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> False',
'capabilities:thick_provisioning_support':
'<is> True'}
service = {'disabled': False}
# If "thin_provisioning_support" is False,
# "max_over_subscription_ratio" will be ignored.
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 200,
'provisioned_capacity_gb': 300,
'max_over_subscription_ratio': 1.0,
'reserved_percentage': 5,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_over_subscription_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> False'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 200,
'provisioned_capacity_gb': 500,
'max_over_subscription_ratio': 1.0,
'reserved_percentage': 5,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_over_subscription_fails2(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 30,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> False'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 30,
'provisioned_capacity_gb': 500,
'max_over_subscription_ratio': 1.0,
'reserved_percentage': 0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_reserved_thin_true_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> False'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 100,
'provisioned_capacity_gb': 500,
'max_over_subscription_ratio': 2.0,
'reserved_percentage': 5,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_reserved_thin_false_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> False',
'capabilities:thick_provisioning_support':
'<is> True'}
service = {'disabled': False}
# If "thin_provisioning_support" is False,
# "max_over_subscription_ratio" will be ignored.
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 100,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 1.0,
'reserved_percentage': 5,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_reserved_thin_thick_true_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> True'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 100,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'reserved_percentage': 5,
'thin_provisioning_support': True,
'thick_provisioning_support': True,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_reserved_thin_thick_true_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> True'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 125,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'reserved_percentage': 5,
'thin_provisioning_support': True,
'thick_provisioning_support': True,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_reserved_thin_thick_true_fails2(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> True'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 99,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'reserved_percentage': 5,
'thin_provisioning_support': True,
'thick_provisioning_support': True,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_filter_reserved_thin_thick_true_passes2(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
'<is> True'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
'free_capacity_gb': 100,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'reserved_percentage': 0,
'thin_provisioning_support': True,
'thick_provisioning_support': True,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
class AffinityFilterTestCase(HostFiltersTestCase):
@mock.patch('cinder.utils.service_is_up')
def test_different_filter_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['DifferentBackendFilter']()
service = {'disabled': False}
host = fakes.FakeHostState('host1:pool0',
{'free_capacity_gb': '1000',
'updated_at': None,
'service': service})
volume = utils.create_volume(self.context, host='host1:pool1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [vol_id], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_different_filter_legacy_volume_hint_passes(
self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['DifferentBackendFilter']()
service = {'disabled': False}
host = fakes.FakeHostState('host1:pool0',
{'free_capacity_gb': '1000',
'updated_at': None,
'service': service})
volume = utils.create_volume(self.context, host='host1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [vol_id], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_different_filter_non_list_fails(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host2', {})
volume = utils.create_volume(self.context, host='host2')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': vol_id}}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_different_filter_fails(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [vol_id], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_different_filter_handles_none(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_different_filter_handles_deleted_instance(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host1')
vol_id = volume.id
db.volume_destroy(utils.get_test_admin_context(), vol_id)
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [vol_id], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_different_filter_fail_nonuuid_hint(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': "NOT-a-valid-UUID", }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_different_filter_handles_multiple_uuids(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume1 = utils.create_volume(self.context, host='host1:pool1')
vol_id1 = volume1.id
volume2 = utils.create_volume(self.context, host='host1:pool3')
vol_id2 = volume2.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [vol_id1, vol_id2], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_different_filter_handles_invalid_uuids(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host2')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [vol_id, "NOT-a-valid-UUID"], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_same_filter_no_list_passes(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': vol_id}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_same_filter_passes(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume = utils.create_volume(self.context, host='host1#pool0')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [vol_id], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_same_filter_legacy_vol_fails(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume = utils.create_volume(self.context, host='host1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [vol_id], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_same_filter_fails(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume = utils.create_volume(self.context, host='host1#pool1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [vol_id], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_same_filter_vol_list_pass(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume1 = utils.create_volume(self.context, host='host1')
vol_id1 = volume1.id
volume2 = utils.create_volume(self.context, host='host2')
vol_id2 = volume2.id
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [vol_id1, vol_id2], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_same_filter_handles_none(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_same_filter_handles_deleted_instance(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host2')
vol_id = volume.id
db.volume_destroy(utils.get_test_admin_context(), vol_id)
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [vol_id], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_same_filter_fail_nonuuid_hint(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': "NOT-a-valid-UUID", }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
class DriverFilterTestCase(HostFiltersTestCase):
def test_passing_function(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': '1 == 1',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_failing_function(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': '1 == 2',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
def test_no_filter_function(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': None,
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_not_implemented(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_no_volume_extra_specs(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': '1 == 1',
}
})
filter_properties = {'volume_type': {}}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_volume_backend_name_different(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': '1 == 1',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake2',
}
}
}
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
def test_function_extra_spec_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': 'extra.var == 1',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
'var': 1,
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_function_stats_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'total_capacity_gb': 100,
'capabilities': {
'filter_function': 'stats.total_capacity_gb < 200',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_function_volume_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': 'volume.size < 5',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
},
'request_spec': {
'volume_properties': {
'size': 1
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_function_qos_spec_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': 'qos.var == 1',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
},
'qos_specs': {
'var': 1
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_function_exception_caught(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': '1 / 0 == 0',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
def test_function_empty_qos(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'filter_function': 'qos.maxiops == 1',
}
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
},
'qos_specs': None
}
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
def test_capabilities(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
'volume_backend_name': 'fake',
'capabilities': {
'foo': 10,
'filter_function': 'capabilities.foo == 10',
},
})
filter_properties = {
'volume_type': {
'extra_specs': {
'volume_backend_name': 'fake',
}
}
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
class InstanceLocalityFilterTestCase(HostFiltersTestCase):
def setUp(self):
super(InstanceLocalityFilterTestCase, self).setUp()
self.override_config('nova_endpoint_template',
'http://novahost:8774/v2/%(project_id)s')
self.context.service_catalog = \
[{'type': 'compute', 'name': 'nova', 'endpoints':
[{'publicURL': 'http://novahost:8774/v2/e3f0833dc08b4cea'}]},
{'type': 'identity', 'name': 'keystone', 'endpoints':
[{'publicURL': 'http://keystonehost:5000/v2.0'}]}]
@mock.patch('cinder.compute.nova.novaclient')
def test_same_host(self, _mock_novaclient):
_mock_novaclient.return_value = fakes.FakeNovaClient()
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
uuid = nova.novaclient().servers.create('host1')
filter_properties = {'context': self.context,
'scheduler_hints': {'local_to_instance': uuid}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.compute.nova.novaclient')
def test_different_host(self, _mock_novaclient):
_mock_novaclient.return_value = fakes.FakeNovaClient()
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
uuid = nova.novaclient().servers.create('host2')
filter_properties = {'context': self.context,
'scheduler_hints': {'local_to_instance': uuid}}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_handles_none(self):
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context,
'scheduler_hints': None}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_invalid_uuid(self):
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context,
'scheduler_hints':
{'local_to_instance': 'e29b11d4-not-valid-a716'}}
self.assertRaises(exception.InvalidUUID,
filt_cls.host_passes, host, filter_properties)
@mock.patch('cinder.compute.nova.novaclient')
def test_nova_no_extended_server_attributes(self, _mock_novaclient):
_mock_novaclient.return_value = fakes.FakeNovaClient(
ext_srv_attr=False)
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
uuid = nova.novaclient().servers.create('host1')
filter_properties = {'context': self.context,
'scheduler_hints': {'local_to_instance': uuid}}
self.assertRaises(exception.CinderException,
filt_cls.host_passes, host, filter_properties)
@mock.patch('cinder.compute.nova.novaclient')
def test_nova_down_does_not_alter_other_filters(self, _mock_novaclient):
# Simulate Nova API is not available
_mock_novaclient.side_effect = Exception
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context, 'size': 100}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('requests.request')
def test_nova_timeout(self, _mock_request):
# Simulate a HTTP timeout
_mock_request.side_effect = request_exceptions.Timeout
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = \
{'context': self.context, 'scheduler_hints':
{'local_to_instance': 'e29b11d4-15ef-34a9-a716-598a6f0b5467'}}
self.assertRaises(exception.APITimeout,
filt_cls.host_passes, host, filter_properties)
| apache-2.0 | -3,520,615,006,342,418,400 | 41.058114 | 78 | 0.507574 | false | 4.371168 | true | false | false |
rexzhang/rpress | rpress/views/auth.py | 1 | 1484 | #!/usr/bin/env python
# coding=utf-8
import flask
from flask import request, redirect, url_for, flash, abort
from flask_login import login_required

from rpress.runtimes.template import render_template
from rpress.runtimes.auth import user_login, user_logout
from rpress.forms import LoginForm


auth = flask.Blueprint('auth', __name__)


@auth.route('/login', methods=['GET', 'POST'])
def login():
    # Here we use a class of some kind to represent and validate our
    # client-side form data. For example, WTForms is a library that will
    # handle this for us, and we use a custom LoginForm to validate.
    form = LoginForm()
    if form.validate_on_submit():
        # Login and validate the user.
        # user should be an instance of your `User` class
        if not user_login(form.username.data, form.password.data):
            flash('login fail.')
            abort(401)
            # return redirect(url_for('.index'))

        flash('Logged in successfully.')

        next_location = request.args.get('next')
        # next_is_valid should check if the user has valid
        # permission to access the `next` url
        # if not next_is_valid(next):
        #     return flaskabort(400)

        return redirect(next_location or url_for('rpadmin_dashboard.dashboard'))
    return render_template('/common/login.html', form=form)


@auth.route("/logout")
@login_required
def logout():
    user_logout()
    return redirect(url_for('post_page.paginate_with_all'))
| gpl-3.0 | 3,280,459,609,077,871,600 | 30.574468 | 80 | 0.669137 | false | 3.834625 | false | false | false |
proximate/proximate | sendfile.py | 1 | 8344 | #
# Proximate - Peer-to-peer social networking
#
# Copyright (c) 2008-2011 Nokia Corporation
#
# All rights reserved.
#
# This software is licensed under The Clear BSD license.
# See the LICENSE file for more details.
#
import os
from bencode import fmt_bdecode, bencode
from ioutils import get_flen, TCP_Queue, TCPQ_ERROR
from plugins import Plugin, get_plugin_by_type
from support import warning
from proximateprotocol import TP_SEND_FILE, valid_receive_name, \
PLUGIN_TYPE_COMMUNITY, PLUGIN_TYPE_SEND_FILE, \
TP_CONNECT_TIMEOUT, PLUGIN_TYPE_NOTIFICATION, \
PLUGIN_TYPE_FILE_TRANSFER, TP_MAX_TRANSFER
from utils import format_bytes
SEND_FILE_ACCEPT = 'mkay'
SEND_FILE_DENY = 'nothx'
community = None
notify = None
sendfile = None
ACCEPT_TIMEOUT = 300
class Send_File_Server:
""" Process incoming sendfile connection """
sendspec = {'uid': str,
'flen': lambda flen: (type(flen) == int or type(flen) == long) and flen >= 0,
'name': valid_receive_name,
}
def __init__(self, address, sock, data):
self.q = TCP_Queue(self.msghandler, closehandler=self.queue_closed)
# Close queue that is idle for a period of time
self.q.set_timeout(ACCEPT_TIMEOUT)
self.address = address
self.initstate = True
self.f = None
self.ui = None
self.pos = 0
self.user = None
self.name = None
self.flen = None
self.cb = None
self.ctx = None
self.q.append_input(data)
self.q.initialize(sock)
def queue_closed(self, q, parameter, msg):
if self.f != None:
self.f.close()
self.f = None
if self.ui != None:
self.ui.cleanup('End')
self.ui = None
if self.cb != None:
self.cb(self.pos == self.flen, self.ctx)
self.cb = None
if self.name != None and self.pos < self.flen:
notify('Unable to receive a file from %s: %s' % (self.user.get('nick'), self.name), True)
self.name = None
def msghandler(self, q, data, parameter):
if not self.initstate:
warning('send file server: protocol violation!\n')
return False
self.initstate = False
d = fmt_bdecode(self.sendspec, data)
if d == None:
warning('send file server: invalid msg: %s\n' % data)
return False
self.user = community.safe_get_user(d['uid'], self.address[0])
if self.user == None:
warning('send file server: invalid uid: %s\n' % d['uid'])
return False
self.name = d['name']
self.flen = d['flen']
notify('Got a file send request from %s: %s (%s)' % (self.user.get('nick'), self.name, format_bytes(self.flen)))
for cb in sendfile.receive_cb:
cb(self.accept_send, self.user, self.name)
return True
def abort_cb(self, ctx):
self.q.close(msg='Aborted')
def accept_send(self, accept, destname, cb, ctx=None):
""" callback(success, bytes, ctx) """
if self.name == None:
# Aborted
return
if not accept:
self.q.write(SEND_FILE_DENY)
self.q.close_after_send('File denied')
return
filetransfer = get_plugin_by_type(PLUGIN_TYPE_FILE_TRANSFER)
if filetransfer != None:
title = 'Receiving from %s: %s' % (self.user.get('nick'), self.name)
self.ui = filetransfer.add_transfer(title, self.flen, self.abort_cb)
self.q.set_timeout(TP_CONNECT_TIMEOUT)
self.cb = cb
self.ctx = ctx
self.q.write(SEND_FILE_ACCEPT)
try:
self.f = open(destname, 'w')
except IOError, (errno, strerror):
self.q.close(TCPQ_ERROR, msg=strerror)
return
self.q.set_recv_handler(self.receive)
def receive(self, data):
amount = min(len(data), self.flen - self.pos)
try:
self.f.write(data[0:amount])
except IOError, (errno, strerror):
self.q.close(TCPQ_ERROR, msg=strerror)
return None
self.pos += amount
if self.ui != None:
self.ui.update(amount)
if self.flen == self.pos:
notify('Received a file from %s succefully: %s' % (self.user.get('nick'), self.name))
self.q.close(msg='Complete')
return None
return amount
class Send_File:
def __init__(self, user, fname):
self.q = TCP_Queue(self.msghandler, closehandler=self.queue_closed)
self.user = user
self.f = None
self.fname = fname
self.name = os.path.basename(fname)
self.ui = None
self.initstate = True
self.pos = 0
self.flen = None
def queue_closed(self, q, parameter, msg):
if self.f != None:
self.f.close()
self.f = None
if self.ui != None:
self.ui.cleanup('End')
self.ui = None
if self.flen != None and self.pos < self.flen:
notify('Unable to send a file to %s: %s' % (self.user.get('nick'), self.name), True)
self.flen = None
def begin(self):
try:
self.f = open(self.fname, 'r')
except IOError, (errno, strerror):
self.q.close(TCPQ_ERROR, msg=strerror)
return False
try:
self.f.seek(0, os.SEEK_END)
except IOError, (errno, strerror):
self.q.close(TCPQ_ERROR, msg=strerror)
return False
self.flen = self.f.tell()
self.f.seek(0)
notify('Sending a file to %s: %s (%s)' % (self.user.get('nick'), self.name, format_bytes(self.flen)))
filetransfer = get_plugin_by_type(PLUGIN_TYPE_FILE_TRANSFER)
if filetransfer != None:
title = 'Sending to %s: %s' % (self.user.get('nick'), self.name)
self.ui = filetransfer.add_transfer(title, self.flen, self.abort_cb)
return self.connect()
def abort_cb(self, ctx):
self.q.close(msg='Aborted')
def connect(self):
ip = self.user.get('ip')
port = self.user.get('port')
if ip == None or port == None or not self.q.connect((ip, port), TP_CONNECT_TIMEOUT):
return False
prefix = TP_SEND_FILE + '\n'
self.q.write(prefix, writelength = False)
myuid = community.get_myuid()
req = {'uid': myuid, 'flen': self.flen, 'name': self.name}
self.q.write(bencode(req))
# Close queue that is idle for a period of time
self.q.set_timeout(ACCEPT_TIMEOUT)
return True
def msghandler(self, q, data, parameter):
if not self.initstate:
warning('send file: protocol violation!\n')
return False
self.initstate = False
if data == SEND_FILE_ACCEPT:
self.q.set_timeout(TP_CONNECT_TIMEOUT)
self.q.set_send_handler(self.send)
return True
elif data == SEND_FILE_DENY:
return False
warning('send file: invalid message %s\n' % data)
return False
def send(self):
amount = min(TP_MAX_TRANSFER * 4, self.flen - self.pos)
try:
chunk = self.f.read(amount)
except IOError, (errno, strerror):
self.q.close(TCPQ_ERROR, msg=strerror)
return None
self.pos += amount
if self.ui != None:
self.ui.update(amount)
if self.pos == self.flen:
notify('Sent a file to %s succefully: %s' % (self.user.get('nick'), self.name))
self.q.set_send_handler(None)
self.q.close_after_send('Complete')
return chunk
class Send_File_Plugin(Plugin):
def __init__(self):
global sendfile
self.register_plugin(PLUGIN_TYPE_SEND_FILE)
self.register_server(TP_SEND_FILE, Send_File_Server)
sendfile = self
self.receive_cb = []
def ready(self):
global community, notify
community = get_plugin_by_type(PLUGIN_TYPE_COMMUNITY)
notify = get_plugin_by_type(PLUGIN_TYPE_NOTIFICATION).notify
def send(self, user, fname):
s = Send_File(user, fname)
return s.begin()
def init(options):
Send_File_Plugin()
| bsd-3-clause | 5,660,686,219,359,146,000 | 28.69395 | 120 | 0.566994 | false | 3.550638 | false | false | false |
wizardofozzie/pybitcointools | bitcoin/file_insert.py | 1 | 4125 | import io, struct, os, sys, math
from binascii import crc32, unhexlify, hexlify
from bitcoin.main import *
from bitcoin.bci import *
from bitcoin.transaction import *
#from bitcoin.pyspecials import hexify, unhexify, by
def _mk_multisig_scriptpubkey(fo):
    # make a single output's redeemScript
    data = fo.read(65*3)
    if not data:
        return None
    script_pubkeys = []
    while data:
        chunk = data[:65]
        data = data[65:]
        # pad right side with null bytes
        if len(chunk) < 33:
            chunk += by(bytearray(33-len(chunk)))
        elif len(chunk) < 65:
            chunk += by(bytearray(65-len(chunk)))
        script_pubkeys.append(chunk)
    pubz = list(map(hexify, script_pubkeys))
    return mk_multisig_script(pubz, 1)


def _mk_txouts(fo, value=None):
    value = 547 if not value else int(value)
    hexval = hexify(struct.pack('<Q', value))    # make 8 byte LE value
    txouts = []
    while True:
        scriptPubKey = _mk_multisig_scriptpubkey(fo)
        if scriptPubKey is None:
            break
        txouts.append({'script': scriptPubKey, 'value': value})
    return txouts
    #return ''.join([(hexval + str(wrap_script(x['script']))) for x in txouts])
#Encode file into the blockchain (with prepended file length, crc32) using multisig addresses
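# Data layout used here: the payload written to the chain is
#   [4-byte little-endian length][4-byte crc32][file bytes]
# split into 65-byte chunks that become the fake "public keys" of 1-of-N
# bare multisig outputs (up to three keys, i.e. 195 payload bytes, per
# output), each output carrying a small dust value (547 satoshi by default).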
def _mk_binary_txouts(filename, value=None):
    try:
        fileobj = open(filename, 'rb').read()
    except:
        raise Exception("can't find file!")

    data = struct.pack('<I', len(fileobj)) + \
           struct.pack('<I', crc32(fileobj) & 0xffffffff) + fileobj
    fd = io.BytesIO(data)
    TXOUTS = _mk_txouts(fd, value)
    return list(TXOUTS)
    #return wrap_varint(TXOUTS)


def encode_file(filename, privkey, *args):
    """"""
    #filename, privkey, value, change_address, network, signtx
    if len(args) == 0:
        value, input_address, network, signtx = None, None, None, False
    elif len(args) == 3:
        value, input_address, network = args
        signtx = False
    elif len(args) == 4:
        value, input_address, network, signtx = args
    else:
        raise SyntaxError("params = filename, privkey, value, change_address, network, signtx")
    if not network:
        network = 'testnet'
    if input_address is None:
        input_address = privtoaddr(privkey, 111) if network == 'testnet' else privtoaddr(privkey)
    u = unspent(input_address, 'testnet', source='blockr') if network == 'testnet' else unspent(input_address)
    value = 547 if value is None else int(value)
    TXFEE = int(math.ceil(1.1 * (10000*os.path.getsize(filename)/1000)))
    OUTS = _mk_binary_txouts(filename, value)
    TOTALFEE = TXFEE + int(value)*len(OUTS)
    INS = select(u, TOTALFEE)
    rawtx = mksend(INS, OUTS, input_address, TXFEE)
    if signtx:
        signedtx = sign(rawtx, 0, privkey, 1)
        return signedtx
    return rawtx


def decode_file(txid, network='btc'):
    """Returns decoded blockchain binary file as bytes, ready to write to a file"""
    # TODO: multiple TxIDs? verify encode_file output?
    assert network in ('btc', 'testnet')
    txh = fetchtx(txid, network, source='blockr')
    txo = deserialize(txh)
    outs1 = map(deserialize_script, multiaccess(txo['outs'], 'script'))
    # get hex key data from multisig scripts
    outs2 = filter(lambda l: l[-1] == 174, outs1)    # TODO: check for _non-p2sh_ outputs
    outs3 = map(lambda l: l[1:-2], outs2)
    data = unhexify(''.join([item for sublist in outs3 for item in sublist]))    # base 256 of encoded data
    # TODO: are length & crc32 prepended?
    length = struct.unpack('<I', data[0:4])[0]    # TODO: check length == len(data)
    checksum = struct.unpack('<I', data[4:8])[0]
    data = data[8:8+length]
    assert checksum == crc32(data) & 0xffffffff
    return data    # TODO: write return to file object?


# def decode_files(txids, network='btc'):
#     if isinstance(txids, string_types):
#         return decode_file(txids, network)
#     elif isinstance(txids, list) and len(txids) == 1:
#         return decode_file(txids[0], network)
#     return ''.join([decode_file(x) for x in txids])
| mit | 1,319,070,831,841,523,500 | 34.25641 | 110 | 0.635879 | false | 3.315916 | false | false | false |
OpenAddressesUK/common-ETL | bulkinsert.py | 1 | 2134 | #
# Open addresses ETL Common Library
#
# Bulk Insert Class
#
# Version 1.0 (Python) in progress
# Author John Murray
# Licence MIT
#
# Purpose Bulk insert items into a MySQL or MariaDB table
#
# Arguments: database cursor, table name, list of fields, max = maximum buffer (2000), ignore = ignore duplicate keys (false)
#
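#
# Illustrative usage sketch (not from the original file; the cursor, table
# name and column list below are assumptions):
#
#   inserter = BulkInsert(cur, "addresses", ["street", "town", "postcode"], max=1000, ignore=True)
#   inserter.addRow(["10 Downing Street", "London", "SW1A 2AA"])
#   inserter.close()   # flushes any rows still buffered
#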
import MySQLdb
import string
class BulkInsert:
    def __init__(self, cur, table, fields, max=2000, ignore=False):  # Instantiation - pass database
        self.max_rows = max
        self.cursor = cur
        self.fields = fields
        self.table = table
        if ignore:
            self.type = "IGNORE "
        else:
            self.type = ""
        self.nrecs = 0
        self.bufrecs = 0
        self.values = []
        self.prefix = "INSERT " + self.type + "INTO `" + self.table + "` ("
        self.prefix += string.join(["`" + field + "`" for field in fields], ",")
        self.prefix += ") VALUES "

    def close(self):
        if self.bufrecs > 0:
            self.writeData()

    def addRow(self, row):
        self.values.append(row)
        self.nrecs += 1
        self.bufrecs += 1
        if (self.nrecs % self.max_rows) == 0:
            self.writeData()

    def writeData(self):
        query = self.prefix
        for i in range(0, len(self.values)):
            if i > 0:
                query += ", "
            query += "("
            for j in range(0, len(self.fields)):
                if j > 0:
                    query += ", "
                if isinstance(self.values[i][j], (int, long, float, complex)):  # Is numeric
                    query += "'" + str(self.values[i][j]) + "'"
                elif self.values[i][j] == "NULL":
                    query += "NULL"
                elif self.values[i][j][0:12] == "GeomFromText":
                    query += self.values[i][j]
                else:
                    query += "'" + self.values[i][j].replace("'", "\\'") + "'"
            query += ")"
        query += ";"
        self.cursor.execute(query)
        self.values = []
        self.bufrecs = 0
| mit | -4,134,537,101,536,599,600 | 30.382353 | 129 | 0.481256 | false | 3.817531 | false | false | false |
garnaat/petard | petard/resource.py | 1 | 1941 | # Copyright (c) 2015 Mitch Garnaat. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class Resource(object):
    def __init__(self, response):
        self.href = '/'
        self.items = []
        for key in response:
            if key == '_links':
                self._links = response['_links']
                if 'self' in self._links:
                    self.href = self._links['self']['href']
                self.url = self._links.get('self', '/')
            elif key == '_embedded':
                if isinstance(response['_embedded']['item'], dict):
                    self.items.append(Resource(response['_embedded']['item']))
                else:
                    for item in response['_embedded']['item']:
                        self.items.append(Resource(item))
            else:
                setattr(self, key, response[key])

    def __repr__(self):
        return 'Resource: %s' % self.href
| apache-2.0 | -7,147,697,005,222,159,000 | 42.133333 | 78 | 0.63627 | false | 4.513953 | false | false | false |
hagifoo/gae-pomodoro | app/src/application/handler/__init__.py | 1 | 2303 | # coding: utf-8
import json
import logging
import webapp2
from webapp2_extras import sessions
from google.appengine.api.taskqueue import TombstonedTaskError, TaskAlreadyExistsError, DuplicateTaskNameError
from domain.entity import User
import error
class BaseHandler(webapp2.RequestHandler):
    def dispatch(self):
        self.session_store = sessions.get_store(request=self.request)
        user = self.session.get('user')
        if user:
            self.user = User.from_json(user)
        else:
            self.user = None

        try:
            return webapp2.RequestHandler.dispatch(self)
        except webapp2.HTTPException as e:
            self.response.set_status(e.code)
            if e.message:
                self.response.write(e.message)
        finally:
            self.session_store.save_sessions(self.response)

    @webapp2.cached_property
    def session(self):
        return self.session_store.get_session()

    @property
    def session_id(self):
        cookie_name = self.session_store.config['cookie_name']
        return self.request.cookies[cookie_name]


class JsonHandler(BaseHandler):
    def dispatch(self):
        j = super(JsonHandler, self).dispatch()
        self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
        if j is not None:
            self.response.out.write(json.dumps(j))


class TaskHandler(BaseHandler):
    """Handle unrecoverable errors."""
    def dispatch(self):
        try:
            super(TaskHandler, self).dispatch()
        # Unrecoverable Exceptions such as Invalid Parameter
        except error.TaskUnrecoverableException as e:
            logging.error(e)
        except (TombstonedTaskError,
                TaskAlreadyExistsError,
                DuplicateTaskNameError) as e:
            logging.error(e)


def signin_user_only(f):
    """Raise UnauthorizedException if session user is None

    Examples:
        class MyHandler(BaseHandler):
            @signin_user_only
            def get(self):
                # following code is executed only if user is signed in.
                ...
    """
    def wrapper(self, *args, **keywords):
        if not self.user:
            raise error.UnauthorizedException('Need sign in')
        else:
            return f(self, *args, **keywords)
    return wrapper
| mit | -780,267,513,866,127,100 | 27.432099 | 110 | 0.635258 | false | 4.15704 | false | false | false |
imAArtist/simIr | Pipline/moss.py | 1 | 3399 | import string
import re
import os


def read_from_dir(path='../Data/singleFile/'):
    pool = []
    dirs = [i for i in os.listdir(path) if i.find('cpp') != -1]
    for d in dirs:
        with open(os.path.join(path, d)) as f:
            pool.append(''.join(f.readlines()))
    return pool


def clearFile(f, pattern=None):
    if pattern is None:
        pattern = r'[^a-z0-9\"\':;><,.\/\?~`!@#$%^&*()_\+\-=\|\{\}\[\]]'
    return ''.join(re.split(pattern, f.lower()))
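

# winnowing() implements the fingerprint-selection step of the MOSS/winnowing
# scheme: slide a window of `window_size` hashes over the sequence and keep
# the minimum hash of each window (right-most minimum on ties), recording it
# together with its negated position so the same occurrence is not selected
# twice.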
def winnowing(hv, window_size=4):
    if window_size > len(hv):
        raise ValueError('windows size must less than length of hash list')
    forb = set()
    fp = []
    for index in range(len(hv) - window_size + 1):
        t = [(hv[i], -i) for i in range(index, index + window_size)]
        t = sorted(t)
        if t[0][1] not in forb:
            fp.append(tuple(t[0]))
            forb.add(t[0][1])
        else:
            for j in t[1:]:
                if j[0] != t[0][0]:
                    break
                if j[1] not in forb:
                    forb.add(j[1])
                    fp.append(tuple(j))
    return fp
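

# rolling_hash() turns a cleaned source string into a sequence of k-gram
# hashes (k = gram_size). Each character is mapped to its index in
# `hash_table`, the first k indices are packed base-10 into one integer, and
# the hash is then updated incrementally: drop the leading character's
# contribution (index * 10**(k-1)), shift by 10, and add the incoming
# character's index.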
def rolling_hash(s, gram_size=4, hash_table=None):
    if hash_table is None:
        hash_table = ' {}{}{}'.format(string.ascii_lowercase, '0123456789', '"\':;><,.\/?~`!@#$%^&*()_+-=|{}[]')
    if len(s) < gram_size:
        raise ValueError('String length should larger than gram size')
    seq = []
    hv = 0
    n = 10 ** (gram_size - 1)
    for i in s[:gram_size]:
        h = hash_table.find(i)
        if h == -1:
            raise ValueError('Can not find in hash table')
        hv *= 10
        hv += h
    head = 0
    seq.append(hv)
    for i in s[gram_size:]:
        h = hash_table.find(i)
        if h == -1:
            raise ValueError('Can not find in hash table')
        hv -= n * hash_table.find(s[head])
        hv *= 10
        hv += h
        head += 1
        seq.append(hv)
    return seq
def get_hash_value(dat, hash_table=list(string.ascii_letters + ' '), gram_size=4):
    token = dat[:gram_size]
    fp = 0
    for t in token:
        fp *= 10
        try:
            fp += hash_table.index(t)
        except ValueError:
            fp = hash_table.__len__()
            hash_table.append(t)
    rst = [fp]
    prev_hv = hash_table.index(token[-1])
    for t in dat[gram_size:]:
        last_fp = int(rst[-1])
        last_fp -= gram_size * 10 * prev_hv
        last_fp *= 10
        try:
            last_fp += hash_table.index(t)
        except ValueError:
            last_fp += hash_table.__len__()
            hash_table.append(t)
        prev_hv = hash_table.index(t)
        rst.append(last_fp)
    return set(rst)
def sim(base, com):
    base = set(base)
    com = set(com)
    inter = base.intersection(com)
    score = (len(inter) / len(base), len(inter) / len(com))
    dist = 1 - (score[0] ** 2 / 2 + score[1] ** 2 / 2) ** 0.5
    return (score, dist)


def cal_sim_dup(fp_pool):
    sim_pool = {}
    for index, base in enumerate(fp_pool):
        for cindex, com in enumerate(fp_pool[index:]):
            if cindex == 0:
                continue
            sim_pool['{},{}'.format(index, index + cindex)] = sim(base, com)
    return sim_pool


if __name__ == '__main__':
    files = read_from_dir()
    files = [clearFile(f) for f in files]
    # clearFile() already lower-cases the text before hashing
    hv = [rolling_hash(f) for f in files]
    fingerprint = [winnowing(i) for i in hv]
KlemenKobau/pycheckio | Home exercises/min_and_max.py | 1 | 1097 | # very ugly solution
def min(*args, **kwargs):
    key = kwargs.get("key", None)
    if hasattr(args[0], '__iter__') and not isinstance(args[0], str):
        # is some sort of an iterable object that is not a string
        args = sorted(args, key=key)
        if len(args) == 1:
            args = args[0]
        args = sorted(args, key=key)
        return args[0]
    elif isinstance(args[0], str):
        args = args[0]
        args = sorted(list(args), key=key)
        return args[0]
    else:
        args = sorted(list(args), key=key)
        return args[0]


def max(*args, **kwargs):
    key = kwargs.get("key", None)
    if hasattr(args[0], '__iter__') and not isinstance(args[0], str):
        # is some sort of an iterable object that is not a string
        args = sorted(args, key=key, reverse=True)
        if len(args) == 1:
            args = list(args[0])
        args = sorted(args, key=key, reverse=True)
        return args[0]
    elif isinstance(args[0], str):
        args = args[0]
        args = sorted(list(args), key=key, reverse=True)
        return args[0]
    else:
        args = sorted(list(args), key=key, reverse=True)
        return args[0]


print(min([1,2,3]))
print(max((2,1)))
print(min(8,9))
print(max("yolo"))
print(min("j"))
| mit | 3,003,928,824,370,789,400 | 21.875 | 64 | 0.645397 | false | 2.67561 | false | false | false |
jbrendel/RESTx | src/python/restx/core/parameter.py | 1 | 10895 | """
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
The parameter class.
"""
from datetime import date
from datetime import time as time_class
from restx.platform_specifics import PLATFORM, PLATFORM_JYTHON
from org.mulesoft.restx.exception import RestxException
#
# Types for resource parameters
#
PARAM_STRING_LIST = "string_list"
PARAM_STRING = "string"
PARAM_PASSWORD = "password"
PARAM_BOOL = "boolean"
PARAM_DATE = "date"
PARAM_TIME = "time"
PARAM_NUMBER_LIST = "number_list"
PARAM_NUMBER = "number"
PARAM_URI = "uri"
#
# Each entry in the following table has the format:
# ( storage_types, runtime_types, conversion_func )
#
# 'storage_types' defines a list of types that this value may
# have after being read in via JSON. For example, 'date'
# will not be recognized by JSON, it is stored and loaded
# as a string. So, 'str' is one of the valid types for date
# parameters.
#
# 'runtime_types' is a list of types that are acceptable after
# proper conversion, so that we can actually work with that
# type in our programming language. For example, we really
# want dates to be of class 'date', which is why for date
# parameters we specify that type.
#
# 'conversion_func' is a function that can be used to convert
# from a storatge-type to a runtime-type. Calling this function
# also provides a proper sanity check, since those functions
# will throw errors if they fail.
#
# Note: Date is defined as YYYY-MM-DD
# Note: Time is defined as HH:MM:SS
#
def __numstr_to_num(x):
if type(x) in [ int, float ]:
return x
elif type(x) in [ str, unicode ]:
try:
return int(x)
except:
return float(x)
# Can't convert anything else
return None
def __bool_convert(x):
if type(x) is bool:
return x
else:
if x.lower() in [ "y", "yes", "true", "t", "1" ]:
return True
else:
return False
#
# Type conversion turns out to be quite odd. The more languages
# we enable, the more 'similar' types we have to recognize and
# deal with. For example, a component may expect a number as
# attribute. For a Java component that might be a BigDecimal,
# for a Python component, it might just be int or float.
# So, considering Python as the base, we start by defining all
# the python types for a particular RESTx type. Then we add the
# types of the other languages if and when appropriate.
TYPES_DICT = {
"STRING_LIST_TYPES" : [ list ],
"STRING_TYPES" : [ unicode, str ],
"PASSWORD_TYPES" : [ unicode, str ],
"BOOL_TYPES" : [ bool ],
"DATE_TYPES" : [ unicode, str ],
"TIME_TYPES" : [ unicode, str ],
"NUMBER_LIST_TYPES" : [ list ],
"NUMBER_TYPES" : [ int, float ],
"URI_TYPES" : [ unicode, str ],
}
if PLATFORM == PLATFORM_JYTHON:
# Now selectively add some of the Java types
from java.math import BigDecimal
TYPES_DICT["NUMBER_TYPES"].append(BigDecimal)
def __list_to_strlist(x):
if type(x) is not list:
x = [ x ]
return [ str(e) for e in x ]
def __list_to_numlist(x):
if type(x) is not list:
x = [ x ]
nlist = []
for e in x:
converted = __numstr_to_num(e)
if not converted:
return None
nlist.append(converted)
return nlist
TYPE_COMPATIBILITY = {
PARAM_STRING_LIST : (TYPES_DICT["STRING_LIST_TYPES"], [ list ], __list_to_strlist),
PARAM_STRING : (TYPES_DICT["STRING_TYPES"], [ str ], None),
PARAM_PASSWORD : (TYPES_DICT["PASSWORD_TYPES"], [ str ], None),
PARAM_BOOL : (TYPES_DICT["BOOL_TYPES"], [ bool ], __bool_convert),
PARAM_DATE : (TYPES_DICT["DATE_TYPES"], [ date ], lambda x : date(*[ int(elem) for elem in x.split("-")])),
PARAM_TIME : (TYPES_DICT["TIME_TYPES"], [ time_class ], lambda x : time_class(*[ int(elem) for elem in x.split(":")])),
PARAM_NUMBER_LIST : (TYPES_DICT["NUMBER_LIST_TYPES"], [ list ], __list_to_numlist),
PARAM_NUMBER : (TYPES_DICT["NUMBER_TYPES"], [ int, float ], __numstr_to_num),
PARAM_URI : (TYPES_DICT["URI_TYPES"], [ str ], None)
}
class ParameterDef(object):
"""
This class encapsulates a parameter definition.
Parameters are defined by each individual component.
Therefore, in its __init__() method each component
has to create its dictionary of ParameterDef classes
and make it available via the getParams() method.
By default, a parameter is 'required'. Note that
this parameter definition does not contain the
name of the parameter, since the name is merely
the key in the parameter definition dictionary,
which is maintained by each component.
"""
def __init__(self, ptype, desc="", required=True, default=None, choices=None):
"""
Define a new parameter.
A parameter is defined with the following attributes:
@param ptype: A type, such as PARAM_STRING, etc.
@type prtype: string
@param desc: A short, one-line description in human readable form.
@type desc: string
@param required: A flag indicating whether this parameter needs to be
set by the resource creator, or whether a default
value can be used.
@type required: boolean
@param default: A default value for this parameter of a suitable type.
Only used if 'required == False'.
@type default: Whatever is needed as default value
@param choices: If the allowed input values should be restricted to a
number of choices, specify them here as a list of strings.
@type choices: list
"""
self.ptype = ptype
self.desc = desc
self.required = required
if not self.required and default is None:
raise RestxException("A default value is required for optional parameters")
if self.required and default:
raise RestxException("A default value cannot be provided for a required parameter")
self.default = default
self.choices = choices
if self.choices:
str_choices = [ str(c) for c in self.choices ]
if self.default and str(self.default) not in str_choices:
raise RestxException("Specified default value is not listed in 'choices'")
if self.ptype not in [ PARAM_STRING, PARAM_NUMBER, PARAM_STRING_LIST, PARAM_NUMBER_LIST ]:
raise RestxException("Choices are not supported for this type.")
if self.ptype in [ PARAM_STRING_LIST, PARAM_NUMBER_LIST ]:
self.is_list = True
else:
self.is_list = False
def isList(self):
"""
Return an indication whether this is a list type or not.
"""
return self.is_list
def getDefaultVal(self):
"""
Return default value.
Javaesque naming convention, because the method was first needed
on the Java side of things.
@return: The default value.
@rtype: object
"""
return self.default
def as_dict(self):
"""
Unwraps this single parameter definition into a plain dictionary.
Needed for browsing or accessing the component's meta info.
@return: Dictionary representation of the parameter.
@rtype: dict
"""
d = dict(type = self.ptype,
desc = self.desc,
required = self.required)
if not self.required:
d['default'] = self.default
if self.choices:
d['val_choices'] = self.choices
if self.is_list:
d['multi_choice'] = True
return d
def html_type(self, name, initial=None):
"""
Return the HTML form field type for a value of this type.
Needed when we display a resource creation form.
@return: A string containing "checkbox" or "text"
@rtype: string
"""
if self.ptype == PARAM_BOOL:
yes_value = "checked " if initial == "yes" else ""
no_value = "checked " if initial == "no" else ""
return '''<label for="%s_yes"><input %stype="radio" id="%s_yes" name="%s" value="yes" />yes</label><br>
<label for="%s_no"><input %stype="radio" id="%s_no" name="%s" value="no" />no</label>''' % (name, yes_value, name, name, name, no_value, name, name)
else:
if self.choices:
if type(initial) is not list:
initial = [ initial ]
buf = '<select '
if self.ptype in [ PARAM_STRING_LIST, PARAM_NUMBER_LIST ]:
buf += "multiple size=%d " % min(8, len(self.choices))
multiple = True
else:
multiple = False
buf += 'name="%s" id="%s">' % (name, name)
if self.default and not multiple:
buf += '<option value="">--- Accept default ---</option>'
                # Initial may be a string, since that is all the type information we have when we convert
                # the form input to a data structure
                buf += '%s</select>' % "".join([ '<option value="%s"%s>%s</option>' % (c, ' selected="selected"' if initial and str(c) in initial else "", c) for c in self.choices ])
return buf
if initial:
init_val = 'value="%s" ' % initial
else:
init_val = ''
if self.ptype == PARAM_PASSWORD:
type_str = "password"
else:
type_str = "text"
return '<input type="%s" name="%s" id="%s" %s/>' % (type_str, name, name, init_val)
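# --- Hypothetical usage sketch --------------------------------------------------
# A minimal illustration of how a component could declare its parameters with
# ParameterDef and expose their meta information / form fields. The parameter
# names and default values below are invented for the example; only the
# PARAM_* constants defined earlier in this module are assumed.
if __name__ == "__main__":
    example_params = {
        "count":  ParameterDef(PARAM_NUMBER, desc="How many rows to return",
                               required=False, default=10),
        "colour": ParameterDef(PARAM_STRING, desc="Display colour",
                               required=False, default="red",
                               choices=["red", "green", "blue"]),
    }
    for pname, pdef in example_params.items():
        # as_dict() feeds the component's meta info; html_type() renders the
        # matching form field for the resource creation page.
        print("%s -> %s" % (pname, pdef.as_dict()))
        print(pdef.html_type(pname))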
| gpl-3.0 | 7,665,217,387,583,133,000 | 36.311644 | 176 | 0.582744 | false | 4.09125 | false | false | false |
qtile/qtile | libqtile/hook.py | 2 | 10136 | # Copyright (c) 2009-2010 Aldo Cortesi
# Copyright (c) 2010 Lee McCuller
# Copyright (c) 2010 matt
# Copyright (c) 2010, 2014 dequis
# Copyright (c) 2010, 2012, 2014 roger
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Kenji_Takahashi
# Copyright (c) 2011 Paul Colomiets
# Copyright (c) 2011 Tzbob
# Copyright (c) 2012-2015 Tycho Andersen
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import contextlib
from typing import Dict, Set
from libqtile import utils
from libqtile.log_utils import logger
subscriptions = {} # type: Dict
SKIPLOG = set() # type: Set
def clear():
subscriptions.clear()
class Subscribe:
def __init__(self):
hooks = set([])
for i in dir(self):
if not i.startswith("_"):
hooks.add(i)
self.hooks = hooks
def _subscribe(self, event, func):
lst = subscriptions.setdefault(event, [])
if func not in lst:
lst.append(func)
return func
def startup_once(self, func):
"""Called when Qtile has started on first start
This hook is called exactly once per session (i.e. not on each
``lazy.restart()``).
**Arguments**
None
"""
return self._subscribe("startup_once", func)
def startup(self, func):
"""Called when qtile is started
**Arguments**
None
"""
return self._subscribe("startup", func)
def startup_complete(self, func):
"""Called when qtile is started after all resources initialized
**Arguments**
None
"""
return self._subscribe("startup_complete", func)
def shutdown(self, func):
"""Called before qtile is shutdown
**Arguments**
None
"""
return self._subscribe("shutdown", func)
def restart(self, func):
"""Called before qtile is restarted
**Arguments**
None
"""
return self._subscribe("restart", func)
def setgroup(self, func):
"""Called when group is changed
**Arguments**
None
"""
return self._subscribe("setgroup", func)
def addgroup(self, func):
"""Called when group is added
**Arguments**
* name of new group
"""
return self._subscribe("addgroup", func)
def delgroup(self, func):
"""Called when group is deleted
**Arguments**
* name of deleted group
"""
return self._subscribe("delgroup", func)
def changegroup(self, func):
"""Called whenever a group change occurs
**Arguments**
None
"""
return self._subscribe("changegroup", func)
def focus_change(self, func):
"""Called when focus is changed, including moving focus between groups or when
focus is lost completely
**Arguments**
None
"""
return self._subscribe("focus_change", func)
def float_change(self, func):
"""Called when a change in float state is made
**Arguments**
None
"""
return self._subscribe("float_change", func)
def group_window_add(self, func):
"""Called when a new window is added to a group
**Arguments**
* ``Group`` receiving the new window
* ``Window`` added to the group
"""
return self._subscribe("group_window_add", func)
def client_new(self, func):
"""Called before Qtile starts managing a new client
Use this hook to declare windows static, or add them to a group on
startup. This hook is not called for internal windows.
**Arguments**
* ``Window`` object
Examples
--------
::
@libqtile.hook.subscribe.client_new
def func(c):
if c.name == "xterm":
c.togroup("a")
elif c.name == "dzen":
c.cmd_static(0)
"""
return self._subscribe("client_new", func)
def client_managed(self, func):
"""Called after Qtile starts managing a new client
Called after a window is assigned to a group, or when a window is made
static. This hook is not called for internal windows.
**Arguments**
* ``Window`` object of the managed window
"""
return self._subscribe("client_managed", func)
def client_killed(self, func):
"""Called after a client has been unmanaged
**Arguments**
* ``Window`` object of the killed window.
"""
return self._subscribe("client_killed", func)
def client_focus(self, func):
"""Called whenever focus moves to a client window
**Arguments**
* ``Window`` object of the new focus.
"""
return self._subscribe("client_focus", func)
def client_mouse_enter(self, func):
"""Called when the mouse enters a client
**Arguments**
* ``Window`` of window entered
"""
return self._subscribe("client_mouse_enter", func)
def client_name_updated(self, func):
"""Called when the client name changes
**Arguments**
* ``Window`` of client with updated name
"""
return self._subscribe("client_name_updated", func)
def client_urgent_hint_changed(self, func):
"""Called when the client urgent hint changes
**Arguments**
* ``Window`` of client with hint change
"""
return self._subscribe("client_urgent_hint_changed", func)
def layout_change(self, func):
"""Called on layout change
**Arguments**
* layout object for new layout
* group object on which layout is changed
"""
return self._subscribe("layout_change", func)
def net_wm_icon_change(self, func):
"""Called on `_NET_WM_ICON` chance
**Arguments**
* ``Window`` of client with changed icon
"""
return self._subscribe("net_wm_icon_change", func)
def selection_notify(self, func):
"""Called on selection notify
**Arguments**
* name of the selection
* dictionary describing selection, containing ``owner`` and
``selection`` as keys
"""
return self._subscribe("selection_notify", func)
def selection_change(self, func):
"""Called on selection change
**Arguments**
* name of the selection
* dictionary describing selection, containing ``owner`` and
``selection`` as keys
"""
return self._subscribe("selection_change", func)
def screen_change(self, func):
"""Called when the output configuration is changed (e.g. via randr in X11).
**Arguments**
* ``xproto.randr.ScreenChangeNotify`` event (X11) or None (Wayland).
"""
return self._subscribe("screen_change", func)
def current_screen_change(self, func):
"""Called when the current screen (i.e. the screen with focus) changes
**Arguments**
None
"""
return self._subscribe("current_screen_change", func)
def enter_chord(self, func):
"""Called when key chord begins
**Arguments**
* name of chord(mode)
"""
return self._subscribe("enter_chord", func)
def leave_chord(self, func):
"""Called when key chord ends
**Arguments**
None
"""
return self._subscribe("leave_chord", func)
subscribe = Subscribe()
class Unsubscribe(Subscribe):
"""
This class mirrors subscribe, except the _subscribe member has been
    overridden to remove calls from hooks.
"""
def _subscribe(self, event, func):
lst = subscriptions.setdefault(event, [])
try:
lst.remove(func)
except ValueError:
raise utils.QtileError(
"Tried to unsubscribe a hook that was not"
" currently subscribed"
)
unsubscribe = Unsubscribe()
def _fire_async_event(co):
loop = None
with contextlib.suppress(RuntimeError):
loop = asyncio.get_running_loop()
if loop is None:
asyncio.run(co)
else:
asyncio.ensure_future(co)
def fire(event, *args, **kwargs):
if event not in subscribe.hooks:
raise utils.QtileError("Unknown event: %s" % event)
if event not in SKIPLOG:
logger.debug("Internal event: %s(%s, %s)", event, args, kwargs)
for i in subscriptions.get(event, []):
try:
if asyncio.iscoroutinefunction(i):
_fire_async_event(i(*args, **kwargs))
elif asyncio.iscoroutine(i):
_fire_async_event(i)
else:
i(*args, **kwargs)
except: # noqa: E722
logger.exception("Error in hook %s", event)
| mit | 7,779,694,990,883,977,000 | 25.534031 | 86 | 0.588792 | false | 4.424269 | false | false | false |
Ircam-Web/mezzanine-organization | organization/pages/migrations/0014_auto_20161028_1516.py | 1 | 1131 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-28 13:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pages', '0005_auto_20160923_1219'),
('organization-pages', '0013_auto_20161026_1025'),
]
operations = [
migrations.CreateModel(
name='PageRelatedTitle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=1024, null=True, verbose_name='title')),
('page', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='related_title', to='pages.Page', verbose_name='page')),
],
options={
'verbose_name': 'related title',
},
),
migrations.AlterOrderWithRespectTo(
name='pagerelatedtitle',
order_with_respect_to='page',
),
]
| agpl-3.0 | 5,761,957,583,470,936,000 | 34.34375 | 184 | 0.594164 | false | 3.996466 | false | false | false |
omiddavoodi/StellarisInGameLedger | server/ledger.py | 1 | 14030 | import zipfile
import sys
import paradoxparser
import datetime
TECH_SCORE_MULTIPLIER = 10
ACCUMULATED_ENERGY_MULTIPLIER = 0.1
ACCUMULATED_MINERALS_MULTIPLIER = 0.05
ACCUMULATED_INFLUENCE_MULTIPLIER = 0.05
ENERGY_PRODUCTION_MULTIPLIER = 2
MINERAL_PRODUCTION_MULTIPLIER = 1.5
INFLUENCE_PRODUCTION_MULTIPLIER = 1
NUM_SUBJECTS_MULTIPLIER = 30
MILITARYPOWER_MULTIPLIER = 0.03
NUM_COLONIES_MULTIPLIER = 15
NUM_PLANETS_MULTIPLIER = 0.01
class Country:
def __init__(self):
self.name = ''
self.score = 0
self.techscore = 0
self.currentenergy = 0
self.currentminerals = 0
self.currentinfluence = 0
self.energyproduction = 0
self.mineralproduction = 0
self.influenceproduction = 0
self.physicsResearch = 0
self.societyResearch = 0
self.engineeringResearch = 0
self.population = 0
self.numsubjects = 0
self.militarypower = 0
self.numcolonies = 0
self.numplanets = 0
self.numarmies = 0
self.type = ''
self.id = '0'
def calcscore(self):
self.score += TECH_SCORE_MULTIPLIER * self.techscore
self.score += ACCUMULATED_ENERGY_MULTIPLIER * self.currentenergy
self.score += ACCUMULATED_MINERALS_MULTIPLIER * self.currentminerals
self.score += ACCUMULATED_INFLUENCE_MULTIPLIER * self.currentinfluence
self.score += ENERGY_PRODUCTION_MULTIPLIER * self.energyproduction
self.score += MINERAL_PRODUCTION_MULTIPLIER * self.mineralproduction
self.score += INFLUENCE_PRODUCTION_MULTIPLIER * self.influenceproduction
self.score += NUM_SUBJECTS_MULTIPLIER * self.numsubjects
self.score += MILITARYPOWER_MULTIPLIER * self.militarypower
self.score += NUM_COLONIES_MULTIPLIER * self.numcolonies
self.score += NUM_PLANETS_MULTIPLIER * self.numplanets
def _getResearchPenalty(self):
return 0.1 * max(0, self.numcolonies -1) + 0.01 * max(0, self.population-10)
def getPhysicsResearchWithPenalty(self):
return self.physicsResearch / (1 + self._getResearchPenalty())
def getSocietyResearchWithPenalty(self):
return self.societyResearch / (1 + self._getResearchPenalty())
def getEngineeringResearchWithPenalty(self):
return self.engineeringResearch / (1 + self._getResearchPenalty())
def getMatchedScope(text, scopeName):
countries = text[text.find(scopeName+'={'):]
t = 1
instring = False
for country_key_value_pair in range(len(scopeName+'={') + 1, len(countries)):
if countries[country_key_value_pair] == '{' and not instring:
if (t == 1):
k = countries[country_key_value_pair-1]
j = country_key_value_pair-1
while(k != '\t'):
j -= 1
k = countries[j]
t += 1
elif countries[country_key_value_pair] == '}' and not instring:
t -= 1
elif countries[country_key_value_pair] == '"':
instring = not instring
if (t == 0):
countries = countries[:country_key_value_pair+1]
break
result = paradoxparser.psr.parse(countries)
return result
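# Note: getMatchedScope() slices the raw save text starting at '<scopeName>={',
# then walks it character by character while tracking brace depth (braces inside
# quoted strings are ignored via the ``instring`` flag). Once the depth returns
# to zero it has the complete top-level block, e.g. the whole
# 'country={ 0={...} 1={...} ... }' section, which is handed to the paradox
# parser and returned as a nested key/value structure.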
def makeLedgerForSave(path, basePath):
save = zipfile.ZipFile(path)
f = save.open('gamestate')
s = str(f.read(), 'utf-8')
f.close()
playertaglocation = s.find('player={')
playertag = s[playertaglocation:s.find('}', playertaglocation)]
playercountry = playertag[playertag.find('country=')+len('country='):playertag.find('}')].strip()
country_raw_data = getMatchedScope(s,"country")[0][1]
planets = getMatchedScope(s,"planet")[0][1]
ret = ''
retlist = []
contactlist = []
num = 1
for i in country_raw_data:
if (i[1] != 'none'):
ret2 = ''
isUs = False
if (i[0] == playercountry):
isUs = True
contactlist.append(i[0])
relman_part = paradoxparser.paradox_dict_get_child_by_name(i[1], 'relations_manager')
if (relman_part is not None):
for j in relman_part:
countryid = paradoxparser.paradox_dict_get_child_by_name(j[1], 'country')
commun = paradoxparser.paradox_dict_get_child_by_name(j[1], 'communications')
if (commun != None):
contactlist.append(countryid)
country = Country()
country.id = i[0]
namepart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'name')
if (namepart is not None):
country.name = namepart.replace('"', '')
techpart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'tech_status')
if (techpart is not None):
country.techscore = sum(int(j[1]) for j in techpart if j[0] == 'level')
militarypowerpart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'military_power')
if (militarypowerpart is not None):
country.militarypower = float(militarypowerpart)
empiretype = paradoxparser.paradox_dict_get_child_by_name(i[1], 'type')
if (empiretype is not None):
country.type = empiretype.replace('"', '')
if (country.type not in ('fallen_empire', 'default')):
continue
subjectpart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'subjects')
if (subjectpart is not None):
country.numsubjects = len(subjectpart)
armiespart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'owned_armies')
if (armiespart is not None):
country.numarmies = len(armiespart)
planetsspart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'controlled_planets')
if (planetsspart is not None):
country.numplanets = len(planetsspart)
controlledplanetsspart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'owned_planets')
if (controlledplanetsspart is not None):
country.numcolonies = len(controlledplanetsspart)
country.population = 0
for planetId in controlledplanetsspart:
planetObject=planets[int(planetId)][1]
popObject= next((x[1] for x in planetObject if x[0]=='pop'),None)
# if the planet is under colonization, it doesn't have pop key.
if(popObject is not None):
country.population+=len(popObject)
modulespart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'modules')
if (modulespart is not None):
economymodule = paradoxparser.paradox_dict_get_child_by_name(modulespart, 'standard_economy_module')
if (economymodule is not None):
resourcesmodule = paradoxparser.paradox_dict_get_child_by_name(economymodule, 'resources')
if (resourcesmodule is not None):
energy = paradoxparser.paradox_dict_get_child_by_name(resourcesmodule, 'energy')
if (energy is not None):
if (type(energy) == str):
country.currentenergy = float(energy)
else:
country.currentenergy = float(energy[0])
minerals = paradoxparser.paradox_dict_get_child_by_name(resourcesmodule, 'minerals')
if (minerals is not None):
if (type(minerals) == str):
country.currentminerals = float(minerals)
else:
country.currentminerals = float(minerals[0])
influence = paradoxparser.paradox_dict_get_child_by_name(resourcesmodule, 'influence')
if (influence is not None):
if (type(influence) == str):
country.currentinfluence = float(influence)
else:
country.currentinfluence = float(influence[0])
lastmonthmodule = paradoxparser.paradox_dict_get_child_by_name(economymodule, 'last_month')
if (lastmonthmodule is not None):
energy = paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, 'energy')
if (energy is not None):
if (type(energy) == str):
country.energyproduction = float(energy)
else:
country.energyproduction = float(energy[0])
minerals = paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, 'minerals')
if (minerals is not None):
if (type(minerals) == str):
country.mineralproduction = float(minerals)
else:
country.mineralproduction = float(minerals[0])
influence = paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, 'influence')
if (influence is not None):
if (type(influence) == str):
country.influenceproduction = float(influence)
else:
country.influenceproduction = float(influence[0])
physicsResearch=paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, 'physics_research')
if(physicsResearch is not None):
if (type(physicsResearch) == str):
country.physicsResearch = float(physicsResearch)
else:
country.physicsResearch = float(physicsResearch[0])
societyResearch=paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, 'society_research')
if(societyResearch is not None):
if (type(societyResearch) == str):
country.societyResearch = float(societyResearch)
else:
country.societyResearch = float(societyResearch[0])
engineeringResearch=paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, 'engineering_research')
if(engineeringResearch is not None):
if (type(engineeringResearch) == str):
country.engineeringResearch = float(engineeringResearch)
else:
country.engineeringResearch = float(engineeringResearch[0])
country.calcscore()
ret2 += '<tr>'
ret2 += '<td>%s</td>' % num
if (isUs):
ret2 += '<td hiddenvalue=%s>★</td>' % num
else:
ret2 += '<td hiddenvalue=%s> </td>' % num
ret2 += '<td class="name">%s</td>' % country.name
ret2 += '<td>{:10.0f}</td>'.format(country.score).strip()
ret2 += '<td>{:10.0f}</td>'.format(country.militarypower)
ret2 += '<td>%d</td>' % country.techscore
ret2 += '<td>%d</td>' % country.numcolonies
ret2 += '<td>%d</td>' % country.numplanets
ret2 += '<td>%d</td>' % country.numsubjects
production = ('{:10.0f}'.format(country.energyproduction)).strip()
if (country.energyproduction >= 0):
netincome = '<td class="positive">+%s</td>' % production
else:
netincome = '<td class="negative">%s</td>' % production
ret2 += '<td>{:10.0f}</td>'.format(country.currentenergy) + netincome
production = ('{:10.0f}'.format(country.mineralproduction)).strip()
if (country.mineralproduction >= 0):
netincome = '<td class="positive">+%s</td>' % production
else:
netincome = '<td class="negative">%s</td>' % production
ret2 += '<td>{:10.0f}</td>'.format(country.currentminerals) + netincome
production = ('{:10.1f}'.format(country.influenceproduction)).strip()
if (country.influenceproduction >= 0):
netincome = '<td class="positive">+%s</td>' % production
else:
netincome = '<td class="negative">%s</td>' % production
ret2 += '<td>{:10.0f}</td>'.format(country.currentinfluence) + netincome
ret2 += '<td>%.1f</td>' % country.getPhysicsResearchWithPenalty()
ret2 += '<td>%.1f</td>' % country.getSocietyResearchWithPenalty()
ret2 += '<td>%.1f</td>' % country.getEngineeringResearchWithPenalty()
ret2 += '<td>%d</td>' % country.population
ret2 += '</tr>'
retlist.append((country.id, ret2))
num += 1
## print(country.name)
## print(country.techscore)
## print(country.militarypower)
## print(country.type)
## print(country.numsubjects)
## print(country.numarmies)
## print(country.numplanets)
## print(country.numcolonies)
## print(country.currentenergy)
## print(country.currentminerals)
## print(country.currentinfluence)
## print(country.energyproduction)
## print(country.mineralproduction)
## print(country.influenceproduction)
retlist2 = []
for i in retlist:
if (i[0] in contactlist):
retlist2.append(i[1])
ret = "\n".join(retlist2)
return ret
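# --- Hypothetical usage sketch --------------------------------------------------
# Direct invocation of the function above; the save path is a placeholder and
# ``basePath`` is accepted but not used by the code shown here.
if __name__ == '__main__':
    rows = makeLedgerForSave('saves/example_campaign.sav', 'saves')
    print('<table>%s</table>' % rows)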
| mit | 7,446,365,831,525,162,000 | 41.644377 | 129 | 0.548753 | false | 3.853337 | false | false | false |
Fendoe/open-hackathon-o | open-hackathon-server/src/hackathon/config_sample.py | 1 | 4189 | # -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------
# Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------------
# "javascript" section for javascript. see @app.route('/config.js') in app/views.py
# oauth constants
QQ_OAUTH_STATE = "openhackathon" # todo state should not be constant. Actually it should be unguessable to prevent CSFA
HACKATHON_SERVER_ENDPOINT = "http://localhost:15000"
MYSQL_HOST = "localhost"
MYSQL_USER = "hackathon"
MYSQL_PWD = "hackathon"
MYSQL_DB = "hackathon"
MYSQL_PORT = 3306
Config = {
"environment": "local",
"endpoint": HACKATHON_SERVER_ENDPOINT,
"app": {
"secret_key": "secret_key"
},
"mysql": {
"connection": 'mysql://%s:%s@%s:%s/%s' % (MYSQL_USER, MYSQL_PWD, MYSQL_HOST, MYSQL_PORT, MYSQL_DB)
},
"login": {
"github": {
"user_info_url": 'https://api.github.com/user?access_token=',
"emails_info_url": 'https://api.github.com/user/emails?access_token='
},
"qq": {
"openid_url": 'https://graph.qq.com/oauth2.0/me?access_token=',
"user_info_url": 'https://graph.qq.com/user/get_user_info?access_token=%s&oauth_consumer_key=%s&openid=%s'
},
"gitcafe": {
# gitcafe domain: gcas.dgz.sh/gcs.dgz.sh for Staging, api.gitcafe.com/gitcafe.com for Production
"user_info_url": "https://gcas.dgz.sh/api/v1/user"
},
"weibo": {
"user_info_url": 'https://api.weibo.com/2/users/show.json?access_token=',
"email_info_url": 'https://api.weibo.com/2/account/profile/email.json?access_token='
},
"live": {
"user_info_url": 'https://apis.live.net/v5.0/me?access_token='
},
"token_expiration_minutes": 60 * 24
},
"azure": {
"cert_base": "/home/if/If/open-hackathon/open-hackathon/src/hackathon/certificates",
"container_name": "certificates"
},
"guacamole": {
"host": "http://localhost:8080"
},
"scheduler": {
"job_store": "mysql",
"job_store_url": 'mysql://%s:%s@%s:%s/%s' % (MYSQL_USER, MYSQL_PWD, MYSQL_HOST, MYSQL_PORT, MYSQL_DB)
},
"pre_allocate": {
"check_interval_minutes": 5,
"azure": 1,
"docker": 1
},
"storage": {
"type": "local",
"size_limit_kilo_bytes": 5 * 1024,
"azure": {
"account_name": "hackathon",
"account_key": "U4/oE3Ocwk9txQHw2qNOCCW2Fy05FBY3yQfzcKRNss5tnReyYTO7PDyeXQ8TWMMxXF07JrW7UXPyOhGgJlodEQ==",
"image_container": "images",
"template_container": "templates",
"blob_service_host_base": ".blob.core.chinacloudapi.cn"
}
},
"docker": {
"alauda": {
"token": "",
"namespace": "",
"endpoint": "https://api.alauda.cn"
}
},
"cloud_eclipse": {
"api": "http://www.idehub.cn/api/ide"
}
}
| mit | 1,975,551,654,810,841,000 | 37.787037 | 120 | 0.591549 | false | 3.378226 | false | false | false |
853612777/pythontestbrain | test.py | 1 | 1282 | #!/usr/bin/python
#-*-coding:utf8-*-
import pygame
from pygame.locals import *
from gameobjects.vector2 import Vector2
from sys import exit
from random import randint
def main():
background_image_path=r'./picture/sushiplate.jpg'
sprite_image_path='./picture/fugu.png'
SCREEN_SIZE=(640,480)
clock=pygame.time.Clock()
position=Vector2(100.0,100.0)
heading=Vector2()
speed=100
pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE,0,32)
pygame.display.set_caption("hello world")
# font=pygame.font.SysFont('arial',16)
# font_height=font.get_linesize()
background=pygame.image.load(background_image_path).convert()
sprite=pygame.image.load(sprite_image_path).convert_alpha()
destination=Vector2(randint(0,640),randint(0,480))
while True:
for event in pygame.event.get():
if event.type==QUIT:
exit()
screen.blit(background,(0,0))
screen.blit(sprite,position)
time_passed=clock.tick()
time_passed_seconds=time_passed/1000.0
vector_to_mouse=Vector2.from_points(position,destination)
if vector_to_mouse.get_length() > 1:
heading=vector_to_mouse.normalize()
position+=heading*time_passed_seconds*speed
else:
destination=Vector2(randint(0,640),randint(0,480))
pygame.display.update()
if __name__ == '__main__':
main() | gpl-3.0 | -4,383,065,302,121,159,000 | 21.508772 | 62 | 0.723869 | false | 2.880899 | false | false | false |
theiviaxx/frog-angular | frog/admin.py | 1 | 1945 | ##################################################################################################
# Copyright (c) 2012 Brett Dixon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##################################################################################################
from django.contrib import admin
from frog.models import Gallery, Image, Video, Tag
class GalleryAdmin(admin.ModelAdmin):
list_display = ('title', 'parent', 'owner', 'security')
class ImageAdmin(admin.ModelAdmin):
list_display = ('title', 'guid', 'author', 'thumbnail_tag')
class VideoAdmin(admin.ModelAdmin):
list_display = ('title', 'guid', 'author', 'thumbnail_tag')
class TagAdmin(admin.ModelAdmin):
list_display = ('name', 'parent')
list_filter = ('artist',)
admin.site.register(Gallery, GalleryAdmin)
admin.site.register(Image, ImageAdmin)
admin.site.register(Video, VideoAdmin)
admin.site.register(Tag, TagAdmin) | mit | 765,541,564,951,422,300 | 39.541667 | 98 | 0.675578 | false | 4.380631 | false | false | false |
assisi/assisipy-lib | assisipy_utils/examples/arena/show_arena_in_mpl.py | 2 | 1302 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
A simple example of assisilib usage, with arenas shown in a matplotlib
visualisation (for testing of shapes, figures that are not from screenshots, etc)
'''
from math import pi
from assisipy_utils import arena
import matplotlib.pyplot as plt
from assisipy_utils.arena import rendering
if __name__ == '__main__':
plt.figure(1); plt.clf()
fig, ax = plt.subplots(1,1,num=1)
# define an arena wall object
A = arena.RoundedRectArena(length=25.5, width=16.5, ww=0.5, corner_rad=4.9)
A2 = arena.RoundedRectBarrier(length=11., width=10., ww=0.5, corner_rad=2.,
label_stub='block', edges='wn')
T2= arena.Transformation(dx=8.75, dy=-4.75)
A2.transform(T2)
rendering.render_arena(A.segs, ax=ax, fc='0.5')
rendering.render_arena(A2.segs, ax=ax, fc='r')
yaw = pi
casu_poses = []
for (x,y) in [(x,y) for x in [-9, 0, 9] for y in [-4.5, +4.5] ]:
p = arena.transforms.Point(x, y, 0)
casu_poses.append( (p, yaw))
# special setup, with only 5 - remove the last one
del casu_poses[4]
rendering.render_CASUs(casu_poses, ax=ax)
ax.set_xlim(-20, +20)
ax.set_ylim(-20, +20)
ax.set_aspect('equal')
ax.grid('on')
plt.show()
| lgpl-3.0 | 7,424,228,556,685,868,000 | 25.571429 | 81 | 0.608295 | false | 2.812095 | false | false | false |
brendanzhao/GoogleCodeJam2014 | src/1a/full_binary_tree.py | 1 | 1633 | import sys
FILEPATH = 'B-large-practice'
sys.stdin = open(FILEPATH + '.in', 'r')
sys.stdout = open(FILEPATH + '.out', 'w')
def get_line(): return raw_input()
def get_int(): return int(get_line())
def get_ints(): return [int(x) for x in get_line().split()]
def max_two_numbers(numbers):
largest = 0
second_largest = 0
for number in numbers:
if number > largest:
second_largest = largest
largest = number
elif number > second_largest:
second_largest = number
return largest + second_largest
def max_subtree(vertex, parent):
subtree = []
children = vertex.adjacent[:]
if parent:
children.remove(parent)
if len(children) < 2:
return 1
for child in children:
subtree.append(max_subtree(child, vertex))
return 1 + max_two_numbers(subtree)
def full_binary_tree(node_count, edges):
min_deletions = float('inf')
vertices = [Vertex(i) for i in xrange(1, node_count + 1)]
for v, w in edges:
vertices[v - 1].add_adjacent(vertices[w - 1])
vertices[w - 1].add_adjacent(vertices[v - 1])
for root in vertices:
min_deletions = min(min_deletions, node_count - max_subtree(root, None))
return min_deletions
class Vertex(object):
def __init__(self, id):
self.id = id
self.adjacent = []
def add_adjacent(self, vertex):
self.adjacent.append(vertex)
def remove_adjacent(self, vertex):
self.adjacent.remove(vertex)
if __name__ == '__main__':
for case in xrange(1, get_int() + 1):
node_count = get_int()
edges = [get_ints() for _ in xrange(node_count - 1)]
print 'Case #%d: %d' % (case, full_binary_tree(node_count, edges))
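# Worked example (illustrative input): for node_count = 7 and
# edges = [[1, 2], [2, 3], [2, 4], [1, 5], [5, 6], [5, 7]] the tree is already
# a full binary tree when rooted at node 1: max_subtree(vertex 1) returns
# 1 + max_two_numbers([3, 3]) = 7, so full_binary_tree(7, edges) = 7 - 7 = 0
# deletions. Rooting at a leaf keeps fewer nodes, which is why every vertex is
# tried as a candidate root.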
| mit | -8,946,798,903,781,333,000 | 23.373134 | 76 | 0.644213 | false | 3.140385 | false | false | false |
gviejo/ThalamusPhysio | python/main_make_SWS_norm_product.py | 1 | 10943 | #!/usr/bin/env python
'''
File name: main_ripp_mod.py
Author: Guillaume Viejo
Date created: 16/08/2017
Python Version: 3.5.2
'''
import sys
import numpy as np
import pandas as pd
import scipy.io
from functions import *
# from pylab import *
from multiprocessing import Pool
import os
import neuroseries as nts
from time import time
from pylab import *
from functions import quickBin
from numba import jit
@jit(nopython=True)
def scalarProduct(r):
tmp = np.sqrt(np.power(r, 2).sum(1))
denom = tmp[0:-1] * tmp[1:]
num = np.sum(r[0:-1]*r[1:], 1)
return num/(denom)
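# Note: for an (n_bins, n_neurons) rate matrix r, scalarProduct returns the
# cosine similarity between consecutive population vectors, i.e. an array of
# length n_bins-1 whose entry i is <r[i], r[i+1]> / (||r[i]|| * ||r[i+1]||).
# Bins with no spikes yield NaN, which the calling code below squashes with
# np.nan_to_num where needed.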
@jit(nopython=True)
def quickBin(spikelist, ts, bins, index):
rates = np.zeros((len(ts), len(bins)-1, len(index)))
for i, t in enumerate(ts):
tbins = t + bins
for j in range(len(spikelist)):
a, _ = np.histogram(spikelist[j], tbins)
rates[i,:,j] = a
return rates
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
anglehd = {}
anglenohd = {}
zanglehd = {}
zanglenohd = {}
for session in datasets:
print(session)
generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
else:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
wake_ep = loadEpoch(data_directory+session, 'wake')
sleep_ep = loadEpoch(data_directory+session, 'sleep')
sws_ep = loadEpoch(data_directory+session, 'sws')
rem_ep = loadEpoch(data_directory+session, 'rem')
sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3)
sws_ep = sleep_ep.intersect(sws_ep)
rem_ep = sleep_ep.intersect(rem_ep)
rip_ep,rip_tsd = loadRipples(data_directory+session)
rip_ep = sws_ep.intersect(rip_ep)
rip_tsd = rip_tsd.restrict(sws_ep)
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
####################################################################################################################
# binning data
####################################################################################################################
spikeshd = {k:spikes[k] for k in np.where(hd_info_neuron==1)[0] if k not in []}
spikesnohd = {k:spikes[k] for k in np.where(hd_info_neuron==0)[0] if k not in []}
hdneurons = np.sort(list(spikeshd.keys()))
nohdneurons = np.sort(list(spikesnohd.keys()))
bin_size = 20
n_ex = 1000
normed = True
####################################################################################################################
# MIN MAX SWS
####################################################################################################################
	# min and max firing rate of each neuron during SWS (used below to normalise rates)
	mean_sws = pd.DataFrame(index = np.sort(list(spikes.keys())), columns = ['min', 'max'])
for n in spikes.keys():
r = []
for e in sws_ep.index:
bins = np.arange(sws_ep.loc[e,'start'], sws_ep.loc[e,'end'], bin_size*1e3)
a, _ = np.histogram(spikes[n].restrict(sws_ep.loc[[e]]).index.values, bins)
r.append(a)
r = np.hstack(r)
r = r / (bin_size*1e-3)
mean_sws.loc[n,'min']= r.min()
mean_sws.loc[n,'max']= r.max()
bins = np.arange(0, 2000+2*bin_size, bin_size) - 1000 - bin_size/2
times = bins[0:-1] + np.diff(bins)/2
ts = rip_tsd.as_units('ms').index.values
####################################################################################################################
# HD NEURONS
####################################################################################################################
if len(spikeshd) >=5:
rates = quickBin([spikeshd[j].as_units('ms').index.values for j in hdneurons], ts, bins, hdneurons)
rates = rates/float(bin_size*1e-3)
angle = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rip_tsd)))
for i, r in enumerate(rates):
tmp = scalarProduct(r)
angle[i] = np.nan_to_num(tmp, 0)
zangle = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rip_tsd)))
a = mean_sws.loc[hdneurons,'min'].values.astype('float')
b = mean_sws.loc[hdneurons,'max'].values.astype('float')
zrates = (rates - a) / (b-a)
for i, r in enumerate(zrates):
zangle[i] = scalarProduct(r)
# random
rnd_tsd = nts.Ts(t = np.sort(np.hstack([np.random.randint(sws_ep.loc[i,'start']+500000, sws_ep.loc[i,'end']+500000, n_ex//len(sws_ep)) for i in sws_ep.index])))
ts = rnd_tsd.as_units('ms').index.values
rates2 = quickBin([spikeshd[j].as_units('ms').index.values for j in hdneurons], ts, bins, hdneurons)
rates2 = rates2/float(bin_size*1e-3)
shuffled = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rnd_tsd)))
for i, r in enumerate(rates2):
tmp = scalarProduct(r)
shuffled[i] = np.nan_to_num(tmp, 0)
zshuffled = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rnd_tsd)))
a = mean_sws.loc[hdneurons,'min'].values.astype('float')
b = mean_sws.loc[hdneurons,'max'].values.astype('float')
zrates2 = (rates2 - a) / (b-a)
for i, r in enumerate(zrates2):
zshuffled[i] = scalarProduct(r)
anglehd[session] = (angle.mean(1) - shuffled.mean(1))/shuffled.mean(1)
zanglehd[session] = (zangle.mean(1) - zshuffled.mean(1))/zshuffled.mean(1)
####################################################################################################################
# NO HD NEURONS
####################################################################################################################
if len(spikesnohd) >=5:
rates = quickBin([spikesnohd[j].as_units('ms').index.values for j in nohdneurons], ts, bins, nohdneurons)
rates = rates/float(bin_size*1e-3)
angle = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rip_tsd)))
for i, r in enumerate(rates):
tmp = scalarProduct(r)
angle[i] = np.nan_to_num(tmp, 0)
zangle = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rip_tsd)))
a = mean_sws.loc[nohdneurons,'min'].values.astype('float')
b = mean_sws.loc[nohdneurons,'max'].values.astype('float')
zrates = (rates - a) / (b-a)
for i, r in enumerate(zrates):
zangle[i] = scalarProduct(r)
# random
rnd_tsd = nts.Ts(t = np.sort(np.hstack([np.random.randint(sws_ep.loc[i,'start']+500000, sws_ep.loc[i,'end']+500000, n_ex//len(sws_ep)) for i in sws_ep.index])))
ts = rnd_tsd.as_units('ms').index.values
rates2 = quickBin([spikesnohd[j].as_units('ms').index.values for j in nohdneurons], ts, bins, nohdneurons)
rates2 = rates2/float(bin_size*1e-3)
shuffled = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rnd_tsd)))
for i, r in enumerate(rates2):
tmp = scalarProduct(r)
shuffled[i] = np.nan_to_num(tmp, 0)
zshuffled = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rnd_tsd)))
a = mean_sws.loc[nohdneurons,'min'].values.astype('float')
b = mean_sws.loc[nohdneurons,'max'].values.astype('float')
zrates2 = (rates2 - a) / (b-a)
for i, r in enumerate(zrates2):
zshuffled[i] = scalarProduct(r)
anglenohd[session] = (angle.mean(1) - shuffled.mean(1))/shuffled.mean(1)
zanglenohd[session] = (zangle.mean(1) - zshuffled.mean(1))/zshuffled.mean(1)
anglehd = pd.DataFrame.from_dict(anglehd)
anglenohd = pd.DataFrame.from_dict(anglenohd)
zanglehd = pd.DataFrame.from_dict(zanglehd)
zanglenohd = pd.DataFrame.from_dict(zanglenohd)
subplot(211)
plot(anglehd.mean(1))
plot(anglenohd.mean(1))
title("Scalar product")
subplot(212)
plot(zanglehd.mean(1))
plot(zanglenohd.mean(1))
title("Scalar product + zscored")
sys.exit()
store = pd.HDFStore('../figures/figures_articles_v4/figure2/test.h5')
if normed:
store.append('anglehd_normed', anglehd)
store.append('anglenohd_normed', anglenohd)
else:
store.append('anglehd', anglehd)
store.append('anglenohd', anglenohd)
store.close()
figure()
store = pd.HDFStore('../figures/figures_articles_v4/figure2/test.h5')
subplot(2,2,1)
plot(store['anglehd'].mean(1), label = 'HD')
plot(store['anglenohd'].mean(1), label = 'non-HD')
legend()
title("Scalar Product")
subplot(2,2,2)
plot(store['pearsonhd'].mean(1), label = 'HD')
plot(store['pearsonnohd'].mean(1), label = 'non-HD')
legend()
title("Pearson Correlation")
subplot(2,2,3)
plot(store['anglehd_normed'].mean(1), label = 'HD')
plot(store['anglenohd_normed'].mean(1), label = 'non-HD')
legend()
title("Scalar Product normalized")
subplot(2,2,4)
plot(store['pearsonhd_normed'].mean(1), label = 'HD')
plot(store['pearsonnohd_normed'].mean(1), label = 'non-HD')
legend()
title("Pearson Correlation normalized")
show()
sys.exit()
anglehd = pd.DataFrame.from_dict(anglehd)
anglenohd = pd.DataFrame.from_dict(anglenohd)
plot(anglehd.mean(1), label = 'hd')
plot(anglenohd.mean(1), label = 'nohd')
legend()
show()
sys.exit()
datatosave = cPickle.load(open("/mnt/DataGuillaume/MergedData/SWR_SCALAR_PRODUCT.pickle", 'rb'))
angleall = datatosave['cosalpha']
baselineall = datatosave['baseline']
hd = pd.DataFrame()
for s in angleall.keys():
if 'hd' in list(angleall[s].keys()):
tmp1 = angleall[s]['hd'].rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=0.5)
tmp2 = baselineall[s]['hd'].rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=0.5)
tmp = (tmp1.mean(1) - tmp2.mean(1))/tmp2.mean(1)
hd[s.split("/")[1]] = tmp
nohd = pd.DataFrame()
for s in angleall.keys():
if 'nohd' in list(angleall[s].keys()):
tmp1 = angleall[s]['nohd'].rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=1)
tmp2 = baselineall[s]['nohd'].rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=1)
tmp = (tmp1.mean(1) - tmp2.mean(1))/tmp2.mean(1)
nohd[s.split("/")[1]] = tmp
data = pd.DataFrame(index = hd.index.values, columns = pd.MultiIndex.from_product([['hd', 'nohd'], ['mean', 'sem']]))
data['hd', 'mean'] = hd.mean(1)
data['hd', 'sem'] = hd.sem(1)
data['nohd', 'mean'] = nohd.mean(1)
data['nohd', 'sem'] = nohd.sem(1)
data.to_hdf("../figures/figures_articles_v4/figure2/SWR_SCALAR_PRODUCT.h5", 'w')
subplot(111)
m = hd.mean(1)
v = hd.sem(1)
plot(hd.mean(1), label = 'hd')
fill_between(hd.index.values, m+v, m-v, alpha = 0.5)
# title("Only hd")
# subplot(212)
# title("No hd")
m = nohd.mean(1)
v = nohd.sem(1)
plot(nohd.mean(1), label = 'nohd')
fill_between(nohd.index.values, m+v, m-v, alpha = 0.5)
legend()
figure()
subplot(121)
plot(hd, color = 'grey')
plot(hd.mean(1), color = 'red')
title("HD")
subplot(122)
plot(nohd, color = 'grey')
plot(nohd.mean(1), color = 'black')
title("No HD")
show()
| gpl-3.0 | -5,185,810,242,031,926,000 | 32.567485 | 162 | 0.606689 | false | 2.658003 | false | false | false |
dmargala/tpcorr | examples/plugmap.py | 1 | 12679 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import sys,os
import string
import math
import argparse
def read_plugmap(filename):
debug=False
file=open(filename,"r")
doc={}
intypedef=False
indices={}
indices["HOLETYPE"]=8
indices["OBJECT"]=21
indices["ra"]=9
indices["dec"]=10
indices["xfoc"]=22
indices["yfoc"]=23
objects={}
for k in indices :
objects[k]=[]
for line in file.readlines() :
line=line.strip().replace('\t',' ')
if debug :
print "line: ",line
if len(line)==0 :
continue
if line[0]=="#" :
continue
if line.find("typedef")>=0 :
intypedef=True
if debug :
print "now in typedef"
continue
if intypedef and line.find("}")>=0 :
intypedef=False
if debug :
print "end of typedef"
continue
if intypedef :
continue
if line.find("PLUGMAPOBJ")>=0 :
tmp=line.split(" ")
entries=[]
for t in tmp :
if len(t)>0 :
entries.append(t)
for k in objects.keys() :
i=indices[k]
val=entries[i]
#print k,i,val
tmp=None
try :
tmp=string.atoi(val)
except ValueError :
pass
if tmp is None :
try :
val=string.atof(val)
except ValueError :
pass
if tmp is not None :
val=tmp
objects[k].append(val)
if debug :
print "added one PLUGMAPOBJ"
continue
tmp=line.strip().split(" ")
entries=[]
for t in tmp :
if len(t)>0 :
entries.append(t)
if len(entries)>=2 :
key=entries[0]
val=entries[1]
tmp=None
try :
tmp=string.atoi(val)
except ValueError :
pass
if tmp is None :
try :
val=string.atof(val)
except ValueError :
pass
if tmp is not None :
val=tmp
doc[key]=val
if debug :
print "added doc",key,val
# convert objects into np.array
for k in objects :
objects[k]=np.array(objects[k])
return doc,objects
class OpticalDistortion() :
def __init__(self,platescale) :
self.platescale=platescale # has units
# see ~/software/platedesign/trunk/pro/plate/ad2xyfocal.pro
coef=np.array([-0.000137627, -0.00125238, 1.5447e-09,
8.23673e-08, -2.74584e-13, -1.53239e-12,
6.04194e-18, 1.38033e-17, -2.97064e-23,
-3.58767e-23])
self.achromatic_distortion_pol=np.poly1d(coef[::-1])
# see ~/software/platedesign/trunk/pro/plate/apo_rdistort.pro
mm_per_rad =platescale*180/math.pi
self.chromatic_distort_radii=np.arcsin(np.linspace(0,90,10)*math.pi/(60*180))*mm_per_rad
print "RADII=",self.chromatic_distort_radii
self.chromatic_distort_wave=np.array([5300,4000,5500,6000,8000,10000,15350,15950,16550])
nw=self.chromatic_distort_wave.size
nr=self.chromatic_distort_radii.size
self.chromatic_distort=np.array([
[0.,36.26,72.53,108.84,145.18,181.53,217.90,254.29,290.77,327.44],
[0.,-0.002,-0.003,-0.004,-0.005,-0.005,-0.005,-0.004,-0.002,0.003],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.001,0.001,0.001,0.001,0.001,0.001,0.001,0.001,-0.001],
[0.,0.001,0.003,0.003,0.004,0.004,0.004,0.003,0.002,-0.003],
[0.,0.002,0.004,0.005,0.005,0.005,0.005,0.005,0.003,-0.004],
[0.,0.003,0.006,0.007,0.008,0.008,0.008,0.008,0.004,-0.006],
[0.,0.003,0.006,0.008,0.008,0.009,0.009,0.008,0.004,-0.006],
[0.,0.004,0.006,0.008,0.009,0.009,0.009,0.008,0.004,-0.007]])
# apply scaling
scale=np.zeros((nr))
scale[1:]=self.chromatic_distort_radii[1:]/self.chromatic_distort[0,1:]
self.chromatic_distort[1:] *= scale
self.chromatic_distort[0]=0.
# sort wave
ii=np.argsort(self.chromatic_distort_wave)
self.chromatic_distort_wave=self.chromatic_distort_wave[ii]
for j in range(nr) :
self.chromatic_distort[:,j]=self.chromatic_distort[ii,j]
# in ad2xyfocal, a reference wavelength of 5000A instead of 5500A is used !!
ref_distort = np.zeros((nr))
for j in range(nr) :
ref_distort[j]=np.interp(5000,self.chromatic_distort_wave,self.chromatic_distort[:,j])
self.chromatic_distort -= ref_distort
"""
plt.plot(self.chromatic_distort_wave,self.chromatic_distort[:,-1],"o-")
ww=np.linspace(4000,8000,200)*u.angstrom
r=self.chromatic_distort_radii[-1]
dd=np.zeros((ww.size))
for i in range(ww.size) :
dd[i]=self.chromatic_distortion(r,ww[i]).to(u.mm).value
plt.plot(ww,dd,c="r")
plt.show()
"""
def chromatic_distortion(self,radius,wavelength) : # with radius and wave with units , returns delta r to be added
i=np.where(self.chromatic_distort_wave>=wavelength)[0]
if i.size == 0 :
i=1
else :
i=min(max(1,i[0]),self.chromatic_distort_radii.size-1)
dist1=np.interp(radius,self.chromatic_distort_radii,self.chromatic_distort[i-1])
dist2=np.interp(radius,self.chromatic_distort_radii,self.chromatic_distort[i])
dist=np.interp(wavelength,[self.chromatic_distort_wave[i-1],self.chromatic_distort_wave[i]],[dist1,dist2])
return dist
def distortion(self,radius,wavelength) :
return self.achromatic_distortion_pol(radius) + self.chromatic_distortion(radius,wavelength)
# same result as idlutils/goddard/pro/astro/hadec2altaz.pro
# but with adr calibrated using astropy
def hadec2altaz(ha, dec, lat, wavelength=None) : # ha,dec,lat in deg, wave in a, returns alt,az
d2r = math.pi/180.
sh = math.sin(ha*d2r)
ch = math.cos(ha*d2r)
sd = math.sin(dec*d2r)
cd = math.cos(dec*d2r)
sl = math.sin(lat*d2r)
cl = math.cos(lat*d2r)
"""
x=np.array([cd*ch,cd*sh,sd])
r=np.array([[sl,0,-cl],[0,1,0],[cl,0,sl]])
x=r.dot(x)
x0=x[0]
x1=x[1]
x2=x[2]
"""
x0 = - ch * cd * sl + sd * cl
x1 = - sh * cd
x2 = ch * cd * cl + sd * sl
r=math.sqrt(x0**2+x1**2)
az = math.atan2(-x1,-x0) /d2r
alt = math.atan2(x2,r) / d2r
if wavelength is not None :
# arcsec per unit of tan(zenith)
fact=np.interp(wavelength,[3000,3500,4000,5000,5400,6000,7000,8000],[44.166347,43.365612,42.8640697818,42.292551282,42.1507465805,41.990386,41.811009,41.695723])
alt += fact*(r/x2)/3600.
return alt,az
# exact same routine as altaz2rpa in idl, needed to get same platescale definition
def altaz2xy(alt,az,altcen,azcen,platescale) :
d2r=math.pi/180
xx= -np.sin(az*d2r) * np.sin((90-alt)*d2r)
yy= -np.cos(az*d2r) * np.sin((90-alt)*d2r)
zz= np.cos((90-alt)*d2r)
xi= -xx*np.cos(azcen*d2r) + yy*np.sin(azcen*d2r)
yi= -yy*np.cos(azcen*d2r) - xx*np.sin(azcen*d2r)
zi= zz
xl= xi
yl= yi*np.sin((90-altcen)*d2r) + zi*np.cos((90-altcen)*d2r)
zl= zi*np.sin((90-altcen)*d2r) - yi*np.cos((90-altcen)*d2r)
rfocal=np.arcsin(np.sqrt(xl**2+zl**2))/d2r*platescale
posang=np.arctan2(-xl, zl)
return rfocal*np.cos(posang),rfocal*np.sin(posang)
def hadec2xy(ha,dec,alt0,az0,crot,srot,latitude,platescale,distortion,wavelength) :
alt,az = hadec2altaz(ha,dec,latitude,wavelength)
x,y = altaz2xy(alt,az,alt0,az0,platescale)
rscale = 1
if 1 :
# Distortion, see ad2xyfocal.pro
r = np.sqrt(x**2 + y**2)
if r>0 :
rscale = 1+distortion.distortion(r,wavelength)/r
# Rotate the focal plane so that +y points towards a point that is offset from
# the plate center along DEC by +1.5 degrees.
xr = rscale*(x*crot-y*srot)
yr = rscale*(x*srot+y*crot)
return -xr,yr,alt,az
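# Hypothetical usage sketch (placeholder pointing values), chaining the helpers
# above to map one target to focal-plane coordinates:
#
#   platescale = 217.7358                    # mm/deg, same value used in main()
#   dist = OpticalDistortion(platescale)
#   alt0, az0 = hadec2altaz(0.0, 30.0, 32.7797556, 5400.)  # plate-centre pointing
#   x, y, alt, az = hadec2xy(0.5, 30.2, alt0, az0, 1.0, 0.0, 32.7797556,
#                            platescale, dist, 5400.)      # crot=1, srot=0: no rotation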
def main() :
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', type=str, default='plPlugMapP-4392.par',
help='Input plugmap filename.')
parser.add_argument('--output', type=str, default='myPlugMapP-4392.list',
help='Output filename.')
parser.add_argument('--ha', type=float, default=0,
help='Design hour angle (degrees).')
args = parser.parse_args()
filename = args.input
ofilename = args.output
ha_obs = args.ha
doc, objects = read_plugmap(filename)
ra=objects["ra"]
dec=objects["dec"]
xfoc=objects["xfoc"]
yfoc=objects["yfoc"]
ha_design=doc["ha"]
ra0=doc["raCen"]
dec0=doc["decCen"]
mjd=doc["mjdDesign"]
print "design MJD=%d HA=%f ra=%f dec=%f"%(mjd,ha_design,ra0,dec0)
# APO lat=32.7797556 in plate_refrac.pro
latitude=32.7797556
# optical distortion
# from platedesign/trunk/pro/plate/get_platescale.pro
platescale = 217.7358
distortion = OpticalDistortion(platescale)
# only reference for this wavelength I could find is in code platedesign/trunk/pro/plate/adr.pro
refwave=5400.0
gal=np.where(objects["OBJECT"]=="GALAXY")[0]
qso=np.where(objects["OBJECT"]=="QSO")[0]
star=np.where(objects["OBJECT"]=="SPECTROPHOTO_STD")[0]
na=np.where(objects["OBJECT"]=="NA")[0]
nobj=xfoc.size
wave_design=refwave*np.ones((nobj))
wave_design[gal]=5400.
wave_design[qso]=4000.
wave_design[star]=5400.
wave_obs=7450*np.ones((nobj))
wave_obs[gal]=7450. # to study r1/r2
wave_obs[qso]=7450.
wave_obs[star]=7450.
# for design
alt0_design,az0_design = hadec2altaz(ha_design, dec0, latitude, refwave)
print "Design ALT (ref wave)=",alt0_design
print "Design AZ (ref wave)=",az0_design
# rotation of plate to get vertical dec
altfid,azfid = hadec2altaz(ha_design, dec0+1.5, latitude, refwave)
xfid,yfid = altaz2xy(altfid,azfid,alt0_design,az0_design,platescale)
rotation_angle = np.arctan2(xfid,yfid)
crot_design = np.cos(rotation_angle)
srot_design = np.sin(rotation_angle)
# same for obs
alt0_obs,az0_obs = hadec2altaz(ha_obs, dec0, latitude, refwave)
print "Obs ALT (ref wave)=",alt0_obs
print "Obs AZ (ref wave)=",az0_obs
# rotation of plate to get vertical dec
altfid,azfid = hadec2altaz(ha_obs, dec0+1.5, latitude, refwave)
xfid,yfid = altaz2xy(altfid,azfid,alt0_obs,az0_obs,platescale)
rotation_angle = np.arctan2(xfid,yfid)
crot_obs = np.cos(rotation_angle)
srot_obs = np.sin(rotation_angle)
# compute, at design hour angle = ha_design
xdesign=np.zeros((nobj))
ydesign=np.zeros((nobj))
alt_design=np.zeros((nobj))
az_design=np.zeros((nobj))
# compute, at observed hour angle = ha_obs
xobs=np.zeros((nobj))
yobs=np.zeros((nobj))
alt_obs=np.zeros((nobj))
az_obs=np.zeros((nobj))
selection=range(nobj)
for o in selection :
x,y,alt,az = hadec2xy(ha_design-(ra[o]-ra0),dec[o],alt0_design,az0_design,crot_design,srot_design,latitude,platescale,distortion,wave_design[o])
xdesign[o] = x
ydesign[o] = y
alt_design[o] = alt
az_design[o] = az
x,y,alt,az = hadec2xy(ha_obs-(ra[o]-ra0),dec[o],alt0_obs,az0_obs,crot_obs,srot_obs,latitude,platescale,distortion,wave_obs[o])
xobs[o] = x
yobs[o] = y
alt_obs[o] = alt
az_obs[o] = az
file=open(ofilename,"w")
file.write("#ra dec xfoc yfoc wavedesign xdesign ydesign altdesign azdesign waveobs xobs yobs altobs azobs hole obj\n")
for o in selection :
file.write("%f %f %f %f %f %f %f %f %f %f %f %f %f %f %s %s\n"%(ra[o],dec[o],xfoc[o],yfoc[o],wave_design[o],xdesign[o],ydesign[o],alt_design[o],az_design[o],wave_obs[o],xobs[o],yobs[o],alt_obs[o],az_obs[o],objects["HOLETYPE"][o],objects["OBJECT"][o]))
file.close()
print "wrote", ofilename
if __name__ == '__main__':
main()
| mit | 8,056,772,923,345,085,000 | 34.024862 | 259 | 0.568736 | false | 2.906028 | false | false | false |
dustin/couchapp | couchapp/localdoc.py | 1 | 16619 | # -*- coding: utf-8 -*-
#
# This file is part of couchapp released under the Apache 2 license.
# See the NOTICE for more information.
from __future__ import with_statement
import base64
import logging
import mimetypes
import os
import os.path
import re
import urlparse
import webbrowser
try:
import desktopcouch
except ImportError:
desktopcouch = None
from couchapp.errors import ResourceNotFound, AppError
from couchapp.macros import package_shows, package_views
from couchapp import util
if os.name == 'nt':
def _replace_backslash(name):
return name.replace("\\", "/")
else:
def _replace_backslash(name):
return name
re_comment = re.compile("((?:\/\*(?:[^*]|(?:\*+[^*\/]))*\*+\/)|(?:\/\/.*))")
DEFAULT_IGNORE = """[
// filenames matching these regexps will not be pushed to the database
// uncomment to activate; separate entries with ","
// ".*~$"
// ".*\\\\.swp$"
// ".*\\\\.bak$"
]"""
logger = logging.getLogger(__name__)
class LocalDoc(object):
def __init__(self, path, create=False, docid=None, is_ddoc=True):
self.docdir = path
self.ignores = []
self.is_ddoc = is_ddoc
ignorefile = os.path.join(path, '.couchappignore')
if os.path.exists(ignorefile):
# A .couchappignore file is a json file containing a
# list of regexps for things to skip
with open(ignorefile, 'r') as f:
self.ignores = util.json.loads(
util.remove_comments(f.read())
)
if not docid:
docid = self.get_id()
self.docid = docid
self._doc = {'_id': self.docid}
if create:
self.create()
def get_id(self):
"""
if there is an _id file, docid is extracted from it,
else we take the current folder name.
"""
idfile = os.path.join(self.docdir, '_id')
if os.path.exists(idfile):
docid = util.read(idfile).split("\n")[0].strip()
if docid: return docid
if self.is_ddoc:
return "_design/%s" % os.path.split(self.docdir)[1]
else:
return os.path.split(self.docdir)[1]
def __repr__(self):
return "<%s (%s/%s)>" % (self.__class__.__name__, self.docdir, self.docid)
def __str__(self):
return util.json.dumps(self.doc())
def create(self):
if not os.path.isdir(self.docdir):
logger.error("%s directory doesn't exist." % self.docdir)
rcfile = os.path.join(self.docdir, '.couchapprc')
ignfile = os.path.join(self.docdir, '.couchappignore')
if not os.path.isfile(rcfile):
util.write_json(rcfile, {})
util.write(ignfile, DEFAULT_IGNORE)
else:
logger.info("CouchApp already initialized in %s." % self.docdir)
def push(self, dbs, noatomic=False, browser=False, force=False,
noindex=False):
"""Push a doc to a list of database `dburls`. If noatomic is true
each attachments will be sent one by one."""
for db in dbs:
if noatomic:
doc = self.doc(db, with_attachments=False, force=force)
db.save_doc(doc, force_update=True)
attachments = doc.get('_attachments') or {}
for name, filepath in self.attachments():
if name not in attachments:
logger.debug("attach %s " % name)
db.put_attachment(doc, open(filepath, "r"),
name=name)
else:
doc = self.doc(db, force=force)
db.save_doc(doc, force_update=True)
indexurl = self.index(db.raw_uri, doc['couchapp'].get('index'))
if indexurl and not noindex:
if "@" in indexurl:
u = urlparse.urlparse(indexurl)
indexurl = urlparse.urlunparse((u.scheme, u.netloc.split("@")[-1],
u.path, u.params, u.query, u.fragment))
logger.info("Visit your CouchApp here:\n%s" % indexurl)
if browser:
self.browse_url(indexurl)
def browse(self, dbs):
for db in dbs:
doc = self.doc()
indexurl = self.index(db.raw_uri, doc['couchapp'].get('index'))
if indexurl:
self.browse_url(indexurl)
def browse_url(self, url):
if url.startswith("desktopcouch://"):
if not desktopcouch:
raise AppError("Desktopcouch isn't available on this"+
"machine. You can't access to %s" % url)
ctx = desktopcouch.local_files.DEFAULT_CONTEXT
bookmark_file = os.path.join(ctx.db_dir, "couchdb.html")
try:
username, password = re.findall("<!-- !!([^!]+)!!([^!]+)!! -->",
open(bookmark_file).read())[-1]
except ValueError:
raise IOError("Bookmark file is corrupt."+
"Username/password are missing.")
url = "http://%s:%s@localhost:%s/%s" % (username, password,
desktopcouch.find_port(), url[15:])
webbrowser.open_new_tab(url)
def attachment_stub(self, name, filepath):
att = {}
with open(filepath, "rb") as f:
re_sp = re.compile('\s')
att = {
"data": re_sp.sub('',base64.b64encode(f.read())),
"content_type": ';'.join(filter(None,
mimetypes.guess_type(name)))
}
return att
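    # Note: the stub built above follows CouchDB's inline attachment format,
    # e.g. {"data": "<base64 with whitespace stripped>", "content_type":
    # "text/html"}; mimetypes.guess_type() may also report an encoding, which
    # is why the non-empty parts are joined with ';'.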
def doc(self, db=None, with_attachments=True, force=False):
""" Function to reetrieve document object from
document directory. If `with_attachments` is True
attachments will be included and encoded"""
manifest = []
objects = {}
signatures = {}
attachments = {}
self._doc = {'_id': self.docid}
# get designdoc
self._doc.update(self.dir_to_fields(self.docdir, manifest=manifest))
if not 'couchapp' in self._doc:
self._doc['couchapp'] = {}
self.olddoc = {}
if db is not None:
try:
self.olddoc = db.open_doc(self._doc['_id'])
attachments = self.olddoc.get('_attachments') or {}
self._doc.update({'_rev': self.olddoc['_rev']})
except ResourceNotFound:
self.olddoc = {}
if 'couchapp' in self.olddoc:
old_signatures = self.olddoc['couchapp'].get('signatures',
{})
else:
old_signatures = {}
for name, filepath in self.attachments():
signatures[name] = util.sign(filepath)
if with_attachments and not old_signatures:
logger.debug("attach %s " % name)
attachments[name] = self.attachment_stub(name, filepath)
if old_signatures:
for name, signature in old_signatures.items():
cursign = signatures.get(name)
if not cursign:
logger.debug("detach %s " % name)
del attachments[name]
elif cursign != signature:
logger.debug("detach %s " % name)
del attachments[name]
else:
continue
if with_attachments:
for name, filepath in self.attachments():
if old_signatures.get(name) != signatures.get(name) or force:
logger.debug("attach %s " % name)
attachments[name] = self.attachment_stub(name, filepath)
self._doc['_attachments'] = attachments
self._doc['couchapp'].update({
'manifest': manifest,
'objects': objects,
'signatures': signatures
})
if self.docid.startswith('_design/'): # process macros
for funs in ['shows', 'lists', 'updates', 'filters',
'spatial']:
if funs in self._doc:
package_shows(self._doc, self._doc[funs], self.docdir,
objects)
if 'validate_doc_update' in self._doc:
tmp_dict = dict(validate_doc_update=self._doc[
"validate_doc_update"])
package_shows( self._doc, tmp_dict, self.docdir,
objects)
self._doc.update(tmp_dict)
if 'views' in self._doc:
# clean views
# we remove empty views and malformed from the list
# of pushed views. We also clean manifest
views = {}
dmanifest = {}
for i, fname in enumerate(manifest):
if fname.startswith("views/") and fname != "views/":
name, ext = os.path.splitext(fname)
if name.endswith('/'):
name = name[:-1]
dmanifest[name] = i
for vname, value in self._doc['views'].iteritems():
if value and isinstance(value, dict):
views[vname] = value
else:
del manifest[dmanifest["views/%s" % vname]]
self._doc['views'] = views
package_views(self._doc,self._doc["views"], self.docdir,
objects)
if "fulltext" in self._doc:
package_views(self._doc,self._doc["fulltext"], self.docdir,
objects)
return self._doc
def check_ignore(self, item):
for i in self.ignores:
match = re.match(i, item)
if match:
logger.debug("ignoring %s" % item)
return True
return False
def dir_to_fields(self, current_dir='', depth=0,
manifest=[]):
""" process a directory and get all members """
fields={}
if not current_dir:
current_dir = self.docdir
for name in os.listdir(current_dir):
current_path = os.path.join(current_dir, name)
rel_path = _replace_backslash(util.relpath(current_path, self.docdir))
if name.startswith("."):
continue
elif self.check_ignore(name):
continue
elif depth == 0 and name.startswith('_'):
# files starting with "_" are always "special"
continue
elif name == '_attachments':
continue
elif depth == 0 and (name == 'couchapp' or name == 'couchapp.json'):
# we are in app_meta
if name == "couchapp":
manifest.append('%s/' % rel_path)
content = self.dir_to_fields(current_path,
depth=depth+1, manifest=manifest)
else:
manifest.append(rel_path)
content = util.read_json(current_path)
if not isinstance(content, dict):
content = { "meta": content }
if 'signatures' in content:
del content['signatures']
if 'manifest' in content:
del content['manifest']
if 'objects' in content:
del content['objects']
if 'length' in content:
del content['length']
if 'couchapp' in fields:
fields['couchapp'].update(content)
else:
fields['couchapp'] = content
elif os.path.isdir(current_path):
manifest.append('%s/' % rel_path)
fields[name] = self.dir_to_fields(current_path,
depth=depth+1, manifest=manifest)
else:
logger.debug("push %s" % rel_path)
content = ''
if name.endswith('.json'):
try:
content = util.read_json(current_path)
except ValueError:
logger.error("Json invalid in %s" % current_path)
else:
try:
content = util.read(current_path).strip()
except UnicodeDecodeError, e:
logger.warning("%s isn't encoded in utf8" % current_path)
content = util.read(current_path, utf8=False)
try:
content.encode('utf-8')
except UnicodeError, e:
logger.warning(
"plan B didn't work, %s is a binary" % current_path)
logger.warning("use plan C: encode to base64")
content = "base64-encoded;%s" % base64.b64encode(
content)
# remove extension
name, ext = os.path.splitext(name)
if name in fields:
logger.warning(
"%(name)s is already in properties. Can't add (%(fqn)s)" % {
"name": name, "fqn": rel_path })
else:
manifest.append(rel_path)
fields[name] = content
return fields
def _process_attachments(self, path, vendor=None):
""" the function processing directory to yeld
attachments. """
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for dirname in dirs:
if self.check_ignore(dirname):
dirs.remove(dirname)
if files:
for filename in files:
if self.check_ignore(filename):
continue
else:
filepath = os.path.join(root, filename)
name = util.relpath(filepath, path)
if vendor is not None:
name = os.path.join('vendor', vendor, name)
name = _replace_backslash(name)
yield (name, filepath)
def attachments(self):
""" This function yield a tuple (name, filepath) corresponding
to each attachment (vendor included) in the couchapp. `name`
is the name of attachment in `_attachments` member and `filepath`
the path to the attachment on the disk.
attachments are processed later to allow us to send attachments inline
or one by one.
"""
# process main attachments
attachdir = os.path.join(self.docdir, "_attachments")
for attachment in self._process_attachments(attachdir):
yield attachment
vendordir = os.path.join(self.docdir, 'vendor')
if not os.path.isdir(vendordir):
logger.debug("%s don't exist" % vendordir)
return
for name in os.listdir(vendordir):
current_path = os.path.join(vendordir, name)
if os.path.isdir(current_path):
attachdir = os.path.join(current_path, '_attachments')
if os.path.isdir(attachdir):
for attachment in self._process_attachments(attachdir,
vendor=name):
yield attachment
def index(self, dburl, index):
if index is not None:
return "%s/%s/%s" % (dburl, self.docid, index)
elif os.path.isfile(os.path.join(self.docdir, "_attachments",
'index.html')):
return "%s/%s/index.html" % (dburl, self.docid)
return False
def document(path, create=False, docid=None, is_ddoc=True):
return LocalDoc(path, create=create, docid=docid, is_ddoc=is_ddoc)
| apache-2.0 | -4,242,378,768,020,816,400 | 37.738928 | 86 | 0.482761 | false | 4.511129 | false | false | false |
honnibal/spaCy | spacy/tests/vocab_vectors/test_lookups.py | 1 | 4956 | # coding: utf-8
from __future__ import unicode_literals
import pytest
from spacy.lookups import Lookups, Table
from spacy.strings import get_string_id
from spacy.vocab import Vocab
from ..util import make_tempdir
def test_lookups_api():
table_name = "test"
data = {"foo": "bar", "hello": "world"}
lookups = Lookups()
lookups.add_table(table_name, data)
assert len(lookups) == 1
assert table_name in lookups
assert lookups.has_table(table_name)
table = lookups.get_table(table_name)
assert table.name == table_name
assert len(table) == 2
assert table["hello"] == "world"
table["a"] = "b"
assert table["a"] == "b"
table = lookups.get_table(table_name)
assert len(table) == 3
with pytest.raises(KeyError):
lookups.get_table("xyz")
with pytest.raises(ValueError):
lookups.add_table(table_name)
table = lookups.remove_table(table_name)
assert table.name == table_name
assert len(lookups) == 0
assert table_name not in lookups
with pytest.raises(KeyError):
lookups.get_table(table_name)
def test_table_api():
table = Table(name="table")
assert table.name == "table"
assert len(table) == 0
assert "abc" not in table
data = {"foo": "bar", "hello": "world"}
table = Table(name="table", data=data)
assert len(table) == len(data)
assert "foo" in table
assert get_string_id("foo") in table
assert table["foo"] == "bar"
assert table[get_string_id("foo")] == "bar"
assert table.get("foo") == "bar"
assert table.get("abc") is None
table["abc"] = 123
assert table["abc"] == 123
assert table[get_string_id("abc")] == 123
table.set("def", 456)
assert table["def"] == 456
assert table[get_string_id("def")] == 456
def test_table_api_to_from_bytes():
data = {"foo": "bar", "hello": "world", "abc": 123}
table = Table(name="table", data=data)
table_bytes = table.to_bytes()
new_table = Table().from_bytes(table_bytes)
assert new_table.name == "table"
assert len(new_table) == 3
assert new_table["foo"] == "bar"
assert new_table[get_string_id("foo")] == "bar"
new_table2 = Table(data={"def": 456})
new_table2.from_bytes(table_bytes)
assert len(new_table2) == 3
assert "def" not in new_table2
@pytest.mark.skip(reason="This fails on Python 3.5")
def test_lookups_to_from_bytes():
lookups = Lookups()
lookups.add_table("table1", {"foo": "bar", "hello": "world"})
lookups.add_table("table2", {"a": 1, "b": 2, "c": 3})
lookups_bytes = lookups.to_bytes()
new_lookups = Lookups()
new_lookups.from_bytes(lookups_bytes)
assert len(new_lookups) == 2
assert "table1" in new_lookups
assert "table2" in new_lookups
table1 = new_lookups.get_table("table1")
assert len(table1) == 2
assert table1["foo"] == "bar"
table2 = new_lookups.get_table("table2")
assert len(table2) == 3
assert table2["b"] == 2
assert new_lookups.to_bytes() == lookups_bytes
@pytest.mark.skip(reason="This fails on Python 3.5")
def test_lookups_to_from_disk():
lookups = Lookups()
lookups.add_table("table1", {"foo": "bar", "hello": "world"})
lookups.add_table("table2", {"a": 1, "b": 2, "c": 3})
with make_tempdir() as tmpdir:
lookups.to_disk(tmpdir)
new_lookups = Lookups()
new_lookups.from_disk(tmpdir)
assert len(new_lookups) == 2
assert "table1" in new_lookups
assert "table2" in new_lookups
table1 = new_lookups.get_table("table1")
assert len(table1) == 2
assert table1["foo"] == "bar"
table2 = new_lookups.get_table("table2")
assert len(table2) == 3
assert table2["b"] == 2
@pytest.mark.skip(reason="This fails on Python 3.5")
def test_lookups_to_from_bytes_via_vocab():
table_name = "test"
vocab = Vocab()
vocab.lookups.add_table(table_name, {"foo": "bar", "hello": "world"})
assert len(vocab.lookups) == 1
assert table_name in vocab.lookups
vocab_bytes = vocab.to_bytes()
new_vocab = Vocab()
new_vocab.from_bytes(vocab_bytes)
assert len(new_vocab.lookups) == 1
assert table_name in new_vocab.lookups
table = new_vocab.lookups.get_table(table_name)
assert len(table) == 2
assert table["hello"] == "world"
assert new_vocab.to_bytes() == vocab_bytes
@pytest.mark.skip(reason="This fails on Python 3.5")
def test_lookups_to_from_disk_via_vocab():
table_name = "test"
vocab = Vocab()
vocab.lookups.add_table(table_name, {"foo": "bar", "hello": "world"})
assert len(vocab.lookups) == 1
assert table_name in vocab.lookups
with make_tempdir() as tmpdir:
vocab.to_disk(tmpdir)
new_vocab = Vocab()
new_vocab.from_disk(tmpdir)
assert len(new_vocab.lookups) == 1
assert table_name in new_vocab.lookups
table = new_vocab.lookups.get_table(table_name)
assert len(table) == 2
assert table["hello"] == "world"
| mit | 1,471,401,789,147,549,000 | 32.04 | 73 | 0.628531 | false | 3.243455 | true | false | false |
peterfpeterson/mantid | scripts/Muon/GUI/Common/home_tab/home_tab_presenter.py | 3 | 3408 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from abc import ABCMeta, abstractmethod
from mantidqt.utils.observer_pattern import Observer, GenericObserver
class HomeTabSubWidget:
__metaclass__ = ABCMeta
"""
Abstract base class which all sub-widgets must inherit from. This is used to
enforce a common interface so that the home tab can keep a list of sub-widgets without
specifically naming each one. Since each sub-widget shares a common model (the context)
all the home tab needs to do is instruct them to update from their own model.
"""
@abstractmethod
def update_view_from_model(self):
# update from model
pass
class HomeTabPresenter(object):
def __init__(self, view, model, subwidgets):
self._view = view
self._model = model
self._subwidgets = subwidgets
self.instrumentObserver = HomeTabPresenter.InstrumentObserver(self)
self.loadObserver = HomeTabPresenter.LoadObserver(self)
self.groupingObserver = HomeTabPresenter.GroupingObserver(self)
self.enable_observer = HomeTabPresenter.EnableWidgetObserver(self)
self.disable_observer = HomeTabPresenter.DisableWidgetObserver(self)
self.update_view_from_model_observer = GenericObserver(self.update_all_widgets)
self.update_all_widgets()
def show(self):
self._view.show()
def update_all_widgets(self):
"""
Update all widgets from the context.
"""
for subwidget in self._subwidgets:
subwidget.update_view_from_model()
def enable_home_tab(self):
self._view.setEnabled(True)
def disable_home_tab(self):
self._view.setEnabled(False)
# ------------------------------------------------------------------------------------------------------------------
# Observer / Observable
# ------------------------------------------------------------------------------------------------------------------
class InstrumentObserver(Observer):
def __init__(self, outer):
Observer.__init__(self)
self.outer = outer
def update(self, observable, arg):
self.outer.update_all_widgets()
class LoadObserver(Observer):
def __init__(self, outer):
Observer.__init__(self)
self.outer = outer
def update(self, observable, arg):
self.outer.update_all_widgets()
class GroupingObserver(Observer):
def __init__(self, outer):
Observer.__init__(self)
self.outer = outer
def update(self, observable, arg):
self.outer.update_all_widgets()
class EnableWidgetObserver(Observer):
def __init__(self, outer):
Observer.__init__(self)
self.outer = outer
def update(self, observable, arg):
self.outer.enable_home_tab()
class DisableWidgetObserver(Observer):
def __init__(self, outer):
Observer.__init__(self)
self.outer = outer
def update(self, observable, arg):
self.outer.disable_home_tab()
| gpl-3.0 | 5,089,843,358,528,657,000 | 31.457143 | 120 | 0.597124 | false | 4.346939 | false | false | false |
dankolbman/travel_blahg | blog/models.py | 1 | 3108 | import hashlib, random
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from geoalchemy2 import Geometry
db = SQLAlchemy()
class User(db.Model, UserMixin):
__tablename__ = "user"
id = db.Column(db.Integer(), primary_key=True)
username = db.Column(db.String(32))
password = db.Column(db.String())
email = db.Column(db.String())
api_key = db.Column(db.String(64))
submitted = db.relationship('Post', backref='author', lazy='dynamic')
pings = db.relationship('Ping', backref='author', lazy='dynamic')
def __init__(self, username, password, email):
self.username = username
self.email = email
self.set_password(password)
self.new_api_key()
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, value):
return check_password_hash(self.password, value)
def new_api_key(self):
self.api_key = hashlib.sha224(str(random.getrandbits(256)).encode('utf-8')).hexdigest()
def is_authenticated(self):
if isinstance(self, AnonymousUserMixin):
return False
else:
return True
def is_active(self):
return True
def is_anonymous(self):
if isinstance(self, AnonymousUserMixin):
return True
else:
return False
def get_id(self):
return self.id
def __repr__(self):
return '<User %r>' % self.username
class Post(db.Model):
""" A post containing location data """
__tablename__ = "post"
id = db.Column(db.Integer, primary_key=True)
post_type = db.Column(db.String(32), nullable=False)
title = db.Column(db.String(256), nullable=False)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow(), nullable=False)
loc = db.Column(Geometry('POINT'), nullable=False)
latitude = db.Column(db.Float, default=43.165556, nullable=False)
longitude = db.Column(db.Float, default=-77.611389, nullable=False)
private = db.Column(db.Boolean, default=False, nullable=False)
author_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True, nullable=False)
__mapper_args__ = {'polymorphic_on': post_type }
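    # Single-table inheritance: TextPost and ImagePost rows share this "post"
    # table and are discriminated by the post_type column.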
def get_id(self):
return self.id
def get_location(self):
return self.loc
def __repr__(self):
return '<Post {0}>'.format(self.title)
class TextPost(Post):
""" A blog post """
__mapper_args__ = {'polymorphic_identity': 'text'}
text = db.Column(db.Text)
class ImagePost(Post):
""" An image post """
__mapper_args__ = {'polymorphic_identity': 'image'}
large = db.Column(db.Text)
medium = db.Column(db.Text)
small = db.Column(db.Text)
caption = db.Column(db.String(512))
class Ping(db.Model):
__tablename__ = "ping"
id = db.Column(db.Integer, primary_key=True)
author_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow())
accuracy = db.Column(db.Float, default=100.0, nullable=False)
loc = db.Column(Geometry('POINT'))
| mit | 5,491,943,983,080,167,000 | 30.08 | 92 | 0.686615 | false | 3.407895 | false | false | false |
hideshis/scripts_for_research | bug_report_parser/bug_report_activity_parser.py | 1 | 4453 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script identifies when the bug is fixed, i.e., when the state of the bug
becomes "RESOLVED", "VERIFIED", or "CLOSED".
First, this script extracts info about transitions of the state of the bug.
This is done by parsing the history of the bug.
After that, this script identifies when the bug is fixed.
input: A file that records bug history (not bug report). It is obtained by using Masaki's script.
output: bug ID and the date that the bug is fixed.
"""
import os
import sys
import commands
import subprocess
import re
from lxml import html
def str_format(src):
src = src.replace("\n", "")
src = src.replace(" ", "")
return src
def extraction(file_name):
html_string = open(file_name).read()
tree = html.fromstring(html_string)
# check if the activity exists
try:
presence = tree.xpath('//*[@id="bugzilla-body"]/p[2]/text()')
if presence[0] == "\n No changes have been made to this bug yet.\n ":
match = re.findall(r'[0-9]+', file_name)
bug_id = match[0]
return_list = []
return_list.append(bug_id)
return_list.append("-1")
return return_list
except IndexError:
presence = tree.xpath('//*[@id="error_msg"]/text()[1]')
if "You are not authorized to access bug" in presence:
match = re.findall(r'[0-9]+', file_name)
bug_id = match[0]
return_list = []
return_list.append(bug_id)
return_list.append("-1")
return return_list
tr_len = len(tree.xpath('//*[@id="bugzilla-body"]/table/tr'))
if tr_len == 0:
return
fixed_date = ""
for tr_num in range(2, tr_len+1):
activity_detail = {"who":"", "when":"", "what":"", "removed":"", "added":""}
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td'
td_len = len(tree.xpath(xpath))
if td_len == 3:
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[1]/text()'
activity_detail["what"] = str_format(tree.xpath(xpath)[0])
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[2]/text()'
activity_detail["removed"] = str_format(tree.xpath(xpath)[0])
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[3]/text()'
activity_detail["added"] = str_format(tree.xpath(xpath)[0])
activity_detail["when"] = when
if td_len == 5:
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[1]/text()'
activity_detail["who"] = str_format(tree.xpath(xpath)[0])
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[2]/text()'
activity_detail["when"] = str_format(tree.xpath(xpath)[0])
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[3]/text()'
activity_detail["what"] = str_format(tree.xpath(xpath)[0])
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[4]/text()'
activity_detail["removed"] = str_format(tree.xpath(xpath)[0])
xpath = '//*[@id="bugzilla-body"]/table/tr' + '[' + str(tr_num) + ']' + '/td[5]/text()'
activity_detail["added"] = str_format(tree.xpath(xpath)[0])
when = activity_detail["when"]
if (activity_detail["what"] == "Status"):
if (activity_detail["added"] == "REOPENED"):
fixed_date = ""
elif (activity_detail["added"] == "RESOLVED") or (activity_detail["added"] == "VERIFIED") or (activity_detail["added"] == "CLOSED"):
r = re.compile("\d{4}-\d{2}-\d{2}")
fixed_date = r.findall(activity_detail["when"])[0]
fixed_date = fixed_date.replace("-", "/")
r = re.compile("\d{2}:\d{2}")
fixed_time = r.findall(activity_detail["when"])[0]
fixed_date = fixed_date + " " + fixed_time
match = re.findall(r'[0-9]+', file_name)
bug_id = match[0]
return_list = []
return_list.append(bug_id)
if fixed_date == "":
return_list.append("-1")
else:
return_list.append(fixed_date)
return return_list
| mit | -7,574,184,957,669,207,000 | 44.385417 | 144 | 0.522569 | false | 3.394055 | false | false | false |
aistis-/checksum-compare | compare_files.py | 1 | 1084 | from os import listdir, rename, walk
from os.path import isfile, join, basename, abspath, dirname
import sys
import hashlib
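# Usage sketch (assumed from the sys.argv handling below):
#   python compare_files.py <original_dir> <recovered_dir>
# Recovered files whose SHA-1 matches an original file are renamed in place
# to the original file's name.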
def sha1OfFile(filepath):
filepath = abspath(filepath)
with open(filepath, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
if sys.argv[1][len(sys.argv[1]) - 1] == '/':
sys.argv[1] = sys.argv[1][:-1]
if sys.argv[2][len(sys.argv[2]) - 1] == '/':
sys.argv[2] = sys.argv[2][:-1]
original_files = {}
for root, subFolders, files in walk(sys.argv[1]):
for f in files:
p = join(root, f)
f = abspath(p)
original_files[sha1OfFile(f)] = basename(f)
found = 0;
not_found = 0;
for root, subFolders, files in walk(sys.argv[2]):
for f in files:
p = join(root, f)
f = abspath(p)
if isfile(join(sys.argv[2], f)):
if sha1OfFile(f) in original_files:
found += 1
rename(f, dirname(abspath(f)) + '/' + original_files[sha1OfFile(f)])
else:
not_found += 1
print 'Total original files: ' + str(len(original_files))
print 'Total recovered files found: ' + str(found)
print 'Total not recovered files found: ' + str(not_found)
| mit | -5,876,263,636,446,501,000 | 23.636364 | 72 | 0.647601 | false | 2.669951 | false | false | false |
ezietsman/msc-thesis | images/august_phot/trailed_phot_FT.py | 1 | 2444 | # script to calculate trailed FT of the lightcurve
import astronomy as ast
import pylab as pl
import scipy.stats as sci
#X2 = pl.load('run2_flat.dat')
#X = pl.load('ec2117ans_2_cc.dat')
X = pl.load('S7651_FF.dat')
x = X[:,0]
y = X[:,1]
z = X[:,2]
# original lightcurve
#z = X2[:,2] + X2[:,1]
N = len(x)
fitlength = 100
#x -= int(x[0])
# ephemeris
T0 = 2453964.3307097
P = 0.1545255
#x = (x - T0) / P
ft = []
date = []
peaks = []
f0 = 3000
f1 = 4000
for i in range(0,N,int(fitlength/2.0)):
if i + fitlength/2.0 <= len(x):
print 'somewhere'
date.append(pl.average(x[i:i + fitlength]))
f,a = ast.signal.dft(x[i:i+fitlength],y[i:i+fitlength],f0,f1,1)
ft.append(a)
#sort,argsort = sci.fastsort(a)
#peaks.append(f[argsort[-1]])
# / len(x[i:i+fitlength]))
print i, i+fitlength
else:
print 'finally'
#x = fitwave(y[i:len(t)+1],t[i:len(t)+1],freq)
f,a = ast.signal.dft(x[i:len(x)+1],y[i:len(x)+1],f0,f1,1)
ft.append(a)
#sort,argsort = sci.fastsort(a)
#peaks.append(f[argsort[-1]])
date.append(pl.average(x[i:-1]))# / len(x[i:-1]))
print i
print '\n\n\n\n',N
print pl.shape(ft)
#pl.figure(figsize=(8,4))
pl.subplots_adjust(hspace=0.001)
ax1=pl.subplot(211)
# calculate phase
x = (x - T0) / P
date = (pl.array(date) - T0) / P
pl.scatter(x,y+z,marker='o',s=0.1)
#yl = pl.ylim()
#pl.ylim(yl[1],yl[0])
pl.xlim(date[0],date[-1])
pl.ylabel('Intensity')
yt = pl.yticks()
pl.yticks(yt[0][1:-1])
pl.subplot(212)
#im = pl.imshow(pl.array(ft).transpose(),aspect='auto',interpolation='bilinear',extent=(date[0],date[-1],f1,f0,),cmap=pl.cm.jet)
levels=pl.arange(0.000,0.00000005,0.000000001)
im = pl.contourf(pl.array(ft).transpose(),levels=levels,extent=(date[0],date[-1],f0,f1),cmap=pl.cm.jet)
pl.colorbar(im,orientation='horizontal',shrink=1.0)
#pl.xlabel('HJD (+2453965)')
pl.xlabel('Orbital Phase')
pl.ylabel('Frequency (cycles/day)')
yt = pl.yticks()
pl.yticks(yt[0][1:-1])
pl.subplots_adjust(bottom=0.24,right=0.98,left=0.15)
xticklabels = ax1.get_xticklabels()
pl.setp(xticklabels, visible=False)
#pl.figure()
#pl.plot(date,peaks,'-')
##im = pl.contourf(pl.array(ft).transpose(),levels=levels,extent=(date[0],date[-1],f1,f0),cmap=pl.cm.jet,origin='lower')
pl.show()
| mit | 6,382,390,253,382,159,000 | 22.960784 | 128 | 0.580196 | false | 2.424603 | false | false | false |
kxepal/replipy | replipy/storage.py | 1 | 7359 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
#
import base64
import hashlib
import pickle
import time
import uuid
from abc import ABCMeta, abstractmethod
from collections import defaultdict
_MetaDatabase = ABCMeta('_MetaDatabase', (object,), {})
class ABCDatabase(_MetaDatabase):
class Conflict(Exception):
"""Raises in case of conflict updates"""
class NotFound(Exception):
"""Raises in case attempt to query on missed document"""
def __init__(self, name):
self._name = name
self._start_time = int(time.time() * 10**6)
self._update_seq = 0
@property
def name(self):
"""Returns database symbolic name as string"""
return self._name
@property
def start_time(self):
"""Returns database start time in microseconds"""
return self._start_time
@property
def update_seq(self):
"""Returns current update sequence value"""
return self._update_seq
def info(self):
"""Returns database information object as dict"""
return {
'db_name': self.name,
'instance_start_time': str(self.start_time),
'update_seq': self.update_seq
}
@abstractmethod
def contains(self, idx, rev=None):
"""Verifies that document with specified idx exists"""
@abstractmethod
def check_for_conflicts(self, idx, rev):
"""Check that specified idx and rev provides no conflicts
or raises Conflict exception otherwise"""
@abstractmethod
def load(self, idx, rev=None):
"""Returns document by specified idx"""
@abstractmethod
def store(self, doc, rev=None):
"""Creates document or updates if rev specified"""
@abstractmethod
def remove(self, idx, rev):
"""Removes document by specified idx and rev"""
@abstractmethod
def revs_diff(self, idrevs):
"""Returns missed revisions for specified id - revs mapping"""
@abstractmethod
def bulk_docs(self, docs, new_edits=True):
"""Bulk update docs"""
@abstractmethod
def ensure_full_commit(self):
"""Ensures that all changes are actually stored on disk"""
@abstractmethod
def changes(self, since=0, feed='normal', style='all_docs', filter=None):
"""Ensures that all changes are actually stored on disk"""
@abstractmethod
def add_attachment(self, doc, name, data, ctype='application/octet-stream'):
"""Adds attachment to specified document"""
class MemoryDatabase(ABCDatabase):
def __init__(self, *args, **kwargs):
super(MemoryDatabase, self).__init__(*args, **kwargs)
self._docs = {}
self._changes = {}
def _new_rev(self, doc):
oldrev = doc.get('_rev')
if oldrev is None:
seq, _ = 0, None
else:
seq, _ = oldrev.split('-', 1)
seq = int(seq)
sig = hashlib.md5(pickle.dumps(doc)).hexdigest()
newrev = '%d-%s' % (seq + 1, sig)
return newrev.lower()
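    # e.g. a newly created doc gets rev '1-<md5>', and updating a doc whose
    # rev is '1-...' yields '2-<md5>' (the md5 of the pickled doc).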
def check_for_conflicts(self, idx, rev):
if self.contains(idx):
if rev is None:
if idx.startswith('_local/'):
return
raise self.Conflict('Document update conflict')
elif not self.contains(idx, rev):
raise self.Conflict('Document update conflict')
elif rev is not None:
raise self.Conflict('Document update conflict')
def contains(self, idx, rev=None):
if idx not in self._docs:
return False
doc = self._docs[idx]
if rev is None:
return not doc.get('_deleted', False)
return self._docs[idx]['_rev'] == rev
def load(self, idx, rev=None):
if not self.contains(idx, rev):
raise self.NotFound(idx)
return self._docs[idx]
def store(self, doc, rev=None, new_edits=True):
if '_id' not in doc:
doc['_id'] = str(uuid.uuid4()).lower()
if rev is None:
rev = doc.get('_rev')
idx = doc['_id']
if new_edits:
self.check_for_conflicts(idx, rev)
doc['_rev'] = self._new_rev(doc)
else:
assert rev, 'Document revision missed'
doc['_rev'] = rev
idx, rev = doc['_id'], doc['_rev']
self._docs[idx] = doc
self._update_seq += 1
self._changes[idx] = self._update_seq
return idx, rev
def remove(self, idx, rev):
if not self.contains(idx):
raise self.NotFound(idx)
elif not self.contains(idx, rev):
raise self.Conflict('Document update conflict')
doc = {
'_id': idx,
'_rev': rev,
'_deleted': True
}
return self.store(doc, rev)
def revs_diff(self, idrevs):
res = defaultdict(dict)
for idx, revs in idrevs.items():
missing = []
if not self.contains(idx):
missing.extend(revs)
res[idx]['missing'] = missing
continue
doc = self._docs[idx]
for rev in revs:
if doc['_rev'] != rev:
missing.append(rev)
if missing:
res[idx]['missing'] = missing
return res
def bulk_docs(self, docs, new_edits=True):
res = []
for doc in docs:
try:
idx, rev = self.store(doc, None, new_edits)
res.append({
'ok': True,
'id': idx,
'rev': rev
})
except Exception as err:
res.append({'id': doc.get('_id'),
'error': type(err).__name__,
'reason': str(err)})
return res
def ensure_full_commit(self):
return {
'ok': True,
'instance_start_time': self.info()['instance_start_time']
}
def changes(self, since=0, feed='normal', style='all_docs', filter=None):
changes = sorted(self._changes.items(), key=lambda i: i[1])
if since:
for idx, seq in changes:
if since <= seq:
yield self.make_event(idx, seq)
break
for idx, seq in changes:
yield self.make_event(idx, seq)
def add_attachment(self, doc, name, data, ctype='application/octet-stream'):
        atts = doc.setdefault('_attachments', {})  # ensure the dict exists
digest = 'md5-%s' % base64.b64encode(hashlib.md5(data).digest()).decode()
if doc.get('_rev'):
revpos = int(doc['_rev'].split('-')[0]) + 1
else:
revpos = 1
atts[name] = {
'data': data,
'digest': digest,
'length': len(data),
'content_type': ctype,
'revpos': revpos
}
def make_event(self, idx, seq):
doc = self._docs[idx]
event = {
'id': idx,
'changes': [{'rev': doc['_rev']}],
'seq': seq
}
if doc.get('_deleted'):
event['_deleted'] = True
return event
| mit | 2,523,412,038,708,018,700 | 28.793522 | 81 | 0.533361 | false | 4.202741 | false | false | false |
pidydx/artifacts | artifacts/source_type.py | 1 | 14168 | # -*- coding: utf-8 -*-
r"""The source type objects.
The source type objects define the source of the artifact data. In earlier
versions of the artifact definitions collector definitions had a similar
purpose as the source type. Currently the following source types are defined:
* artifact; the source is one or more artifact definitions;
* file; the source is one or more files;
* path; the source is one or more paths;
* Windows Registry key; the source is one or more Windows Registry keys;
* Windows Registry value; the source is one or more Windows Registry values;
* WMI query; the source is a Windows Management Instrumentation query.
The difference between the file and path source types is that file should
be used to define file entries that contain data and path, file entries that
define a location. E.g. on Windows %SystemRoot% could be considered a path
artifact definition, pointing to a location e.g. C:\Windows, whereas
C:\Windows\System32\winevt\Logs\AppEvent.evt would be a file artifact definition,
pointing to the Application Event Log file.
"""
import abc
from artifacts import definitions
from artifacts import errors
class SourceType(object):
"""Class that implements the artifact definition source type interface."""
TYPE_INDICATOR = None
@property
def type_indicator(self):
"""The type indicator.
Raises:
NotImplementedError: if the type indicator is not defined.
"""
if not self.TYPE_INDICATOR:
raise NotImplementedError(u'Invalid source type missing type indicator.')
return self.TYPE_INDICATOR
@abc.abstractmethod
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
class ArtifactGroupSourceType(SourceType):
"""Class that implements the artifact group source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_ARTIFACT_GROUP
def __init__(self, names=None):
"""Initializes the source type object.
Args:
names: optional list of artifact definition names. The default is None.
Raises:
FormatError: when artifact names is not set.
"""
if not names:
raise errors.FormatError(u'Missing names value.')
super(ArtifactGroupSourceType, self).__init__()
self.names = names
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
return {u'names': self.names}
class FileSourceType(SourceType):
"""Class that implements the file source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_FILE
def __init__(self, paths=None, separator=u'/'):
"""Initializes the source type object.
Args:
paths: optional list of paths. The paths are considered relative
to the root of the file system. The default is None.
separator: optional string containing the path segment separator.
The default is /.
Raises:
FormatError: when paths is not set.
"""
if not paths:
raise errors.FormatError(u'Missing paths value.')
super(FileSourceType, self).__init__()
self.paths = paths
self.separator = separator
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
source_type_attributes = {u'paths': self.paths}
if self.separator != u'/':
source_type_attributes[u'separator'] = self.separator
return source_type_attributes
class CommandSourceType(SourceType):
"""Class that implements the command source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_COMMAND
def __init__(self, args=None, cmd=None):
"""Initializes the source type object.
Args:
args: list of strings that will be passed as arguments to the command.
cmd: string representing the command to run.
Raises:
FormatError: when args or cmd is not set.
"""
if args is None or cmd is None:
raise errors.FormatError(u'Missing args or cmd value.')
super(CommandSourceType, self).__init__()
self.args = args
self.cmd = cmd
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
return {u'cmd': self.cmd, u'args': self.args}
class PathSourceType(SourceType):
"""Class that implements the path source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_PATH
def __init__(self, paths=None, separator=u'/'):
"""Initializes the source type object.
Args:
paths: optional list of paths. The paths are considered relative
to the root of the file system. The default is None.
separator: optional string containing the path segment separator.
The default is /.
Raises:
FormatError: when paths is not set.
"""
if not paths:
raise errors.FormatError(u'Missing paths value.')
super(PathSourceType, self).__init__()
self.paths = paths
self.separator = separator
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
source_type_attributes = {u'paths': self.paths}
if self.separator != u'/':
source_type_attributes[u'separator'] = self.separator
return source_type_attributes
class DirectorySourceType(SourceType):
"""Class that implements the directory source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_DIRECTORY
def __init__(self, paths=None, separator=u'/'):
"""Initializes the source type object.
Args:
paths: optional list of paths. The paths are considered relative
to the root of the file system. The default is None.
separator: optional string containing the path segment separator.
The default is /.
Raises:
FormatError: when paths is not set.
"""
if not paths:
raise errors.FormatError(u'Missing directory value.')
super(DirectorySourceType, self).__init__()
self.paths = paths
self.separator = separator
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
source_type_attributes = {u'paths': self.paths}
if self.separator != u'/':
source_type_attributes[u'separator'] = self.separator
return source_type_attributes
class WindowsRegistryKeySourceType(SourceType):
"""Class that implements the Windows Registry key source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY
VALID_PREFIXES = [
r'HKEY_LOCAL_MACHINE',
r'HKEY_USERS',
r'HKEY_CLASSES_ROOT',
r'%%current_control_set%%',
]
def __init__(self, keys=None):
"""Initializes the source type object.
Args:
keys: optional list of key paths. The key paths are considered relative
to the root of the Windows Registry. The default is None.
Raises:
FormatError: when keys is not set.
"""
if not keys:
raise errors.FormatError(u'Missing keys value.')
if not isinstance(keys, list):
raise errors.FormatError(u'keys must be a list')
for key in keys:
self.ValidateKey(key)
super(WindowsRegistryKeySourceType, self).__init__()
self.keys = keys
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
return {u'keys': self.keys}
@classmethod
def ValidateKey(cls, key_path):
"""Validates this key against supported key names.
Args:
key_path: string containing the path fo the Registry key.
Raises:
FormatError: when key is not supported.
"""
for prefix in cls.VALID_PREFIXES:
if key_path.startswith(prefix):
return
if key_path.startswith(u'HKEY_CURRENT_USER\\'):
raise errors.FormatError(
u'HKEY_CURRENT_USER\\ is not supported instead use: '
u'HKEY_USERS\\%%users.sid%%\\')
    raise errors.FormatError(u'Unsupported Registry key path: {0}'.format(
key_path))
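    # For example (illustrative): 'HKEY_LOCAL_MACHINE\\System\\...' or
    # '%%current_control_set%%\\Control\\...' pass validation, while
    # 'HKEY_CURRENT_USER\\Software\\...' raises a FormatError asking for the
    # 'HKEY_USERS\\%%users.sid%%\\' form instead.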
class WindowsRegistryValueSourceType(SourceType):
"""Class that implements the Windows Registry value source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE
def __init__(self, key_value_pairs=None):
"""Initializes the source type object.
Args:
key_value_pairs: optional list of key path and value name pairs.
The key paths are considered relative to the root
of the Windows Registry. The default is None.
Raises:
FormatError: when key value pairs is not set.
"""
if not key_value_pairs:
raise errors.FormatError(u'Missing key value pairs value.')
if not isinstance(key_value_pairs, list):
raise errors.FormatError(u'key_value_pairs must be a list')
for pair in key_value_pairs:
if not isinstance(pair, dict):
raise errors.FormatError(u'key_value_pair must be a dict')
if set(pair.keys()) != set([u'key', u'value']):
error_message = (
u'key_value_pair missing "key" and "value" keys, got: {0}'
).format(key_value_pairs)
raise errors.FormatError(error_message)
WindowsRegistryKeySourceType.ValidateKey(pair['key'])
super(WindowsRegistryValueSourceType, self).__init__()
self.key_value_pairs = key_value_pairs
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
return {u'key_value_pairs': self.key_value_pairs}
class WMIQuerySourceType(SourceType):
"""Class that implements the WMI query source type."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_WMI_QUERY
def __init__(self, query=None, base_object=None):
"""Initializes the source type object.
Args:
      query: optional string containing the WMI query. The default is None.
      base_object: optional string containing the WMI base object. The
                   default is None.
Raises:
FormatError: when query is not set.
"""
if not query:
raise errors.FormatError(u'Missing query value.')
super(WMIQuerySourceType, self).__init__()
self.base_object = base_object
self.query = query
def CopyToDict(self):
"""Copies the source type to a dictionary.
Returns:
A dictionary containing the source type attributes.
"""
source_type_attributes = {u'query': self.query}
if self.base_object:
source_type_attributes[u'base_object'] = self.base_object
return source_type_attributes
class SourceTypeFactory(object):
"""Class that implements a source type factory."""
_source_type_classes = {
definitions.TYPE_INDICATOR_ARTIFACT_GROUP: ArtifactGroupSourceType,
definitions.TYPE_INDICATOR_COMMAND: CommandSourceType,
definitions.TYPE_INDICATOR_DIRECTORY: DirectorySourceType,
definitions.TYPE_INDICATOR_FILE: FileSourceType,
definitions.TYPE_INDICATOR_PATH: PathSourceType,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY:
WindowsRegistryKeySourceType,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE:
WindowsRegistryValueSourceType,
definitions.TYPE_INDICATOR_WMI_QUERY: WMIQuerySourceType,
}
@classmethod
def CreateSourceType(cls, type_indicator, attributes):
"""Creates a source type object.
Args:
type_indicator: the source type indicator.
attributes: a dictionary containing the source attributes.
Returns:
A source type object (instance of SourceType).
Raises:
FormatError: if the type indicator is not set or unsupported,
or if required attributes are missing.
"""
if type_indicator not in cls._source_type_classes:
raise errors.FormatError(u'Unsupported type indicator: {0}.'.format(
type_indicator))
return cls._source_type_classes[type_indicator](**attributes)
@classmethod
def DeregisterSourceType(cls, source_type_class):
"""Deregisters a source type.
The source types are identified based on their type indicator.
Args:
source_type_class: the source type (subclass of SourceType).
Raises:
KeyError: if a source type is not set for the corresponding type
indicator.
"""
if source_type_class.TYPE_INDICATOR not in cls._source_type_classes:
raise KeyError(u'Source type not set for type: {0}.'.format(
source_type_class.TYPE_INDICATOR))
del cls._source_type_classes[source_type_class.TYPE_INDICATOR]
@classmethod
def GetSourceTypes(cls):
"""Retrieves the source types.
Returns:
A list of source types (subclasses of SourceType).
"""
return cls._source_type_classes.values()
@classmethod
def GetSourceTypeIndicators(cls):
"""Retrieves the source type indicators.
Returns:
A list of source type indicators.
"""
return cls._source_type_classes.keys()
@classmethod
def RegisterSourceType(cls, source_type_class):
"""Registers a source type.
The source types are identified based on their type indicator.
Args:
source_type_class: the source type (subclass of SourceType).
Raises:
      KeyError: if a source type is already set for the corresponding
        type indicator.
"""
if source_type_class.TYPE_INDICATOR in cls._source_type_classes:
raise KeyError(u'Source type already set for type: {0}.'.format(
source_type_class.TYPE_INDICATOR))
cls._source_type_classes[source_type_class.TYPE_INDICATOR] = (
source_type_class)
@classmethod
def RegisterSourceTypes(cls, source_type_classes):
"""Registers source types.
The source types are identified based on their type indicator.
Args:
source_type_classes: a list of source types (instances of SourceType).
"""
for source_type_class in source_type_classes:
cls.RegisterSourceType(source_type_class)
| apache-2.0 | 3,287,414,802,204,713,500 | 28.827368 | 79 | 0.683724 | false | 4.215412 | false | false | false |
mikey1234/script.module.urlresolver | lib/urlresolver/plugins/hostingcup.py | 3 | 3185 | '''
    hostingcup urlresolver plugin
Copyright (C) 2011 cyrus007
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import re
import urllib2
from urlresolver import common
from vidxden import unpack_js
class HostingcupResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "hostingcup"
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
self.pattern = 'http://(www.)?hostingcup.com/[0-9A-Za-z]+'
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
try:
html = self.net.http_GET(web_url).content
page = ''.join(html.splitlines()).replace('\t','')
r = re.search("return p\}\(\'(.+?)\',\d+,\d+,\'(.+?)\'", page)
if r:
p, k = r.groups()
else:
raise Exception ('packed javascript embed code not found')
decrypted_data = unpack_js(p, k)
r = re.search('file.\',.\'(.+?).\'', decrypted_data)
if not r:
r = re.search('src="(.+?)"', decrypted_data)
if r:
stream_url = r.group(1)
else:
raise Exception ('stream url not found')
return stream_url
except urllib2.URLError, e:
common.addon.log_error('Hostingcup: got http error %d fetching %s' %
(e.code, web_url))
common.addon.show_small_popup('Error','Http error: '+str(e), 5000, error_logo)
return self.unresolvable(code=3, msg=e)
except Exception, e:
common.addon.log_error('**** Hostingcup Error occured: %s' % e)
common.addon.show_small_popup(title='[B][COLOR white]HOSTINGCUP[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
return self.unresolvable(code=0, msg=e)
def get_url(self, host, media_id):
return 'http://vidpe.com/%s' % media_id
def get_host_and_id(self, url):
r = re.search('//(.+?)/([0-9A-Za-z]+)', url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return re.match(self.pattern, url) or 'hostingcup' in host
| gpl-2.0 | -7,937,407,514,562,584,000 | 36.034884 | 152 | 0.603454 | false | 3.78266 | false | false | false |
mdprewitt/sigmund_droid | color.py | 1 | 2645 | #!/usr/bin/env python3
from ev3dev.ev3 import *
from navigation import speak, abort_on_button, sleep
from setup_sample_data import people
import ev3dev.ev3 as ev3
from PIL import Image
import logging
import requests
import json
LOGGER = logging.getLogger(__name__)
def setup_color_sensor(mode='COL-REFLECT'):
    # Connect the color sensor and check that it's connected.
cl = ColorSensor()
assert cl.connected
cl.mode = mode
return cl
@abort_on_button
def get_color(url="http://127.0.0.1:5000", fake_data=False):
"""
gets numerical color value from color sensor
sends value to directory api to retrieve person details
:param url: host:port url of api server
:return: set of coordinates for desk
"""
cl = setup_color_sensor('COL-COLOR')
color = cl.value()
while color < 1:
LOGGER.debug("Waiting to read color")
color = cl.value()
sleep(.1)
# change the sensor mode which makes it emit a red light so we know it's read something
    cl.mode = 'COL-REFLECT'
LOGGER.debug("looking for person with sid=%d", color)
if fake_data:
LOGGER.debug("using mock data")
person = people[color]
person_name = '{} {}'.format(person['first'], person['last'])
coordinates = (person['location_x'], person['location_y'])
else:
try:
filters = [dict(
name='sid',
op='==',
val=str(color),
)]
params = dict(q=json.dumps(dict(filters=filters, single=True)))
headers = {'Content-Type': 'application/json'}
LOGGER.debug("Making request [%s] params=[%s]", url, params)
result = requests.get(
url="{url}/api/person".format(url=url),
params=params,
headers=headers,
)
if result.status_code == 404:
LOGGER.error("Person [%s] not found", color)
raise Exception
elif result.status_code != 200:
LOGGER.error("Query error %s - %s", result.status_code, result.reason)
raise Exception
except:
LOGGER.exception("Exception making request")
raise
person = json.loads(result.content.decode('utf-8'))
person_name = '{} {}'.format(person['first'], person['last'])
coordinates = (person['desk']['location_x'], person['desk']['location_y'])
LOGGER.debug("Person=%s, x=%s, y=%s", person_name, coordinates[0], coordinates[1])
message = "Ah Taking you to {}".format(person_name)
speak(message)
return coordinates
| mit | 6,551,336,016,505,546,000 | 28.388889 | 91 | 0.587146 | false | 4.013657 | false | false | false |
tasleson/lvm-dubstep | lvmdbus/utils.py | 1 | 12198 | # Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import xml.etree.ElementTree as Et
import sys
import inspect
import ctypes
import os
import string
import dbus
import dbus.service
import dbus.mainloop.glib
try:
from . import cfg
except SystemError:
import cfg
STDOUT_TTY = os.isatty(sys.stdout.fileno())
def rtype(dbus_type):
"""
Decorator making sure that the decorated function returns a value of
specified type.
:param dbus_type: The specific dbus type to return value as
"""
def decorator(fn):
def decorated(*args, **kwargs):
return dbus_type(fn(*args, **kwargs))
return decorated
return decorator
# Field is expected to be a number, handle the corner cases when parsing
@rtype(dbus.UInt64)
def n(v):
if not v:
return 0
return int(float(v))
@rtype(dbus.UInt32)
def n32(v):
if not v:
return 0
return int(float(v))
# noinspection PyProtectedMember
def init_class_from_arguments(obj_instance):
for k, v in list(sys._getframe(1).f_locals.items()):
if k != 'self':
nt = k
# If the current attribute has a value, but the incoming does
# not, don't overwrite it. Otherwise the default values on the
# property decorator don't work as expected.
cur = getattr(obj_instance, nt, v)
# print 'Init class %s = %s' % (nt, str(v))
if not (cur and len(str(cur)) and (v is None or len(str(v))) == 0):
setattr(obj_instance, nt, v)
def get_properties(f):
"""
	Walks through an object instance or its parent class(es) and determines
which attributes are properties and if they were created to be used for
dbus.
:param f: Object to inspect
:return: A dictionary of tuples with each tuple being:
0 = An array of dicts with the keys being: p_t, p_name,
p_access(type, name, access)
1 = Hash of property names and current value
"""
interfaces = dict()
for c in inspect.getmro(f.__class__):
h = vars(c)
for p, value in h.items():
if isinstance(value, property):
# We found a property, see if it has a metadata type
key = attribute_type_name(p)
if key in h:
interface = h[key][1]
if interface not in interfaces:
interfaces[interface] = ([], {})
access = ''
if getattr(f.__class__, p).fget:
access += 'read'
if getattr(f.__class__, p).fset:
access += 'write'
interfaces[interface][0].append(
dict(
p_t=getattr(f, key)[0],
p_name=p,
p_access=access))
interfaces[interface][1][p] = getattr(f, p)
return interfaces
def get_object_property_diff(o_prop, n_prop):
"""
	Walk through each object's properties and report what has changed along with
	the new values
:param o_prop: Old keys/values
:param n_prop: New keys/values
:return: hash of properties that have changed and their new value
"""
rc = {}
for intf_k, intf_v in o_prop.items():
for k, v in list(intf_v[1].items()):
# print('Comparing %s:%s to %s:%s' %
# (k, o_prop[intf_k][1][k], k, str(n_prop[intf_k][1][k])))
if o_prop[intf_k][1][k] != n_prop[intf_k][1][k]:
new_value = n_prop[intf_k][1][k]
if intf_k not in rc:
rc[intf_k] = dict()
rc[intf_k][k] = new_value
return rc
def add_properties(xml, interface, props):
"""
Given xml that describes the interface, add property values to the XML
for the specified interface.
:param xml: XML to edit
:param interface: Interface to add the properties too
:param props: Output from get_properties
:return: updated XML string
"""
root = Et.fromstring(xml)
if props:
for c in root:
# print c.attrib['name']
if c.attrib['name'] == interface:
for p in props:
temp = '<property type="%s" name="%s" access="%s"/>\n' % \
(p['p_t'], p['p_name'], p['p_access'])
c.append(Et.fromstring(temp))
return Et.tostring(root, encoding='utf8')
return xml
def attribute_type_name(name):
"""
Given the property name, return string of the attribute type
:param name:
:return:
"""
return "_%s_meta" % name
_type_map = dict(
s=dbus.String,
o=dbus.ObjectPath,
t=dbus.UInt64,
x=dbus.Int64,
u=dbus.UInt32,
i=dbus.Int32,
n=dbus.Int16,
q=dbus.UInt16,
d=dbus.Double,
y=dbus.Byte,
b=dbus.Boolean)
def _pass_through(v):
"""
If we have something which is not a simple type we return the original
value un-wrapped.
:param v:
:return:
"""
return v
def _dbus_type(t, value):
return _type_map.get(t, _pass_through)(value)
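# e.g. _dbus_type('t', 5) returns dbus.UInt64(5); an unknown type code such
# as 'a{sv}' falls back to _pass_through and the value is returned unchanged.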
def dbus_property(interface_name, name, dbus_type, doc=None):
"""
Creates the get/set properties for the given name. It assumes that the
actual attribute is '_' + name and the attribute metadata is stuffed in
_name_type.
There is probably a better way todo this.
:param interface_name: Dbus interface this property is associated with
:param name: Name of property
:param dbus_type: dbus string type eg. s,t,i,x
:param doc: Python __doc__ for the property
:return:
"""
attribute_name = '_' + name
def getter(self):
t = getattr(self, attribute_name + '_meta')[0]
return _dbus_type(t, getattr(self.state, attribute_name[1:]))
prop = property(getter, None, None, doc)
def decorator(cls):
setattr(cls, attribute_name + '_meta', (dbus_type, interface_name))
setattr(cls, name, prop)
return cls
return decorator
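# Illustrative (hypothetical) usage: expose a read-only 'Uuid' string property
# on some interface, backed by self.state.uuid:
#   @dbus_property('com.example.Interface1', 'Uuid', 's')
#   class SomeDbusObject(object):
#       ...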
def parse_tags(tags):
if len(tags):
if ',' in tags:
return tags.split(',')
return sorted([tags])
return dbus.Array([], signature='s')
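# e.g. parse_tags("a,b") -> ['a', 'b'], parse_tags("a") -> ['a'], and
# parse_tags("") -> an empty dbus.Array with signature 's'.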
def _common_log(msg, *attributes):
cfg.stdout_lock.acquire()
tid = ctypes.CDLL('libc.so.6').syscall(186)
msg = "%d:%d - %s" % (os.getpid(), tid, msg)
if STDOUT_TTY and attributes:
print(color(msg, *attributes))
else:
print(msg)
cfg.stdout_lock.release()
sys.stdout.flush()
# Serializes access to stdout to prevent interleaved output
# @param msg Message to output to stdout
# @return None
def log_debug(msg, *attributes):
if cfg.DEBUG:
_common_log(msg, *attributes)
def log_error(msg, *attributes):
_common_log(msg, *attributes)
# noinspection PyUnusedLocal
def handler(signum, frame):
cfg.run.value = 0
log_debug('Signal handler called with signal %d' % signum)
if cfg.loop is not None:
cfg.loop.quit()
def pv_obj_path_generate():
return cfg.PV_OBJ_PATH + "/%d" % next(cfg.pv_id)
def vg_obj_path_generate():
return cfg.VG_OBJ_PATH + "/%d" % next(cfg.vg_id)
def lv_object_path_method(name, meta):
if name[0] == '[':
return _hidden_lv_obj_path_generate
elif meta[0][0] == 't':
return _thin_pool_obj_path_generate
elif meta[0][0] == 'C' and 'pool' in meta[1]:
return _cache_pool_obj_path_generate
return _lv_obj_path_generate
# Note: None of the individual LV path generate functions should be called
# directly, they should only be dispatched through lv_object_path_method
def _lv_obj_path_generate():
return cfg.LV_OBJ_PATH + "/%d" % next(cfg.lv_id)
def _thin_pool_obj_path_generate():
return cfg.THIN_POOL_PATH + "/%d" % next(cfg.thin_id)
def _cache_pool_obj_path_generate():
return cfg.CACHE_POOL_PATH + "/%d" % next(cfg.cache_pool_id)
def _hidden_lv_obj_path_generate():
return cfg.HIDDEN_LV_PATH + "/%d" % next(cfg.hidden_lv)
def job_obj_path_generate():
return cfg.JOB_OBJ_PATH + "/%d" % next(cfg.job_id)
def color(text, *user_styles):
styles = {
# styles
'reset': '\033[0m',
'bold': '\033[01m',
'disabled': '\033[02m',
'underline': '\033[04m',
'reverse': '\033[07m',
'strike_through': '\033[09m',
'invisible': '\033[08m',
# text colors
'fg_black': '\033[30m',
'fg_red': '\033[31m',
'fg_green': '\033[32m',
'fg_orange': '\033[33m',
'fg_blue': '\033[34m',
'fg_purple': '\033[35m',
'fg_cyan': '\033[36m',
'fg_light_grey': '\033[37m',
'fg_dark_grey': '\033[90m',
'fg_light_red': '\033[91m',
'fg_light_green': '\033[92m',
'fg_yellow': '\033[93m',
'fg_light_blue': '\033[94m',
'fg_pink': '\033[95m',
'fg_light_cyan': '\033[96m',
# background colors
'bg_black': '\033[40m',
'bg_red': '\033[41m',
'bg_green': '\033[42m',
'bg_orange': '\033[43m',
'bg_blue': '\033[44m',
'bg_purple': '\033[45m',
'bg_cyan': '\033[46m',
'bg_light_grey': '\033[47m'
}
color_text = ''
for style in user_styles:
try:
color_text += styles[style]
except KeyError:
return 'def color: parameter {} does not exist'.format(style)
color_text += text
return '\033[0m{0}\033[0m'.format(color_text)
def pv_range_append(cmd, device, start, end):
if (start, end) == (0, 0):
cmd.append(device)
else:
if start != 0 and end == 0:
cmd.append("%s:%d-" % (device, start))
else:
cmd.append(
"%s:%d-%d" %
(device, start, end))
def pv_dest_ranges(cmd, pv_dest_range_list):
if len(pv_dest_range_list):
for i in pv_dest_range_list:
pv_range_append(cmd, *i)
def round_size(size_bytes):
bs = 512
remainder = size_bytes % bs
if not remainder:
return size_bytes
return size_bytes + bs - remainder
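# e.g. round_size(1000) -> 1024 and round_size(1024) -> 1024; sizes are
# rounded up to the next 512-byte boundary.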
_ALLOWABLE_CH = string.ascii_letters + string.digits + '#+.:=@_\/%'
_ALLOWABLE_CH_SET = set(_ALLOWABLE_CH)
_ALLOWABLE_VG_LV_CH = string.ascii_letters + string.digits + '.-_+'
_ALLOWABLE_VG_LV_CH_SET = set(_ALLOWABLE_VG_LV_CH)
_LV_NAME_RESERVED = ("_cdata", "_cmeta", "_corig", "_mimage", "_mlog",
"_pmspare", "_rimage", "_rmeta", "_tdata", "_tmeta", "_vorigin")
# Tags can have the characters, based on the code
# a-zA-Z0-9._-+/=!:&#
_ALLOWABLE_TAG_CH = string.ascii_letters + string.digits + "._-+/=!:&#"
_ALLOWABLE_TAG_CH_SET = set(_ALLOWABLE_TAG_CH)
def _allowable_tag(tag_name):
# LVM should impose a length restriction
return set(tag_name) <= _ALLOWABLE_TAG_CH_SET
def _allowable_vg_name(vg_name):
if vg_name is None:
raise ValueError("VG name is None or empty")
vg_len = len(vg_name)
if vg_len == 0 or vg_len > 127:
raise ValueError("VG name (%s) length (%d) not in the domain 1..127" %
(vg_name, vg_len))
if not set(vg_name) <= _ALLOWABLE_VG_LV_CH_SET:
raise ValueError("VG name (%s) contains invalid character, "
"allowable set(%s)" % (vg_name, _ALLOWABLE_VG_LV_CH))
if vg_name == "." or vg_name == "..":
raise ValueError('VG name (%s) cannot be "." or ".."' % (vg_name))
def _allowable_lv_name(vg_name, lv_name):
if lv_name is None:
raise ValueError("LV name is None or empty")
lv_len = len(lv_name)
# This length is derived from empirical testing
if lv_len == 0 or (len(vg_name) + lv_len) > 125:
raise ValueError("LV name (%s) length (%d) + VG name length "
"not in the domain 1..125" % (lv_name, lv_len))
if not set(lv_name) <= _ALLOWABLE_VG_LV_CH_SET:
raise ValueError("LV name (%s) contains invalid character, "
"allowable (%s)" % (lv_name, _ALLOWABLE_VG_LV_CH))
if any(x in lv_name for x in _LV_NAME_RESERVED):
raise ValueError("LV name (%s) contains a reserved word, "
"reserved set(%s)" % (lv_name, str(_LV_NAME_RESERVED)))
if lv_name.startswith("snapshot") or lv_name.startswith("pvmove"):
raise ValueError("LV name (%s) starts with a reserved word, "
"reserved set(%s)" % (lv_name, str(["snapshot", "pvmove"])))
if lv_name[0] == '-':
raise ValueError("LV name (%s) cannot start with a '-' "
"character" % lv_name)
def validate_device_path(interface, device):
if not set(device) <= _ALLOWABLE_CH_SET:
raise dbus.exceptions.DBusException(
interface, 'Device path (%s) has invalid characters, '
'allowable (%s)' % (device, _ALLOWABLE_CH))
def validate_vg_name(interface, vg_name):
try:
_allowable_vg_name(vg_name)
except ValueError as ve:
raise dbus.exceptions.DBusException(
interface, str(ve))
def validate_lv_name(interface, vg_name, lv_name):
try:
_allowable_lv_name(vg_name, lv_name)
except ValueError as ve:
raise dbus.exceptions.DBusException(
interface, str(ve))
def validate_tag(interface, tag):
if not _allowable_tag(tag):
raise dbus.exceptions.DBusException(
interface, 'tag (%s) contains invalid character, allowable set(%s)'
% (tag, _ALLOWABLE_TAG_CH))
| gpl-2.0 | 4,448,699,849,522,849,000 | 24.202479 | 74 | 0.651254 | false | 2.801562 | false | false | false |
NanoSmasher/cfvg-discordbot | interpret.py | 2 | 4329 | # Parses arithmetic operations with modifications:
# +,-,*,/,(,) work as they should
# & is the AND probability (lowest precedence, left associative)
# | is the OR probability (lowest precedence, left associative)
# ^ is the XOR probability (lowest precedence, left associative)
# sorting: mine. returns a dict of bracket index pairs keyed by nesting level
# infix2rpn: infix to postfix (reverse polish notation) using the shunting-yard algorithm, taken and modified from http://andreinc.net/2010/10/05/converting-infix-to-rpn-shunting-yard-algorithm/
# parse: mine. base function calling everything else.
# parse_pre: mine. adds a ton of spaces and replacements.
# parse_hgcc: mine. Converts '!'-prefixed hypergeometric terms into probability values via HGCC.
# parse_data: mine. Equation parser.
# rpn2num: RPN evaluation taken and modified from https://rosettacode.org/wiki/Parsing/RPN_calculator_algorithm#Python
import math
from fractions import *
from Hyper_Calculator import * #Math stuff
def sorting(data):
dict = {}
stack = []
test = data.find('(')
if test != -1:
level = 0
for i,c in enumerate(data):
#print(dict)
if c == '(':
level = level + 1
stack.append(i)
                if (level not in dict): dict[level] = list() #initialize
elif c == ')':
if (not level) or (len(stack) != level): return [] #) found before (
dict[level].append([stack.pop(),i+1])
level = level - 1
#print(level)
if level != 0: return [] # no closing bracket
return dict
else:
return []
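# Rough examples of sorting() (the start index is inclusive and the end index
# is exclusive, so data[start:end] covers the matching bracket pair):
#   sorting('(1+2)')     -> {1: [[0, 5]]}
#   sorting('((1+2)*3)') -> {1: [[0, 9]], 2: [[1, 6]]}
#   sorting('1+2')       -> []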
'''
Created on Oct 5, 2010
@author: nomemory
'''
#Associativity constants for operators
LEFT_ASSOC = 0
RIGHT_ASSOC = 1
#Supported operators
OPERATORS = {
'+' : (5, LEFT_ASSOC),
'-' : (5, LEFT_ASSOC),
'*' : (10, LEFT_ASSOC),
'/' : (10, LEFT_ASSOC),
'&' : (0, LEFT_ASSOC),
'|' : (0, LEFT_ASSOC),
'^' : (0, LEFT_ASSOC)
}
#Test if a certain token is operator
def isOperator(token):
return token in OPERATORS.keys()
#Test the associativity type of a certain token
def isAssociative(token, assoc):
if not isOperator(token):
raise ValueError('Invalid token: %s' % token)
return OPERATORS[token][1] == assoc
#Compare the precedence of two tokens
def cmpPrecedence(token1, token2):
if not isOperator(token1) or not isOperator(token2):
raise ValueError('Invalid tokens: %s %s' % (token1, token2))
return OPERATORS[token1][0] - OPERATORS[token2][0]
#Transforms an infix expression to RPN
def infix2rpn(tokens):
tokens = tokens.split()
out = []
stack = []
for token in tokens:
if isOperator(token):
while len(stack) != 0 and isOperator(stack[-1]):
if (isAssociative(token, LEFT_ASSOC) and cmpPrecedence(token, stack[-1]) <= 0) or (isAssociative(token, RIGHT_ASSOC) and cmpPrecedence(token, stack[-1]) < 0):
out.append(stack.pop())
continue
break
stack.append(token)
elif token == '(':
stack.append(token)
elif token == ')':
while len(stack) != 0 and stack[-1] != '(':
out.append(stack.pop())
stack.pop()
else:
out.append(token)
while len(stack) != 0:
out.append(stack.pop())
return out
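# Rough examples (the token string must already be space-delimited, which
# parse_pre() below takes care of):
#   infix2rpn('1 + 2 * 3')       -> ['1', '2', '3', '*', '+']
#   infix2rpn('0.5 & 0.5 | 0.2') -> ['0.5', '0.5', '&', '0.2', '|']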
def rpn2num(list):
a=[]
b={
'+': lambda x,y: y+x,
'-': lambda x,y: y-x,
'*': lambda x,y: y*x,
'/': lambda x,y: y/x,
'&': lambda x,y: y*x,
'|': lambda x,y: y+x-y*x,
'^': lambda x,y: x*(1-y)+(1-x)*y
}
for c in list:
if c in b: a.append(b[c](a.pop(),a.pop()))
else: a.append(float(c))
return a[0]
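# Probabilities combine as AND -> x*y, OR -> x+y-x*y, XOR -> x*(1-y)+(1-x)*y, e.g.
#   rpn2num(['0.5', '0.5', '&']) -> 0.25
#   rpn2num(['0.5', '0.5', '|']) -> 0.75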
def parse_pre(data):
data = data.replace("AND",'&')
data = data.replace("XOR",'^')
data = data.replace("OR",'|')
data = data.replace("and",'&')
data = data.replace("xor",'^')
data = data.replace("or",'|')
for i in ['&','|','^','+','-','*','/',')','(']: data = data.replace(i,' '+i+' ')
return data
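# Rough example: parse_pre('0.5AND0.3') -> '0.5 & 0.3' (word operators become
# symbols and every operator/bracket gets surrounding spaces for splitting).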
def parse_hgcc(data):
while True:
s = data.find('!')
if s != -1:
e = data.find(" ",s+1)
if e == -1: v = data[s:] #reached end of equation
else: v = data[s:e]
t = v.split(',')
result = HGCC(int(t[2]),int(t[1]),int(t[0][1:]),int(t[3]),find=">=")
data = data.replace(v,str(float(result)))
else:
break
return data
def parse_data(input):
while True:
output = sorting(input)
if len(output) > 0:
i = output[len(output)][0]
tmp = infix2rpn(input[i[0]:i[1]])
tmp = rpn2num(tmp)
input = input.replace(input[i[0]:i[1]],str(tmp))
else: break
return rpn2num(infix2rpn(input))
def parse(data):
try:
data = parse_pre(data)
data = parse_hgcc(data)
data = parse_data(data)
except:
data = "ERROR"
return data | mit | 640,299,169,862,140,300 | 25.89441 | 190 | 0.63271 | false | 2.68882 | false | false | false |
sheagcraig/python-jss | jss/distribution_point.py | 1 | 47421 | #!/usr/bin/env python
# Copyright (C) 2014-2018 Shea G Craig, 2018 Mosen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""distribution_point.py
Classes representing the various types of file storage available to
the JAMF Pro Server.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import shutil
import socket
import subprocess
import sys
import io
import math
import multiprocessing
import threading
sys.path.insert(0, "/Library/AutoPkg/JSSImporter")
import requests
try:
# Python 2.6-2.7
from HTMLParser import HTMLParser
except ImportError:
# Python 3
from html.parser import HTMLParser
# 2 and 3 compatible
try:
from urllib.parse import urlparse, urlencode, unquote, quote
from urllib.request import urlopen, Request
from urllib.error import HTTPError
except ImportError:
from urlparse import urlparse
from urllib import urlencode, unquote, quote
from urllib2 import urlopen, Request, HTTPError
from . import casper
from . import abstract
from .exceptions import JSSError
try:
from .contrib.mount_shares_better import mount_share
except ImportError:
# mount_shares_better uses PyObjC. If using non-system python,
# chances are good user has not set up PyObjC, so fall back to
# subprocess to mount. (See mount methods).
mount_share = None
from .tools import is_osx, is_linux, is_package
try:
import boto.s3
from boto.s3.connection import S3Connection, OrdinaryCallingFormat, S3ResponseError
from boto.s3.key import Key
BOTO_AVAILABLE = True
except ImportError:
print(
"boto is not available, you will not be able to use the AWS distribution point type"
)
BOTO_AVAILABLE = False
PKG_FILE_TYPE = "0"
EBOOK_FILE_TYPE = "1"
IN_HOUSE_APP_FILE_TYPE = "2"
def auto_mounter(original):
"""Decorator for automatically mounting, if needed."""
def mounter(*args):
"""If not mounted, mount."""
self = args[0]
if not self.is_mounted():
self.mount()
return original(*args)
return mounter
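# The decorator is applied to MountedRepository methods further below, e.g.
#   @auto_mounter
#   def exists(self, filename): ...
# so a decorated call transparently mounts the share first if it is not
# already mounted.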
# pylint: disable=too-few-public-methods
class Repository(object):
"""Base class for file repositories.
This class is not usable on its own; however, it provides the base
init which all subclasses should use.
Attributes:
connection (dict): Dictionary for storing connection arguments.
required_attrs (Set): A set of the keys which must be supplied to the initializer,
otherwise a JSSError will be raised.
Raises:
JSSError: If mandatory arguments are not supplied to the initializer.
"""
required_attrs = set()
def __init__(self, **connection_args):
"""Store the connection information."""
if self.required_attrs.issubset(set(connection_args.keys())):
self.connection = connection_args
self._build_url()
else:
missing_attrs = self.required_attrs.difference(set(connection_args.keys()))
raise JSSError(
"Missing REQUIRED argument(s) %s to %s distribution point."
% (list(missing_attrs), self.__class__)
)
def __repr__(self):
"""Return string representation of connection arguments."""
output = ["Distribution Point: %s" % self.connection["url"]]
output.append("Type: %s" % type(self))
output.append("Connection Information:")
for key, val in self.connection.items():
output.append("\t%s: %s" % (key, val))
return "\n".join(output) + "\n"
def _build_url(self):
"""Private build url method."""
raise NotImplementedError
# pylint: enable=too-few-public-methods
class FileRepository(Repository):
"""Local file shares."""
def _build_url(self):
"""Build a connection URL."""
pass
def copy_pkg(self, filename, _):
"""Copy a package to the repo's Package subdirectory.
Args:
filename: Path for file to copy.
_: Ignored. Used for compatibility with JDS repos.
"""
basename = os.path.basename(filename)
self._copy(
filename, os.path.join(self.connection["mount_point"], "Packages", basename)
)
def _copy(self, filename, destination): # pylint: disable=no-self-use
"""Copy a file or folder to the repository.
Will mount if needed.
Args:
filename: Path to copy.
destination: Remote path to copy file to.
"""
full_filename = os.path.abspath(os.path.expanduser(filename))
if os.path.isdir(full_filename):
shutil.copytree(full_filename, destination)
elif os.path.isfile(full_filename):
shutil.copyfile(full_filename, destination)
def delete(self, filename):
"""Delete a file from the repository.
Args:
filename: String filename only (i.e. no path) of file to
delete.
"""
folder = "Packages"
path = os.path.join(self.connection["mount_point"], folder, filename)
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path):
os.remove(path)
def exists(self, filename):
"""Report whether a file exists on the distribution point.
Determines file type by extension.
Args:
filename: Filename you wish to check. (No path! e.g.:
"AdobeFlashPlayer-14.0.0.176.pkg")
"""
filepath = os.path.join(self.connection["mount_point"], "Packages", filename)
return os.path.exists(filepath)
def __contains__(self, filename):
"""Magic method to allow constructs similar to:
if 'abc.pkg' in dp:
"""
return self.exists(filename)
class LocalRepository(FileRepository):
"""JAMF Pro repo located on a local filesystem path."""
required_attrs = {"mount_point", "share_name"}
def __init__(self, **connection_args):
"""Set up Local file share.
Args:
connection_args: Dict with the following key/val pairs:
mount_point: Path to a valid mount point.
share_name: The fileshare's name.
"""
super(LocalRepository, self).__init__(**connection_args)
self.connection["url"] = "local://%s" % self.connection["mount_point"]
class MountedRepository(FileRepository):
"""Parent class for mountable file shares.
Attributes:
fs_type: Class attribute, string protocol type (currently AFP
or SMB).
"""
fs_type = "undefined"
def __init__(self, **connection_args):
"""Init a MountedRepository by calling super."""
super(MountedRepository, self).__init__(**connection_args)
def mount(self):
"""Mount the repository."""
if not self.is_mounted():
# ensure the mountpoint exists.
if not os.path.exists(self.connection["mount_point"]):
os.mkdir(self.connection["mount_point"])
self._mount()
def _mount(self):
"""Private mount method."""
raise NotImplementedError
def umount(self, forced=True):
"""Try to unmount our mount point.
Defaults to using forced method. If OS is Linux, it will not
delete the mount point.
Args:
forced: Bool whether to force the unmount. Default is True.
"""
if self.is_mounted():
if is_osx():
cmd = ["/usr/sbin/diskutil", "unmount", self.connection["mount_point"]]
if forced:
cmd.insert(2, "force")
subprocess.check_call(cmd)
else:
cmd = ["umount", self.connection["mount_point"]]
if forced:
cmd.insert(1, "-f")
subprocess.check_call(cmd)
def is_mounted(self):
"""Test for whether a mount point is mounted.
If it is currently mounted, determine the path where it's
mounted and update the connection's mount_point accordingly.
"""
mount_check = subprocess.check_output("mount").decode().splitlines()
# The mount command returns lines like this on OS X...
# //[email protected]/JSS%20REPO on /Users/Shared/JSS REPO
# (afpfs, nodev, nosuid, mounted by local_me)
# and like this on Linux...
# //pretendco.com/jamf on /mnt/jamf type cifs (rw,relatime,
# <options>...)
valid_mount_strings = self._get_valid_mount_strings()
was_mounted = False
if is_osx():
mount_string_regex = re.compile(r"\(([\w]*),*.*\)$")
mount_point_regex = re.compile(r"on ([\w/ -]*) \(.*$")
elif is_linux():
mount_string_regex = re.compile(r"type ([\w]*) \(.*\)$")
mount_point_regex = re.compile(r"on ([\w/ -]*) type .*$")
else:
raise JSSError("Unsupported OS.")
for mount in mount_check:
fs_match = re.search(mount_string_regex, mount)
fs_type = fs_match.group(1) if fs_match else None
# Automounts, non-network shares, and network shares
# all have a slightly different format, so it's easiest to
# just split.
mount_string = mount.split(" on ")[0]
# Does the mount_string match one of our valid_mount_strings?
if [
mstring for mstring in valid_mount_strings if mstring in mount_string
] and self.fs_type == fs_type:
# Get the mount point string between from the end back to
# the last "on", but before the options (wrapped in
# parenthesis). Considers alphanumerics, / , _ , - and a
# blank space as valid, but no crazy chars.
match = re.search(mount_point_regex, mount)
mount_point = match.group(1) if match else None
was_mounted = True
# Reset the connection's mount point to the discovered
# value.
if mount_point:
self.connection["mount_point"] = mount_point
if self.connection["jss"].verbose:
print(
"%s is already mounted at %s.\n"
% (self.connection["url"], mount_point)
)
# We found the share, no need to continue.
break
if not was_mounted:
# If the share is not mounted, check for another share
# mounted to the same path and if found, incremement the
# name to avoid conflicts.
count = 1
while os.path.ismount(self.connection["mount_point"]):
self.connection["mount_point"] = "%s-%s" % (
self.connection["mount_point"],
count,
)
count += 1
# Do an inexpensive double check...
return os.path.ismount(self.connection["mount_point"])
def _get_valid_mount_strings(self):
"""Return a tuple of potential mount strings.
Casper Admin seems to mount in a number of ways:
- hostname/share
- fqdn/share
Plus, there's the possibility of:
- IPAddress/share
Then factor in the possibility that the port is included too!
This gives us a total of up to six valid addresses for mount
to report.
"""
results = set()
join = os.path.join
url = self.connection["url"]
share_name = quote(self.connection["share_name"], safe="~()*!.'$")
port = self.connection["port"]
# URL from python-jss form:
results.add(join(url, share_name))
results.add(join("%s:%s" % (url, port), share_name))
# IP Address form:
# socket.gethostbyname() will return an IP address whether
# an IP address, FQDN, or .local name is provided.
ip_address = socket.gethostbyname(url)
results.add(join(ip_address, share_name))
results.add(join("%s:%s" % (ip_address, port), share_name))
# Domain name only form:
domain_name = url.split(".")[0]
results.add(join(domain_name, share_name))
results.add(join("%s:%s" % (domain_name, port), share_name))
# FQDN form using getfqdn:
# socket.getfqdn() could just resolve back to the ip
# or be the same as the initial URL so only add it if it's
# different than both.
fqdn = socket.getfqdn(ip_address)
results.add(join(fqdn, share_name))
results.add(join("%s:%s" % (fqdn, port), share_name))
return tuple(results)
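    # Illustrative result (host, share and port values are placeholders): for
    # url 'dp.example.org', share 'CasperShare' and port '548', the tuple
    # contains 'dp.example.org/CasperShare', 'dp.example.org:548/CasperShare',
    # the same two forms for the short name 'dp', and two more for the
    # resolved IP address / FQDN.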
@auto_mounter
def _copy(self, filename, destination):
"""Copy a file or folder to the repository.
Will mount if needed.
Args:
filename: Path to copy.
destination: Remote path to copy file to.
"""
super(MountedRepository, self)._copy(filename, destination)
@auto_mounter
def delete(self, filename):
"""Delete a file from the repository.
Args:
filename: String filename only (i.e. no path) of file to
delete.
"""
super(MountedRepository, self).delete(filename)
@auto_mounter
def exists(self, filename):
"""Report whether a file exists on the distribution point.
Determines file type by extension.
Args:
filename: Filename you wish to check. (No path! e.g.:
"AdobeFlashPlayer-14.0.0.176.pkg")
"""
return super(MountedRepository, self).exists(filename)
def __repr__(self):
"""Return a formatted string of connection info."""
# Do an "update" to get current mount points.
self.is_mounted()
output = super(MountedRepository, self).__repr__()
output += "Mounted: %s\n" % self.is_mounted()
return output
@property
def _encoded_password(self):
"""Returns the safely url-quoted password for this DP."""
return quote(self.connection["password"], safe="~()*!.'$")
class AFPDistributionPoint(MountedRepository):
"""Represents an AFP repository."""
protocol = "afp"
fs_type = "afpfs"
required_attrs = {"url", "mount_point", "username", "password", "share_name"}
def __init__(self, **connection_args):
"""Set up an AFP connection.
Args:
connection_args (dict): Dict with the following key/val pairs:
url: URL to the mountpoint,including volume name e.g.:
"my_repository.domain.org/jamf" (Do _not_ include
protocol or auth info.)
mount_point: Path to a valid mount point.
share_name: The fileshare's name.
username: Share R/W username.
password: Share R/W password.
"""
super(AFPDistributionPoint, self).__init__(**connection_args)
# Check to see if share is mounted, and update mount point
self.is_mounted()
def _build_url(self):
"""Build the URL string to mount this file share."""
if self.connection.get("username") and self.connection.get("password"):
auth = "%s:%s@" % (self.connection["username"], self._encoded_password)
else:
auth = ""
# Optional port number
port = self.connection.get("port")
port = ":" + port if port else ""
self.connection["mount_url"] = "%s://%s%s%s/%s" % (
self.protocol,
auth,
self.connection["url"],
port,
self.connection["share_name"],
)
def _mount(self):
"""Mount based on which OS is running."""
# mount_afp "afp://scraig:<password>@address/share" <mnt_point>
if is_osx():
if self.connection["jss"].verbose:
print(self.connection["mount_url"])
if mount_share:
self.connection["mount_point"] = mount_share(
self.connection["mount_url"]
)
else:
# Non-Apple OS X python:
args = [
"mount",
"-t",
self.protocol,
self.connection["mount_url"],
self.connection["mount_point"],
]
if self.connection["jss"].verbose:
print(" ".join(args))
subprocess.check_call(args)
elif is_linux():
args = [
"mount_afp",
"-t",
self.protocol,
self.connection["mount_url"],
self.connection["mount_point"],
]
if self.connection["jss"].verbose:
print(" ".join(args))
subprocess.check_call(args)
else:
raise JSSError("Unsupported OS.")
class SMBDistributionPoint(MountedRepository):
"""Represents a SMB distribution point."""
protocol = "smbfs"
required_attrs = {
"url",
"share_name",
"mount_point",
"domain",
"username",
"password",
}
def __init__(self, **connection_args):
"""Set up a SMB connection.
Args:
connection_args: Dict with the following key/val pairs:
url: URL to the mountpoint,including volume name e.g.:
"my_repository.domain.org/jamf" (Do _not_ include
protocol or auth info.)
mount_point: Path to a valid mount point.
share_name: The fileshare's name.
domain: Specify the domain.
username: Share R/W username.
password: Share R/W password.
"""
super(SMBDistributionPoint, self).__init__(**connection_args)
if is_osx():
self.fs_type = "smbfs"
if is_linux():
self.fs_type = "cifs"
# Check to see if share is mounted, and update.
self.is_mounted()
def _build_url(self):
"""Build the URL string to mount this file share."""
if self.connection.get("username") and self.connection.get("password"):
auth = "%s:%s@" % (self.connection["username"], self._encoded_password)
if self.connection.get("domain"):
auth = r"%s;%s" % (self.connection["domain"], auth)
else:
auth = ""
# Optional port number
port = self.connection.get("port")
port = ":" + port if port else ""
# Construct mount_url
self.connection["mount_url"] = "//%s%s%s/%s" % (
auth,
self.connection["url"],
port,
self.connection["share_name"],
)
def _mount(self):
"""Mount based on which OS is running."""
# mount -t cifs -o \
# username=<user>,password=<password>,domain=<domain>,port=445 \
# //server/share /mnt/<mountpoint>
if is_osx():
if mount_share:
mount_url = "smb:%s" % self.connection["mount_url"]
if self.connection["jss"].verbose:
print(mount_url)
self.connection["mount_point"] = mount_share(mount_url)
else:
# Non-Apple OS X python:
args = [
"mount",
"-t",
self.protocol,
self.connection["mount_url"],
self.connection["mount_point"],
]
if self.connection["jss"].verbose:
print(" ".join(args))
subprocess.check_call(args)
elif is_linux():
args = [
"mount",
"-t",
"cifs",
"-o",
"username=%s,password=%s,domain=%s,port=%s"
% (
self.connection["username"],
self.connection["password"],
self.connection["domain"],
self.connection["port"],
),
"//%s/%s" % (self.connection["url"], self.connection["share_name"]),
self.connection["mount_point"],
]
if self.connection["jss"].verbose:
print(" ".join(args))
subprocess.check_call(args)
else:
raise JSSError("Unsupported OS.")
class DistributionServer(Repository):
"""Abstract class for representing JDS and CDP type repos.
The JSS has a folder to which packages are uploaded via a private
API call to dbfileupload. From there, the JSS handles the
distribution to its Cloud and JDS points.
There are caveats to its exists() method which you should be
aware of, along with a private API exists_with_casper method, which
probably works more like what one would expect. Please see those
methods for more information.
"""
required_attrs = {"jss"}
destination = "0"
def __init__(self, **connection_args):
"""Set up a connection to a distribution server.
Args:
connection_args: Dict, with required key:
jss: A JSS Object.
"""
super(DistributionServer, self).__init__(**connection_args)
self.connection["url"] = self.connection["jss"].base_url
def _build_url(self):
"""Build the URL for POSTing files. 10.2 and earlier.
This actually still works in some scenarios, but it seems like it will be deprecated soon.
"""
self.connection["upload_url"] = "%s/%s" % (
self.connection["jss"].base_url,
"dbfileupload",
)
self.connection["delete_url"] = "%s/%s" % (
self.connection["jss"].base_url,
"casperAdminSave.jxml",
)
def _build_url_modern(self):
"""Build the URL for POSTing files.
This uses the UploadServlet that has been used to handle most file uploads into JAMF Pro.
"""
self.connection["upload_url"] = "%s/%s" % (
self.connection["jss"].base_url,
"upload",
)
self.connection["delete_url"] = "%s/%s" % (
self.connection["jss"].base_url,
"casperAdminSave.jxml",
)
def copy_pkg(self, filename, id_=-1):
"""Copy a package to the distribution server.
Bundle-style packages must be zipped prior to copying.
Args:
filename: Full path to file to upload.
id_: ID of Package object to associate with, or -1 for new
packages (default).
"""
self._copy(filename, id_=id_, file_type=PKG_FILE_TYPE)
def _copy(self, filename, id_=-1, file_type=0):
"""Upload a file to the distribution server. 10.2 and earlier
Directories/bundle-style packages must be zipped prior to
copying.
"""
if os.path.isdir(filename):
raise TypeError(
"Distribution Server type repos do not permit directory "
"uploads. You are probably trying to upload a non-flat "
"package. Please zip or create a flat package."
)
basefname = os.path.basename(filename)
resource = open(filename, "rb")
headers = {
"DESTINATION": self.destination,
"OBJECT_ID": str(id_),
"FILE_TYPE": file_type,
"FILE_NAME": basefname,
}
response = self.connection["jss"].session.post(
url=self.connection["upload_url"], data=resource, headers=headers
)
if self.connection["jss"].verbose:
print(response)
def _copy_new(self, filename, id_=-1, file_type=0):
"""Upload a file to the distribution server.
Directories/bundle-style packages must be zipped prior to
copying.
"""
if os.path.isdir(filename):
raise TypeError(
"Distribution Server type repos do not permit directory "
"uploads. You are probably trying to upload a non-flat "
"package. Please zip or create a flat package."
)
basefname = os.path.basename(filename)
resource = open(filename, "rb")
headers = {
"sessionIdentifier": "com.jamfsoftware.jss.objects.packages.Package:%s"
% str(id_),
"fileIdentifier": "FIELD_FILE_NAME_FOR_DIST_POINTS",
}
response = self.connection["jss"].session.post(
url=self.connection["upload_url"], data=resource, headers=headers
)
print(response)
if self.connection["jss"].verbose:
print(response)
def delete_with_casper_admin_save(self, pkg):
"""Delete a pkg from the distribution server.
Args:
pkg: Can be a jss.Package object, an int ID of a package, or
a filename.
"""
# The POST needs the package ID.
if pkg.__class__.__name__ == "Package":
package_to_delete = pkg.id
elif isinstance(pkg, int):
package_to_delete = pkg
elif isinstance(pkg, str):
package_to_delete = self.connection["jss"].Package(pkg).id
else:
raise TypeError
data_dict = {
"username": self.connection["jss"].user,
"password": self.connection["jss"].password,
"deletedPackageID": package_to_delete,
}
self.connection["jss"].session.post(
url=self.connection["delete_url"], data=data_dict
)
# There's no response if it works.
def delete(self, filename):
"""Delete a package distribution server.
This method simply finds the Package object from the database
with the API GET call and then deletes it. This will remove the
file from the database blob.
For setups which have file share distribution points, you will
need to delete the files on the shares also.
Args:
filename: Filename (no path) to delete.
"""
if is_package(filename):
self.connection["jss"].Package(filename).delete()
def exists(self, filename):
"""Check for the existence of a package.
Unlike other DistributionPoint types, JDS and CDP types have no
documented interface for checking whether the server and its
children have a complete copy of a file. The best we can do is
check for an object using the API /packages URL--JSS.Package()
and look for matches on the filename.
If this is not enough, please use the alternate
exists_with_casper method. For example, it's possible to create
a Package object but never upload a package file, and this
method will still return "True".
Also, this may be slow, as it needs to retrieve the complete
list of packages from the server.
"""
# Technically, the results of the casper.jxml page list the
# package files on the server. This is an undocumented
# interface, however.
result = False
if is_package(filename):
packages = self.connection["jss"].Package().retrieve_all()
for package in packages:
if package.findtext("filename") == filename:
result = True
break
return result
def exists_using_casper(self, filename):
"""Check for the existence of a package file.
Unlike other DistributionPoint types, JDS and CDP types have no
documented interface for checking whether the server and its
children have a complete copy of a file. The best we can do is
check for an object using the API /packages URL--JSS.Package()
and look for matches on the filename.
If this is not enough, this method uses the results of the
casper.jxml page to determine if a package exists. This is an
undocumented feature and as such should probably not be relied
upon.
It will test for whether the file exists on ALL configured
distribution servers. This may register False if the JDS is busy
syncing them.
"""
casper_results = casper.Casper(self.connection["jss"])
distribution_servers = casper_results.find("distributionservers")
# Step one: Build a list of sets of all package names.
all_packages = []
for distribution_server in distribution_servers:
packages = set()
for package in distribution_server.findall("packages/package"):
packages.add(os.path.basename(package.find("fileURL").text))
all_packages.append(packages)
# Step two: Intersect the sets.
base_set = all_packages.pop()
for packages in all_packages:
base_set = base_set.intersection(packages)
# Step three: Check for membership.
return filename in base_set
class JDS(DistributionServer):
"""Class for representing a JDS and its controlling JSS.
The JSS has a folder to which packages are uploaded. From there, the
JSS handles the distribution to its Cloud and JDS points.
This class should be considered experimental!
- There are caveats to its .exists() method
- It is unclear at the moment what the interaction is in systems
that have both a JDS and a CDP, especially depending on which is the
master.
"""
required_attrs = {"jss"}
destination = "1"
class CDP(DistributionServer):
"""Class for representing a CDP and its controlling JSS.
The JSS has a folder to which packages are uploaded. From there, the
JSS handles the distribution to its Cloud and JDS points.
This class should be considered experimental!
- There are caveats to its .exists() method
- It is unclear at the moment what the interaction is in systems
that have both a JDS and a CDP, especially depending on which is the
master.
"""
required_attrs = {"jss"}
destination = "2"
class CloudDistributionServer(Repository):
"""Abstract class for representing JCDS type repos.
"""
def package_index_using_casper(self):
"""Get a list of packages on the JCDS
Similar to JDS and CDP, JCDS types have no
documented interface for checking whether the server and its
children have a complete copy of a file. The best we can do is
check for an object using the API /packages URL--JSS.Package()
and look for matches on the filename.
If this is not enough, this method uses the results of the
casper.jxml page to determine if a package exists. This is an
undocumented feature and as such should probably not be relied
upon.
It will test for whether the file exists on only cloud distribution points.
"""
casper_results = casper.Casper(self.connection["jss"])
cloud_distribution_points = casper_results.find("cloudDistributionPoints")
# Step one: Build a list of sets of all package names.
all_packages = []
for distribution_point in cloud_distribution_points:
if distribution_point.findtext("name") != "Jamf Cloud":
continue # type 4 might be reserved for JCDS?
for package in distribution_point.findall("packages/package"):
package_obj = casper_results.find(
"./packages/package[id='%s']" % (package.findtext("id"),)
)
all_packages.append(
{
"id": package.findtext("id"),
"checksum": package.findtext("checksum"),
"size": package.findtext("size"),
"lastModified": package.findtext("lastModified"),
"fileURL": unquote(package.findtext("fileURL")),
"name": package_obj.findtext("name"),
"filename": package_obj.findtext("filename"),
}
)
return all_packages
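    # Illustrative shape of the returned list (all values are placeholders):
    #   [{'id': '17', 'checksum': 'md5...', 'size': '35658112',
    #     'lastModified': '2018-07-10T03:21:17.000Z',
    #     'fileURL': 'https://example.jamfcloud.com/.../Firefox.pkg',
    #     'name': 'Firefox.pkg', 'filename': 'Firefox.pkg'}, ...]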
def _jcds_upload_chunk(
filename, base_url, upload_token, chunk_index, chunk_size, total_chunks
):
"""Upload a single chunk of a file to JCDS.
Args:
filename (str): The full path to the file being uploaded.
base_url (str): The JCDS base URL which includes the regional hostname and the tenant id.
upload_token (str): The upload token, scraped from legacy/packages.html
chunk_index (int): The zero-based index of the chunk being uploaded.
total_chunks (int): The total count of chunks to upload
Returns:
dict: JSON Response from JCDS
"""
print("Working on Chunk [{}/{}]".format(chunk_index + 1, total_chunks))
resource = open(filename, "rb")
resource.seek(chunk_index * chunk_size)
chunk_data = resource.read(chunk_size)
basefname = os.path.basename(filename)
chunk_url = "{}/{}/part?chunk={}&chunks={}".format(
base_url, basefname, chunk_index, total_chunks
)
chunk_reader = io.BytesIO(chunk_data)
headers = {"X-Auth-Token": upload_token}
response = requests.post(
url=chunk_url, headers=headers, files={"file": chunk_reader},
)
return response.json()
# Semaphore controlling max workers for chunked uploads
jcds_semaphore = threading.BoundedSemaphore(value=3)
class JCDSChunkUploadThread(threading.Thread):
def __init__(self, *args, **kwargs):
self.filename = kwargs["filename"]
self.base_url = kwargs["base_url"]
self.upload_token = kwargs["upload_token"]
self.chunk_index = kwargs["chunk_index"]
self.chunk_size = kwargs["chunk_size"]
self.total_chunks = kwargs["total_chunks"]
super_kwargs = dict(kwargs)
del super_kwargs["filename"]
del super_kwargs["base_url"]
del super_kwargs["upload_token"]
del super_kwargs["chunk_index"]
del super_kwargs["chunk_size"]
del super_kwargs["total_chunks"]
super(JCDSChunkUploadThread, self).__init__(*args, **super_kwargs)
def run(self):
jcds_semaphore.acquire()
try:
print(
"Working on Chunk [{}/{}]".format(
self.chunk_index + 1, self.total_chunks
)
)
resource = open(self.filename, "rb")
resource.seek(self.chunk_index * self.chunk_size)
chunk_data = resource.read(self.chunk_size)
basefname = os.path.basename(self.filename)
chunk_url = "{}/{}/part?chunk={}&chunks={}".format(
self.base_url, basefname, self.chunk_index, self.total_chunks
)
chunk_reader = io.BytesIO(chunk_data)
headers = {"X-Auth-Token": self.upload_token}
response = requests.post(
url=chunk_url, headers=headers, files={"file": chunk_reader},
)
return response.json()
except:
pass
finally:
jcds_semaphore.release()
class AWS(CloudDistributionServer, abstract.AbstractRepository):
"""Class for representing an AWS Cloud Distribution Point and its controlling JSS.
"""
required_attrs = {"jss", "bucket"}
def __init__(self, **connection_args):
"""Set up a connection to an AWS S3 bucket.
It is more secure to use the following environment variables provided by boto:
AWS_ACCESS_KEY_ID - The access key id to the jamf bucket
AWS_SECRET_ACCESS_KEY - The secret access key to the jamf bucket
You may also use the file ~/.boto as described in the boto documentation.
Args:
connection_args: Dict, with required keys:
jss: A JSS Object.
bucket: Name of the JAMF bucket.
aws_access_key_id (optional): The access key id
secret_access_key (optional): The secret access key, use environment instead.
host (optional): A bucket host. Seems to be needed if your bucket is not in the default location
eg. southeast asia ap 2
chunk_size (optional): The chunk size for large objects >50mb
Throws:
S3ResponseError if the bucket does not exist
"""
super(AWS, self).__init__(**connection_args)
self.s3 = S3Connection(
aws_access_key_id=connection_args.get("aws_access_key_id", None),
aws_secret_access_key=connection_args.get("aws_secret_access_key", None),
host=connection_args.get("host", boto.s3.connection.NoHostProvided),
)
try:
self.bucket = self.s3.get_bucket(connection_args["bucket"])
except S3ResponseError as e:
raise JSSError(
"got error getting bucket, may not exist: {}".format(
connection_args["bucket"]
)
)
self.connection["url"] = self.bucket
self.chunk_size = connection_args.get("chunk_size", 52428800) # 50 mb default
def _build_url(self):
"""Build a connection URL."""
pass
def copy_pkg(self, filename, id_=-1):
"""Copy a package to the repo's Package subdirectory.
Args:
filename: Path for file to copy.
id_: Unused
"""
self._copy(filename, id_=id_)
def _copy(self, filename, id_=-1): # type: (str, int) -> None
"""Copy a file or folder to the bucket.
Does not yet support chunking.
Args:
filename: Path to copy.
destination: Remote path to copy file to.
"""
bucket_key = os.path.basename(filename)
exists = self.bucket.get_key(bucket_key)
if exists:
print("Already exists")
else:
k = Key(self.bucket)
k.key = bucket_key
k.set_metadata("jamf-package-id", id_)
k.set_contents_from_filename(filename)
def delete(self, filename): # type: (str) -> None
bucket_key = os.path.basename(filename)
self.bucket.delete_key(bucket_key)
def exists(self, filename): # type: (str) -> bool
"""Check whether a package already exists by checking for a bucket item with the same filename.
Args:
filename: full path to filename. Only the name itself will be checked.
Returns:
True if the package exists, else false
"""
k = self.bucket.get_key(os.path.basename(filename))
return k is not None
class JCDS(CloudDistributionServer):
"""Class for representing a JCDS and its controlling jamfcloud JSS.
The JSS allows direct upload to the JCDS by exposing the access token from the package upload page.
This class should be considered experimental!
"""
required_attrs = {"jss"}
destination = "3"
workers = 3
chunk_size = 1048768
def __init__(self, **connection_args):
"""Set up a connection to a distribution server.
Args:
connection_args (dict):
jss (JSS): The associated JAMF Pro Server instance
"""
super(JCDS, self).__init__(**connection_args)
self.connection["url"] = "JCDS"
def _scrape_tokens(self):
"""Scrape JCDS upload URL and upload access token from the jamfcloud instance."""
jss = self.connection["jss"]
response = jss.scrape("legacy/packages.html?id=-1&o=c")
matches = re.search(
r'data-base-url="([^"]*)"', response.content.decode("utf-8")
)
if matches is None:
raise JSSError(
"Did not find the JCDS base URL on the packages page. Is this actually Jamfcloud?"
)
jcds_base_url = matches.group(1)
matches = re.search(
r'data-upload-token="([^"]*)"', response.content.decode("utf-8")
)
if matches is None:
raise JSSError(
"Did not find the JCDS upload token on the packages page. Is this actually Jamfcloud?"
)
jcds_upload_token = matches.group(1)
h = HTMLParser()
jcds_base_url = h.unescape(jcds_base_url)
self.connection["jcds_base_url"] = jcds_base_url
self.connection["jcds_upload_token"] = jcds_upload_token
self.connection[
"url"
] = jcds_base_url # This is to make JSSImporter happy because it accesses .connection
def _build_url(self):
"""Build a connection URL."""
pass
def copy_pkg(self, filename, id_=-1):
"""Copy a package to the JAMF Cloud distribution server.
Bundle-style packages must be zipped prior to copying.
Args:
filename: Full path to file to upload.
id_: ID of Package object to associate with, or -1 for new
packages (default).
"""
self._copy(filename, id_=id_)
def _build_chunk_url(self, filename, chunk, chunk_total):
"""Build the path to the chunk being uploaded to the JCDS."""
return "{}/{}/part?chunk={}&chunks={}".format(
self.connection["jcds_base_url"], filename, chunk, chunk_total
)
def _copy_multiprocess(self, filename, upload_token, id_=-1):
"""Upload a file to the distribution server using multiple processes to upload several chunks in parallel.
Directories/bundle-style packages must be zipped prior to copying.
"""
fsize = os.stat(filename).st_size
total_chunks = int(math.ceil(fsize / JCDS.chunk_size))
p = multiprocessing.Pool(3)
def _chunk_args(chunk_index):
return [
filename,
self.connection["jcds_base_url"],
upload_token,
chunk_index,
JCDS.chunk_size,
total_chunks,
]
for chunk in xrange(0, total_chunks):
res = p.apply_async(_jcds_upload_chunk, _chunk_args(chunk))
data = res.get(timeout=10)
print(
"id: {0}, version: {1}, size: {2}, filename: {3}, lastModified: {4}, created: {5}".format(
data["id"],
data["version"],
data["size"],
data["filename"],
data["lastModified"],
data["created"],
)
)
def _copy_threaded(self, filename, upload_token, id_=-1):
"""Upload a file to the distribution server using multiple threads to upload several chunks in parallel."""
fsize = os.stat(filename).st_size
total_chunks = int(math.ceil(fsize / JCDS.chunk_size))
for chunk in xrange(0, total_chunks):
t = JCDSChunkUploadThread(
filename=filename,
base_url=self.connection["jcds_base_url"],
upload_token=upload_token,
chunk_index=chunk,
chunk_size=JCDS.chunk_size,
total_chunks=total_chunks,
)
t.start()
def _copy_sequential(self, filename, upload_token, id_=-1):
"""Upload a file to the distribution server using the same process as python-jss.
Directories/bundle-style packages must be zipped prior to copying.
"""
fsize = os.stat(filename).st_size
total_chunks = int(math.ceil(fsize / JCDS.chunk_size))
basefname = os.path.basename(filename)
resource = open(filename, "rb")
headers = {
"X-Auth-Token": self.connection["jcds_upload_token"],
# "Content-Type": "application/octet-steam",
}
for chunk in xrange(0, total_chunks):
resource.seek(chunk * JCDS.chunk_size)
chunk_data = resource.read(JCDS.chunk_size)
chunk_reader = io.BytesIO(chunk_data)
chunk_url = self._build_chunk_url(basefname, chunk, total_chunks)
response = self.connection["jss"].session.post(
url=chunk_url, headers=headers, files={"file": chunk_reader},
)
if self.connection["jss"].verbose:
print(response.json())
resource.close()
def _copy(self, filename, id_=-1, file_type=0):
"""Upload a file to the distribution server. 10.2 and earlier
Directories/bundle-style packages must be zipped prior to
copying.
JCDS returns a JSON structure like this::
{
u'status': u'PENDING',
u'created': u'2018-07-10T03:21:17.000Z',
u'lastModified': u'2018-07-11T03:55:32.000Z',
u'filename': u'SkypeForBusinessInstaller-16.18.0.51.pkg',
u'version': 6,
u'md5': u'',
u'sha512': u'',
u'id': u'3a7e6a7479fc4000bf53a9693d906b11',
u'size': 35658112
}
"""
if os.path.isdir(filename):
raise TypeError(
"JCDS Server type repos do not permit directory "
"uploads. You are probably trying to upload a non-flat "
"package. Please zip or create a flat package."
)
if "jcds_upload_token" not in self.connection:
self._scrape_tokens()
self._copy_threaded(filename, self.connection["jcds_upload_token"])
# if False:
# self._copy_sequential(filename, self.connection['jcds_upload_token'])
# else:
# self._copy_threaded(filename, self.connection['jcds_upload_token'])
def exists(self, filename):
"""Check whether a package file already exists."""
packages = self.package_index_using_casper()
for p in packages:
url, token = p["fileURL"].split("?", 2)
urlparts = url.split("/")
if urlparts[-1] == filename:
return True
return False
def __repr__(self):
"""Return string representation of connection arguments."""
output = [
"JAMF Cloud Distribution Server: %s" % self.connection["jss"].base_url
]
output.append("Type: %s" % type(self))
output.append("Connection Information:")
for key, val in self.connection.items():
output.append("\t%s: %s" % (key, val))
return "\n".join(output) + "\n"
| gpl-3.0 | 7,082,928,960,910,337,000 | 34.388806 | 115 | 0.575125 | false | 4.254531 | false | false | false |
csirtgadgets/bearded-avenger | cif/hunter/spamhaus_fqdn.py | 1 | 3154 |
import logging
from csirtg_indicator import Indicator
from pprint import pprint
from cif.utils import resolve_ns
import arrow
CONFIDENCE = 9
PROVIDER = 'spamhaus.org'
CODES = {
'127.0.1.2': {
'tags': 'suspicious',
'description': 'spammed domain',
},
'127.0.1.3': {
'tags': 'suspicious',
'description': 'spammed redirector / url shortener',
},
'127.0.1.4': {
'tags': 'phishing',
'description': 'phishing domain',
},
'127.0.1.5': {
'tags': 'malware',
'description': 'malware domain',
},
'127.0.1.6': {
'tags': 'botnet',
'description': 'Botnet C&C domain',
},
'127.0.1.102': {
'tags': 'suspicious',
'description': 'abused legit spam',
},
'127.0.1.103': {
'tags': 'suspicious',
'description': 'abused legit spammed redirector',
},
'127.0.1.104': {
'tags': 'phishing',
'description': 'abused legit phish',
},
'127.0.1.105': {
'tags': 'malware',
'description': 'abused legit malware',
},
'127.0.1.106': {
'tags': 'botnet',
'description': 'abused legit botnet',
},
'127.0.1.255': {
'description': 'BANNED',
},
}
class SpamhausFqdn(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
self.is_advanced = True
def _resolve(self, data):
data = '{}.dbl.spamhaus.org'.format(data)
data = resolve_ns(data)
if data and data[0]:
return data[0]
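    # Illustrative flow (the indicator and return code are placeholders):
    # _resolve('bad.example.com') queries 'bad.example.com.dbl.spamhaus.org';
    # an answer of 127.0.1.5 then maps via CODES to the 'malware' tag in
    # process() below.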
def process(self, i, router):
if 'search' in i.tags:
return
if i.itype == 'fqdn' and i.provider != 'spamhaus.org':
try:
r = self._resolve(i.indicator)
try:
r = CODES.get(str(r), None)
except Exception as e:
# https://www.spamhaus.org/faq/section/DNSBL%20Usage
self.logger.error(e)
self.logger.info('check spamhaus return codes')
r = None
if r:
confidence = CONFIDENCE
if ' legit ' in r['description']:
confidence = 6
f = Indicator(**i.__dict__())
f.tags = [r['tags']]
if 'hunter' not in f.tags:
f.tags.append('hunter')
f.description = r['description']
f.confidence = confidence
f.provider = PROVIDER
f.reference_tlp = 'white'
f.reference = 'http://www.spamhaus.org/query/dbl?domain={}'.format(f.indicator)
f.lasttime = f.reporttime = arrow.utcnow()
x = router.indicators_create(f)
self.logger.debug('Spamhaus FQDN: {}'.format(x))
except KeyError as e:
self.logger.error(e)
except Exception as e:
self.logger.error('[Hunter: SpamhausFqdn] {}: giving up on indicator {}'.format(e, i))
Plugin = SpamhausFqdn
| mpl-2.0 | -6,501,710,877,080,091,000 | 27.93578 | 102 | 0.484147 | false | 3.642032 | false | false | false |
Koheron/zynq-sdk | make.py | 2 | 7794 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import jinja2
import yaml
import server
def append_path(filename, file_path):
''' If a filename starts with './' then it is relative to the config.yml path.
'''
if filename.startswith('./'):
filename = os.path.join(file_path, filename)
else:
filename = os.path.join(SDK_PATH, filename)
return filename
def load_config(config_filename):
''' Get the config dictionary from the file 'config.yml' '''
with open(config_filename) as f:
config = yaml.safe_load(f)
return config
def parse_brackets(string):
''' ex: 'pwm', '4' = parse_brackets('pwm[4]') '''
start, end = map(lambda char : string.find(char), ('[', ']'))
if start >= 0 and end >= 0:
return string[0 : start], string[start + 1 : end]
else:
return string, '1'
def read_parameters(string, parameters):
string, parameter = parse_brackets(string)
if parameter.isdigit():
parameter = int(parameter)
else:
assert parameter in parameters
parameter = parameters[parameter]
return string, parameter
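# Rough examples (parameter names and values are placeholders):
#   read_parameters('pwm[4]', {})               -> ('pwm', 4)
#   read_parameters('adc[n_adc]', {'n_adc': 2}) -> ('adc', 2)
#   read_parameters('led', {})                  -> ('led', 1)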
def build_memory(memory, parameters):
for address in memory:
address['name'], address['n_blocks'] = read_parameters(address['name'], parameters)
assert (address['n_blocks'] > 0)
# Protection
if not 'protection' in address:
address['prot_flag'] = 'PROT_READ|PROT_WRITE'
elif address['protection'] == 'read':
address['prot_flag'] = 'PROT_READ'
elif address['protection'] == 'write':
address['prot_flag'] = 'PROT_WRITE'
return memory
def build_registers(registers, parameters):
new_registers = []
for register in registers:
register, parameter = read_parameters(register, parameters)
if parameter == 1:
new_registers.append(register)
else:
for i in range(parameter):
new_registers.append(register+str(i))
registers = new_registers
return registers
def append_memory_to_config(config):
parameters = config.get('parameters', {})
config['memory'] = build_memory(config.get('memory', {}), parameters)
config['control_registers'] = build_registers(config.get('control_registers', {}), parameters)
config['ps_control_registers'] = build_registers(config.get('ps_control_registers', {}), parameters)
config['status_registers'] = build_registers(config.get('status_registers', {}), parameters)
config['ps_status_registers'] = build_registers(config.get('ps_status_registers', {}), parameters)
return config
def build_json(dict):
dict_json = json.dumps(dict, separators=(',', ':')).replace('"', '\\"')
return dict_json
def dump_if_changed(filename, new_dict):
changed = False
if os.path.isfile(filename):
with open(filename, 'r') as yml_file:
old_dict = yaml.safe_load(yml_file)
if old_dict != new_dict:
changed = True
if not os.path.isfile(filename) or changed:
with open(filename, 'w') as yml_file:
yaml.dump(new_dict, yml_file)
#########################
# Jinja2 template engine
#########################
def get_renderer():
renderer = jinja2.Environment(
block_start_string = '{%',
block_end_string = '%}',
variable_start_string = '{{',
variable_end_string = '}}',
loader = jinja2.FileSystemLoader([os.path.join(SDK_PATH, 'fpga'), os.path.join(SDK_PATH, 'server/templates')])
)
def quote(list_):
return ['"%s"' % element for element in list_]
def remove_extension(filename):
toks = filename.split('.')
return toks[0]
def replace_KMG(string):
return string.replace('K', '*1024').replace('M', '*1024*1024').replace('G', '*1024*1024*1024')
renderer.filters['quote'] = quote
renderer.filters['remove_extension'] = remove_extension
renderer.filters['replace_KMG'] = replace_KMG
return renderer
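# Inside the templates the custom filters behave roughly like:
#   "16M" | replace_KMG          -> "16*1024*1024"
#   ["a", "b"] | quote           -> ['"a"', '"b"']
#   "top.bit" | remove_extension -> "top"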
def fill_template(config, template_filename, output_filename):
template = get_renderer().get_template(template_filename)
with open(output_filename, 'w') as output:
output.write(template.render(config=config))
###################
# Main
###################
SDK_PATH = os.getenv('SDK_PATH', '')
if __name__ == "__main__":
cmd = sys.argv[1]
config_filename = sys.argv[2]
output_filename = sys.argv[3]
output_dirname = os.path.dirname(output_filename)
if not os.path.exists(output_dirname):
os.makedirs(output_dirname)
config = load_config(config_filename)
config_path = os.path.dirname(config_filename)
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding('utf-8')
if cmd == '--name':
with open(output_filename, 'w') as f:
f.write(config['name'])
elif cmd == '--memory_yml':
for field in ['drivers', 'web', 'cores', 'modules', 'name', 'board', 'version']:
config.pop(field, None)
dump_if_changed(output_filename, config)
elif cmd == '--config_tcl':
fill_template(append_memory_to_config(config), 'config.tcl', output_filename)
elif cmd == '--cores':
for module in config.get('modules', []):
module_path = os.path.dirname(module)
module = append_path(module, module_path)
module_config = load_config(module)
module_cores = module_config.get('cores')
if module_cores is not None:
config['cores'].extend(module_cores)
config['cores'] = list(set(config['cores']))
for i in range(len(config['cores'])):
config['cores'][i] = append_path(config['cores'][i], config_path)
with open(output_filename, 'w') as f:
f.write(' '.join(config.get('cores', [])))
elif cmd == '--board':
config['board'] = append_path(config['board'], config_path)
with open(output_filename, 'w') as f:
f.write(config['board'])
elif cmd == '--drivers':
for i, path in enumerate(config.get('drivers', [])):
config['drivers'][i] = append_path(path, config_path)
with open(output_filename, 'w') as f:
f.write(' '.join(config.get('drivers', [])))
elif cmd == '--xdc':
for i, path in enumerate(config.get('xdc', [])):
config['xdc'][i] = append_path(path, config_path)
with open(output_filename, 'w') as f:
f.write(' '.join(config.get('xdc', [])))
elif cmd == '--memory_hpp':
config = append_memory_to_config(config)
config['json'] = build_json(config)
fill_template(config, 'memory.hpp', output_filename)
elif cmd == '--render_template':
template_filename = sys.argv[4]
for i in range(len(config['drivers'])):
config['drivers'][i] = append_path(config['drivers'][i], config_path)
server.render_template(template_filename, output_filename, server.get_drivers(config['drivers']))
elif cmd == '--render_interface':
driver_filename_hpp = sys.argv[4]
id_ = server.get_driver_id(config['drivers'], driver_filename_hpp)
server.render_driver(server.get_driver(driver_filename_hpp, id_), output_filename)
elif cmd == '--web':
for i, path in enumerate(config.get('web', [])):
config['web'][i] = append_path(path, config_path)
with open(output_filename, 'w') as f:
f.write(' '.join(config.get('web', [])))
elif cmd == '--version':
config['version'] = config.get('version', '0.0.0')
with open(output_filename, 'w') as f:
f.write(config['version'])
else:
raise ValueError('Unknown command') | mit | -384,442,412,388,430,460 | 32.599138 | 116 | 0.594303 | false | 3.776163 | true | false | false |
spapageo0x01/dioskrS | dioskr.py | 1 | 1583 | #!/usr/bin/python
#TODO: create a torrent file for reconstruction purposes!
import sys
import argparse
import os
import hashlib
from layer_block import BlockStore
output_dir = "/mnt/shared_volume/dskrs_field"
def input_check(arg):
#check if file exists
if os.path.isfile(arg) is False:
raise argparse.ArgumentTypeError('Input file does not exist')
return arg
def block_check(arg):
value = int(arg)
if value <= 0:
raise argparse.ArgumentTypeError('Block size should be a positive value')
if ((value % 4096) != 0):
raise argparse.ArgumentTypeError('Block size should be 4K aligned')
return value
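# Rough examples: block_check('8192') -> 8192, while '0' or a non-4K-aligned
# value such as '1000' raises argparse.ArgumentTypeError.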
def init_vars():
parser = argparse.ArgumentParser(description='dioskr input parameters.')
parser.add_argument('input_file', default='', type=input_check, help='input file path')
parser.add_argument('block_size', default='4096', type=block_check, help='size of block deduplication in bytes')
#parser.add_argument('output_dir', help='output directory')
args = parser.parse_args()
return args.input_file, args.block_size
if __name__ == "__main__":
print '=======dioskrS v0.1========'
input_file, block_size = init_vars()
block_eng = BlockStore(input_file, block_size, output_dir)
blocks_nr = 0
    while True:
        block = block_eng.get_sync()
        # Stop on the empty sentinel before counting/hashing it, so the final
        # block count is not off by one.
        if block == "":
            break
        blocks_nr += 1
        hash_object = hashlib.sha512(block)
        hex_dig = hash_object.hexdigest()
print 'Finished reading file! Number of %d blocks: %d' % (block_size, blocks_nr)
| gpl-3.0 | -8,450,636,455,122,632,000 | 26.77193 | 116 | 0.657612 | false | 3.698598 | false | false | false |
kracekumar/twython | twython3k/twython.py | 1 | 21271 | #! /usr/bin/python
"""
Twython is a library for Python that wraps the Twitter API.
It aims to abstract away all the API endpoints, so that additions to the \
library and/or the Twitter API won't cause any overall problems.
Questions, comments? [email protected]
"""
__author__ = "Ryan McGrath <[email protected]>"
__version__ = "1.4.1"
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import urllib.parse
import http.client
import httplib2
import mimetypes
from email.generator import _make_boundary
import re
import oauth2 as oauth
# Twython maps keyword based arguments to Twitter API endpoints. The endpoints
# table is a file with a dictionary of every API endpoint that Twython supports.
from .twitter_endpoints import base_url, api_table
from urllib.error import HTTPError
import json as simplejson
class TwythonError(AttributeError):
"""
Generic error class, catch-all for most Twython issues.
Special cases are handled by APILimit and AuthError.
Note: To use these, the syntax has changed as of Twython 1.3. \
To catch these, you need to explicitly import them into your code,
e.g:
from twython import TwythonError, APILimit, AuthError
"""
def __init__(self, msg, error_code=None):
self.msg = msg
if error_code == 400:
raise APILimit(msg)
def __str__(self):
return repr(self.msg)
class APILimit(TwythonError):
"""
Raised when you've hit an API limit. Try to avoid these, read the API
docs if you're running into issues here, Twython does not concern \
itself with this matter beyond telling you that you've done goofed.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class AuthError(TwythonError):
"""
Raised when you try to access a protected resource and it fails due to \
some issue with your authentication.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class Twython(object):
def __init__(self, twitter_token = None, twitter_secret = None,\
oauth_token = None, oauth_token_secret = None, headers=None):
"""setup(self, oauth_token = None, headers = None)
Instantiates an instance of Twython. Takes optional parameters for \
authentication and such (see below).
Parameters:
twitter_token - Given to you when you register your application\
with Twitter.
twitter_secret - Given to you when you register your \
application with Twitter.
oauth_token - If you've gone through the authentication process\
and have a token for this user,pass it in and \
it'll be used for all requests going forward.
oauth_token_secret - see oauth_token; it's the other half.
headers - User agent header, dictionary style aka \
{'User-Agent': 'Bert'}
** Note: versioning is not currently used by search.twitter \
functions; when Twitter moves their junk, it'll be supported.
"""
# Needed for hitting that there API.
self.request_token_url = 'http://twitter.com/oauth/request_token'
self.access_token_url = 'http://twitter.com/oauth/access_token'
self.authorize_url = 'http://twitter.com/oauth/authorize'
self.authenticate_url = 'http://twitter.com/oauth/authenticate'
self.twitter_token = twitter_token
self.twitter_secret = twitter_secret
self.oauth_token = oauth_token
self.oauth_secret = oauth_token_secret
# If there's headers, set them, otherwise be an embarassing parent for \
# their own good.
self.headers = headers
if self.headers is None:
self.headers = {'User-agent': 'Twython Python Twitter Library v1.3'}
consumer = None
token = None
if self.twitter_token is not None and self.twitter_secret is not None:
consumer = oauth.Consumer(self.twitter_token, self.twitter_secret)
if self.oauth_token is not None and self.oauth_secret is not None:
token = oauth.Token(oauth_token, oauth_token_secret)
# Filter down through the possibilities here - if they have a token, \
# if they're first stage, etc.
if consumer is not None and token is not None:
self.client = oauth.Client(consumer, token)
elif consumer is not None:
self.client = oauth.Client(consumer)
else:
# If they don't do authentication, but still want to request \
# unprotected resources, we need an opener.
self.client = httplib2.Http()
def __getattr__(self, api_call):
"""
The most magically awesome block of code you'll see in 2010.
Rather than list out 9 million damn methods for this API, \
we just keep a table (see above) of every API endpoint and their \
corresponding function id for this library. This pretty much gives
unlimited flexibility in API support - there's a slight chance of a\
performance hit here, but if this is going to be your bottleneck...\
well, don't use Python. ;P
For those who don't get what's going on here, Python classes have \
this great feature known as __getattr__().
It's called when an attribute that was called on an object doesn't \
seem to exist - since it doesn't exist,we can take over and find \
the API method in our table. We then return a function that \
downloads and parses what we're looking for, based on the keywords \
passed in.
I'll hate myself for saying this, but this is heavily inspired by \
Ruby's "method_missing".
"""
def get(self, **kwargs):
# Go through and replace any mustaches that are in our API url.
fn = api_table[api_call]
base = re.sub(
'\{\{(?P<m>[a-zA-Z_]+)\}\}',
lambda m: "%s" % kwargs.get(m.group(1), '1'),\
# The '1' here catches the API version. Slightly hilarious.
base_url + fn['url']
)
# Then open and load that shiiit, yo.
# TODO: check HTTP method and junk, handle errors/authentication
if fn['method'] == 'POST':
resp, content = self.client.request(base, fn['method'], \
urllib.parse.urlencode(dict([k, v.encode('utf-8')] \
for k, v in list(kwargs.items()))))
else:
url = base + "?" + "&".join(["%s=%s" %(key, value) \
for (key, value) in list(kwargs.items())])
resp, content = self.client.request(url, fn['method'])
return simplejson.loads(content.decode('utf-8'))
if api_call in api_table:
return get.__get__(self)
else:
raise TwythonError(api_call)
def get_authentication_tokens(self):
"""
get_auth_url(self)
Returns an authorization URL for a user to hit.
"""
resp, content = self.client.request(self.request_token_url, "GET")
if resp['status'] != '200':
raise AuthError("Seems something couldn't be verified with your \
OAuth junk. Error: %s, Message: %s" % (resp['status'], content))
request_tokens = dict(urllib.parse.parse_qsl(content))
request_tokens['auth_url'] = "%s?oauth_token=%s" % \
(self.authenticate_url, request_tokens['oauth_token'])
return request_tokens
def get_authorized_tokens(self):
"""
get_authorized_tokens
Returns authorized tokens after they go through the auth_url phase.
"""
resp, content = self.client.request(self.access_token_url, "GET")
return dict(urllib.parse.parse_qsl(content))
# --------------------------------------------------------------------------
# The following methods are all different in some manner or require special\
# attention with regards to the Twitter API.
# Because of this, we keep them separate from all the other endpoint \
# definitions - ideally this should be change-able,but it's not high on \
# the priority list at the moment.
# --------------------------------------------------------------------------
@staticmethod
def constructApiURL(base_url, params):
return base_url + "?" + "&".join(["%s=%s" %(Twython.unicode2utf8(key),\
urllib.parse.quote_plus(Twython.unicode2utf8(value))) \
for (key, value) in list(params.items())])
@staticmethod
def shortenURL(url_to_shorten, shortener = "http://is.gd/api.php", \
query = "longurl"):
"""shortenURL(url_to_shorten, shortener = "http://is.gd/api.php", \
query = "longurl")
Shortens url specified by url_to_shorten.
Parameters:
url_to_shorten - URL to shorten.
shortener - In case you want to use a url shortening service \
other than is.gd.
"""
try:
content = urllib.request.urlopen(shortener + "?" + \
urllib.parse.urlencode(\
{query: Twython.unicode2utf8(url_to_shorten)})).read()
return content
except HTTPError as e:
raise TwythonError("shortenURL() failed with a %s error code." % \
repr(e.code))
def bulkUserLookup(self, ids = None, screen_names = None, version = None):
""" bulkUserLookup(self, ids = None, screen_names = None, \
version = None)
A method to do bulk user lookups against the Twitter API. \
Arguments (ids (numbers) / screen_names (strings)) should be flat \
Arrays that contain their respective data sets.
Statuses for the users in question will be returned inline if \
they exist. Requires authentication!
"""
apiURL = "http://api.twitter.com/1/users/lookup.json?lol=1"
if ids is not None:
apiURL += "&user_id="
for id in ids:
apiURL += repr(id) + ","
if screen_names is not None:
apiURL += "&screen_name="
for name in screen_names:
apiURL += name + ","
try:
resp, content = self.client.request(apiURL, "GET")
return simplejson.loads(content)
except HTTPError as e:
raise TwythonError("bulkUserLookup() failed with a %s error code." \
% repr(e.code), e.code)
def search(self, **kwargs):
"""search(search_query, **kwargs)
Returns tweets that match a specified query.
Parameters:
See the documentation at http://dev.twitter.com/doc/get/search.\
Pass in the API supported arguments as named parameters.
e.g x.search(q = "python")
"""
searchURL = Twython.constructApiURL(\
"http://search.twitter.com/search.json", kwargs)
try:
resp, content = self.client.request(searchURL, "GET")
return simplejson.loads(content)
except HTTPError as e:
raise TwythonError("getSearchTimeline() failed with a %s error \
code." % repr(e.code), e.code)
def searchTwitter(self, **kwargs):
"""use search(search_query, **kwargs)
searchTwitter(q = "python", page = "2")"""
return search(self, **kwargs)
def searchGen(self, search_query, **kwargs):
"""searchGen(search_query, **kwargs)
Returns a generator of tweets that match a specified query.
Parameters:
See the documentation at http://dev.twitter.com/doc/get/search.\
Pass in the API supported arguments as named parameters.
e.g x.search(search_query="python", page="2")
"""
searchURL = Twython.constructApiURL(\
"http://search.twitter.com/search.json?q=%s" % Twython.unicode2utf8(\
search_query), kwargs)
try:
resp, content = self.client.request(searchURL, "GET")
data = simplejson.loads(content)
except HTTPError as e:
raise TwythonError("searchTwitterGen() failed with a %s error \
code." % repr(e.code), e.code)
if not data['results']:
raise StopIteration
for tweet in data['results']:
yield tweet
if 'page' not in kwargs:
kwargs['page'] = '2'
else:
try:
kwargs['page'] = int(kwargs['page'])
kwargs['page'] += 1
kwargs['page'] = str(kwargs['page'])
except TypeError:
raise TwythonError("searchGen() exited because page takes str")
except e:
raise TwythonError("searchGen() failed with %s error code" %\
repr(e.code), e.code)
for tweet in self.searchGen(search_query, **kwargs):
yield tweet
def isListMember(self, list_id, id, username, version = 1):
""" isListMember(self, list_id, id, version)
Check if a specified user (id) is a member of the list in question \
(list_id).
**Note: This method may not work for private/protected lists, \
unless you're authenticated and have access to those lists.
Parameters:
list_id - Required. The slug of the list to check against.
id - Required. The ID of the user being checked in the list.
username - User who owns the list you're checking against \
(username)
version (number) - Optional. API version to request.
Entire Twython class defaults to 1, but you can override on a
function-by-function or class basis - (version=2), etc.
"""
try:
resp, content = self.client.request("http://api.twitter.com/%d/%s/\
%s/members/%s.json" % (version, username, list_id, repr(id)))
return simplejson.loads(content)
except HTTPError as e:
raise TwythonError("isListMember() failed with a %d error code." % \
repr(e.code), e.code)
def isListSubscriber(self, username, list_id, id, version = 1):
""" isListSubscriber(self, list_id, id, version)
Check if a specified user (id) is a subscriber of the list in
question (list_id).
**Note: This method may not work for private/protected lists,
unless you're authenticated and have access to those lists.
Parameters:
list_id - Required. The slug of the list to check against.
id - Required. The ID of the user being checked in the list.
username - Required. The username of the owner of the list \
that you're seeing if someone is subscribed to.
version (number) - Optional. API version to request.\
Entire Twython class defaults to 1, but you can override on a \
function-by-function or class basis - (version=2), etc.
"""
try:
resp, content = "http://api.twitter.com/%d/%s/%s/following/%s.json"\
% (version, username, list_id, repr(id))
return simplejson.loads(content)
except HTTPError as e:
raise TwythonError("isListMember() failed with a %d error code." % \
repr(e.code) , e.code)
# The following methods are apart from the other Account methods, \
# because they rely on a whole multipart-data posting function set.
def updateProfileBackgroundImage(self, filename, tile="true", version = 1):
""" updateProfileBackgroundImage(filename, tile="true")
Updates the authenticating user's profile background image.
Parameters:
image - Required. Must be a valid GIF, JPG, or PNG image of \
less than 800 kilobytes in size. Images with width larger than \
2048 pixels will be forceably scaled down.
tile - Optional (defaults to true). If set to true the \
background image will be displayed tiled. The image will not \
be tiled otherwise.
** Note: It's sad, but when using this method, pass the tile \
value as a string, e.g tile="false"
version (number) - Optional. API version to request.
Entire Twython class defaults to 1, but you can override on a \
function-by-function or class basis - (version=2), etc.
"""
try:
files = [("image", filename, open(filename, 'rb').read())]
fields = []
content_type, body = Twython.encode_multipart_formdata(fields, \
files)
headers = {'Content-Type': content_type, 'Content-Length': \
str(len(body))}
r = urllib.request.Request("http://api.twitter.com/%d/account/\
update_profile_background_image.json?tile=%s" %\
(version, tile), body, headers)
return urllib.request.urlopen(r).read()
except HTTPError as e:
raise TwythonError("updateProfileBackgroundImage() \
failed with a %d error code." % repr(e.code), e.code)
def updateProfileImage(self, filename, version = 1):
""" updateProfileImage(filename)
Updates the authenticating user's profile image (avatar).
Parameters:
image - Required. Must be a valid GIF, JPG, or PNG image of \
less than 700 kilobytes in size. Images with width larger than \
500 pixels will be scaled down.
version (number) - Optional. API version to request. \
Entire Twython class defaults to 1, but you can override on a \
function-by-function or class basis - (version=2), etc.
"""
try:
files = [("image", filename, open(filename, 'rb').read())]
fields = []
content_type, body = Twython.encode_multipart_formdata(fields, \
files)
headers = {'Content-Type': content_type, 'Content-Length': \
str(len(body))}
r = urllib.request.Request("http://api.twitter.com/%d/account/\
update_profile_image.json" % version, body, headers)
return urllib.request.urlopen(r).read()
except HTTPError as e:
raise TwythonError("updateProfileImage() failed with a %d error \
code." % repr(e.code), e.code)
@staticmethod
def encode_multipart_formdata(fields, files):
BOUNDARY = _make_boundary()
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"'\
% (key, filename))
L.append('Content-Type: %s' % mimetypes.guess_type(filename)[0] or \
'application/octet-stream')
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
@staticmethod
def unicode2utf8(text):
try:
if isinstance(text, str):
text = text.encode('utf-8')
except:
pass
return text
| mit | -3,541,911,071,432,089,000 | 41.457086 | 80 | 0.546237 | false | 4.501799 | false | false | false |
gdos/serge | serialize.py | 2 | 4746 | """The serialization logic"""
import cPickle
import copy
class InvalidFile(Exception): """The file used to thaw an item was not a valid serialized file"""
class Bag(object):
"""Bag to hold properties"""
def __init__(self, **kw):
"""Initialise the bag"""
for name, value in kw.iteritems():
setattr(self, name, value)
# Types
class Int(int):
"""An int"""
class Float(float):
"""An float"""
class String(str):
"""A str"""
class List(list):
"""A list"""
class Dict(dict):
"""A dict"""
class Bool(int):
"""A boolean"""
class Obj(object):
"""An object"""
def initType(item, name, description=None):
"""Initialize the type"""
item.name = name
item.description = description if description else name
def I(name, value, description=None):
v = Int(value)
initType(v, name, description)
return v
def F(name, value, description=None):
v = Float(value)
initType(v, name, description)
return v
def S(name, value, description=None):
v = String(value)
initType(v, name, description)
return v
def L(name, value, description=None):
v = List(value)
initType(v, name, description)
return v
def D(name, value, description=None):
v = Dict(value)
initType(v, name, description)
return v
def B(name, value, description=None):
v = Bool(value)
initType(v, name, description)
return v
def O(name, value, description=None):
v = Obj()
initType(v, name, description)
return v
class Serializable(object):
"""A mixing class to help serialize and deserialize objects"""
# This is where you put the properties that your object has
# This should be a list of tuples
# name, default value, type, description
my_properties = ()
@classmethod
def createInstance(cls):
"""Return an instance of the class with all default properties set"""
instance = cls()
instance.__setstate__()
return instance
@classmethod
def _getProperties(cls):
"""Get the properties all the way up the inheritance tree"""
props = dict([(obj.name, obj) for obj in cls.my_properties])
for the_cls in cls.__bases__:
if issubclass(the_cls, Serializable):
for key, value in the_cls._getProperties():
if key not in props:
props[key] = value
return props.iteritems()
def __getstate__(self):
"""Return the live properties suitable for pickling"""
values = []
for name, _ in self.__class__._getProperties():
values.append((name, getattr(self, name)))
return values
def __setstate__(self, state=None):
"""Initialize the object to the given state for unpickling"""
self.initial_properties = Bag()
#
# Initialize first from the defaults and then from the live state
for this_state in (self.__class__._getProperties(), state):
if this_state:
for name, value in this_state:
setattr(self, name, value)
setattr(self.initial_properties, name, value)
def asString(self):
"""Return the properties of this object as a string"""
return cPickle.dumps(self, protocol=2)
def toFile(self, filename):
"""Store this object in a file"""
with file(filename, 'wb') as f:
f.write(self.asString())
@classmethod
def fromString(cls, text):
"""Return a new instance from a string"""
obj = cPickle.loads(text)
obj.init()
return obj
@classmethod
def fromFile(cls, filename):
"""Return a new instance from a file"""
with file(filename, 'rb') as f:
try:
return cls.fromString(f.read())
except Exception, err:
raise InvalidFile('Failed to load data from file "%s": %s' % (filename, err))
def init(self):
"""Implement this method to do any object initialization after unpickling"""
pass
def copy(self):
"""Return another copy of this item"""
return self.fromString(self.asString())
class SerializedBag(object):
"""A bag that can be serialized"""
def __init__(self, **kw):
"""Initialise the bag"""
for name, value in kw.iteritems():
setattr(self, name, value)
def init(self):
"""Initialise - here to meet the Serialized protocol"""
pass
def copy(self):
"""Return a copy"""
return copy.deepcopy(self)
| lgpl-3.0 | 3,953,179,096,236,491,000 | 25.813559 | 97 | 0.576485 | false | 4.20372 | false | false | false |
rcbops/quantum-buildpackage | quantum/plugins/openvswitch/ovs_db.py | 4 | 1694 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
from sqlalchemy.orm import exc
import quantum.db.api as db
import quantum.db.models as models
import ovs_models
def get_vlans():
session = db.get_session()
try:
bindings = session.query(ovs_models.VlanBinding).\
all()
except exc.NoResultFound:
return []
res = []
for x in bindings:
res.append((x.vlan_id, x.network_id))
return res
def add_vlan_binding(vlanid, netid):
session = db.get_session()
binding = ovs_models.VlanBinding(vlanid, netid)
session.add(binding)
session.flush()
return binding.vlan_id
def remove_vlan_binding(netid):
session = db.get_session()
try:
binding = session.query(ovs_models.VlanBinding).\
filter_by(network_id=netid).\
one()
session.delete(binding)
except exc.NoResultFound:
pass
session.flush()
| apache-2.0 | -2,876,906,124,706,180,000 | 28.206897 | 78 | 0.677096 | false | 3.619658 | false | false | false |
bijandhakal/pattern | examples/01-web/15-sort.py | 21 | 1208 | import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.web import GOOGLE, YAHOO, BING, sort
# The pattern.web module includes an interesting sort() algorithm.
# Ir classifies search terms according to a search engine's total results count.
# When a context is defined, it sorts according to relevancy to the context:
# sort(terms=["black", "green", "red"], context="Darth Vader") =>
# yields "black" as the best candidate,
# because "black Darth Vader" yields more search results.
results = sort(
terms = [
"arnold schwarzenegger",
"chuck norris",
"dolph lundgren",
"steven seagal",
"sylvester stallone",
"mickey mouse",
],
context = "dangerous", # Term used for sorting.
service = BING, # GOOGLE, YAHOO, BING, ...
license = None, # You should supply your own API license key for the given service.
strict = True, # Wraps the query in quotes, i.e. 'mac sweet'.
reverse = True, # Reverses term and context: 'sweet mac' instead of 'mac sweet'.
cached = True)
for weight, term in results:
print "%5.2f" % (weight * 100) + "%", term | bsd-3-clause | 4,632,375,123,827,753,000 | 40.689655 | 94 | 0.627483 | false | 3.461318 | false | false | false |
iuga/kronos | kronos/preprocessing/image.py | 1 | 5407 | from PIL import Image
import numpy as np
class Images(object):
RESIZE_METHODS = {
'bilinear': Image.BILINEAR,
'nearest': Image.NEAREST,
'lanczos': Image.LANCZOS,
'bicubic': Image.BICUBIC
}
NORMALIZATION_ZERO_ONE = 'zero_one'
NORMALIZATION_MINUS_PLUS_ONE = 'minus_plus_one'
NORMALIZATION_METHODS = [
NORMALIZATION_ZERO_ONE, NORMALIZATION_MINUS_PLUS_ONE
]
def load(self, filename):
"""
Load an image into PIL format
"""
self.img = Image.open(filename)
self.img = self.img.convert('RGB')
return self
def save(self, filename='/tmp/out.jpg'):
"""
Saves this image under the given filename. The format to use is determined from the filename extension.
"""
self.img.save(filename)
return self
def describe(self):
"""
Print some useful information for debugging
"""
print("Size: {}".format(self.img.size))
return self
def to_array(self, normalization=False, mean_normalized=False):
"""
Return a NumpyArray with (height, width, channel) format.
normalization: If False/None/Empty no normalization will be applied. Otherwise, the method should be passed.
normalization methods:
- zero_one: All the values will be normalized between [0, 1]
- minus_plus_one: All the values will be normalized between [-1, 1]
mean_normalized: Mean normalized perform a channel normalization. E.g: (123.68, 116.779, 103.939).
"""
# Validate the normalization method
if normalization and normalization not in self.NORMALIZATION_METHODS:
raise ValueError("Invalid Normalization method. Valid values: {}".format(self.NORMALIZATION_METHODS.keys()))
# Numpy array x has format (height, width, channel)
# but original PIL image has format (width, height, channel)
the_image_array = np.asarray(self.img, dtype='int16')
if normalization:
the_image_array = the_image_array.astype('float16')
if mean_normalized:
if len(mean_normalized) != 3:
raise ValueError("mean_normalized should have shape 3 for (r,g,b)")
the_image_array[:, :, 0] -= mean_normalized[0]
the_image_array[:, :, 1] -= mean_normalized[1]
the_image_array[:, :, 2] -= mean_normalized[2]
if normalization == self.NORMALIZATION_ZERO_ONE:
# All values are between 0 and 1
the_image_array /= 255.
if normalization == self.NORMALIZATION_MINUS_PLUS_ONE:
# All values are between -1 and 1
the_image_array /= 255.
the_image_array -= 0.5
the_image_array *= 2.
return the_image_array
def resize(self, width=224, height=224, method='bilinear'):
"""
Resize this image to the given size using the defined method.
"""
self.img = self.img.resize(size=(width, height), resample=self.RESIZE_METHODS.get(method, Image.BILINEAR))
return self
def central_crop(self, central_fraction=0.50):
"""
Crop the central region of the image.
Remove the outer parts of an image but retain the central region of the image along each dimension.
If we specify central_fraction = 0.5, this function returns the region marked with "X" in the below diagram.
--------
| |
| XXXX |
| XXXX |
| | where "X" is the central 50% of the image.
--------
"""
w, h = self.img.size
nw, nh = w * central_fraction, h * central_fraction
left = np.ceil((w - nw) / 2.)
top = np.ceil((h - nh) / 2.)
right = np.floor((w + nw) / 2)
bottom = np.floor((h + nh) / 2)
self.img = self.img.crop((left, top, right, bottom))
return self
def centered_crop(self, width, height):
"""
Crop the image to the new size keeping the content in the center.
Remove the outer parts of an image but retain the central region of the image along each dimension.
--------
| |
| XXXX |
| XXXX | where "X" has (width, height) size
| |
--------
"""
w, h = self.img.size
nw, nh = width, height
if width > w:
width = w
if height > h:
height = h
left = np.ceil((w - nw) / 2.)
top = np.ceil((h - nh) / 2.)
right = np.floor((w + nw) / 2)
bottom = np.floor((h + nh) / 2)
self.img = self.img.crop((left, top, right, bottom))
return self
def pad_to_square(self):
"""
Creates a padding in the shorter side with 0 (black) until the image is squared.
The image size will be (longer_side_size, longer_side_size, 3)
"""
longer_side = max(self.img.size)
horizontal_padding = (longer_side - self.img.size[0]) / 2
vertical_padding = (longer_side - self.img.size[1]) / 2
self.img = self.img.crop(
(
-horizontal_padding,
-vertical_padding,
self.img.size[0] + horizontal_padding,
self.img.size[1] + vertical_padding
)
)
return self
| mit | 3,594,548,231,729,691,000 | 33.006289 | 120 | 0.560015 | false | 3.946715 | false | false | false |
lvdongbing/bilean | bilean/engine/policy.py | 1 | 4369 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from bilean.common import exception
from bilean.common import utils
from bilean.db import api as db_api
from bilean.rules import base as rule_base
class Policy(object):
"""Policy object contains all policy operations"""
def __init__(self, name, **kwargs):
self.name = name
self.id = kwargs.get('id', None)
self.is_default = kwargs.get('is_default', False)
# rules schema like [{'id': 'xxx', 'type': 'os.nova.server'}]
self.rules = kwargs.get('rules', [])
self.metadata = kwargs.get('metadata', None)
self.created_at = kwargs.get('created_at', None)
self.updated_at = kwargs.get('updated_at', None)
self.deleted_at = kwargs.get('deleted_at', None)
def store(self, context):
"""Store the policy record into database table."""
values = {
'name': self.name,
'rules': self.rules,
'is_default': self.is_default,
'meta_data': self.metadata,
'created_at': self.created_at,
'updated_at': self.updated_at,
'deleted_at': self.deleted_at,
}
if self.id:
db_api.policy_update(context, self.id, values)
else:
policy = db_api.policy_create(context, values)
self.id = policy.id
return self.id
@classmethod
def _from_db_record(cls, record):
'''Construct a policy object from database record.
:param record: a DB policy object that contains all fields;
'''
kwargs = {
'id': record.id,
'rules': record.rules,
'is_default': record.is_default,
'metadata': record.meta_data,
'created_at': record.created_at,
'updated_at': record.updated_at,
'deleted_at': record.deleted_at,
}
return cls(record.name, **kwargs)
@classmethod
def load(cls, context, policy_id=None, policy=None, show_deleted=False):
'''Retrieve a policy from database.'''
if policy is None:
policy = db_api.policy_get(context, policy_id,
show_deleted=show_deleted)
if policy is None:
raise exception.PolicyNotFound(policy=policy_id)
return cls._from_db_record(policy)
@classmethod
def load_all(cls, context, limit=None, marker=None,
sort_keys=None, sort_dir=None,
filters=None, show_deleted=False):
'''Retrieve all policies of from database.'''
records = db_api.policy_get_all(context,
limit=limit, marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir,
filters=filters,
show_deleted=show_deleted)
return [cls._from_db_record(record) for record in records]
def find_rule(self, context, rtype):
'''Find the exact rule from self.rules by rtype'''
for rule in self.rules:
if rtype == rule['type']:
return rule_base.Rule.load(context, rule_id=rule['id'])
return None
def to_dict(self):
policy_dict = {
'id': self.id,
'name': self.name,
'rules': self.rules,
'is_default': self.is_default,
'metadata': self.metadata,
'created_at': utils.format_time(self.created_at),
'updated_at': utils.format_time(self.updated_at),
'deleted_at': utils.format_time(self.deleted_at),
}
return policy_dict
def do_delete(self, context):
db_api.policy_delete(context, self.id)
return True
| apache-2.0 | -2,198,763,193,367,899,400 | 33.952 | 78 | 0.563516 | false | 4.083178 | false | false | false |
google-research/google-research | eim/models/lars.py | 1 | 5992 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Learned Acccept/Reject Sampling (Bauer & Mnih, 2018)."""
from __future__ import absolute_import
import functools
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from eim.models import base
tfd = tfp.distributions
class LARS(object):
"""Learned Accept/Reject Sampling model."""
def __init__(self,
K,
T,
data_dim,
accept_fn_layers,
proposal=None,
data_mean=None,
ema_decay=0.99,
dtype=tf.float32,
is_eval=False):
self.k = K
self.T = T # pylint: disable=invalid-name
self.data_dim = data_dim
self.ema_decay = ema_decay
self.dtype = dtype
if data_mean is not None:
self.data_mean = data_mean
else:
self.data_mean = tf.zeros((), dtype=dtype)
self.accept_fn = functools.partial(
base.mlp,
layer_sizes=accept_fn_layers + [1],
final_activation=tf.math.log_sigmoid,
name="a")
if proposal is None:
self.proposal = base.get_independent_normal(data_dim)
else:
self.proposal = proposal
self.is_eval = is_eval
if is_eval:
self.Z_estimate = tf.placeholder(tf.float32, shape=[]) # pylint: disable=invalid-name
with tf.variable_scope("LARS_Z_ema", reuse=tf.AUTO_REUSE):
self.Z_ema = tf.get_variable( # pylint: disable=invalid-name
name="LARS_Z_ema",
shape=[],
dtype=dtype,
initializer=tf.constant_initializer(0.5),
trainable=False)
def log_prob(self, data, log_q_data=None, num_samples=1):
"""Compute log likelihood estimate."""
# Compute log a(z), log pi(z), and log q(z)
log_a_z_r = tf.squeeze(self.accept_fn(data - self.data_mean),
axis=-1) # [batch_size]
# [batch_size]
try:
# Try giving the proposal lower bound num_samples if it can use it.
log_pi_z_r = self.proposal.log_prob(data, num_samples=num_samples)
except TypeError:
log_pi_z_r = self.proposal.log_prob(data)
tf.summary.histogram("log_energy_data", log_a_z_r)
if not self.is_eval:
# Sample zs from proposal to estimate Z
z_s = self.proposal.sample(self.k) # [K, data_dim]
# Compute log a(z) for zs sampled from proposal
log_a_z_s = tf.squeeze(self.accept_fn(z_s - self.data_mean),
axis=-1) # [K]
tf.summary.histogram("log_energy_proposal", log_a_z_s)
# pylint: disable=invalid-name
log_ZS = tf.reduce_logsumexp(log_a_z_s) # []
log_Z_curr_avg = log_ZS - tf.log(tf.to_float(self.k))
if log_q_data is not None:
# This may only be valid when log pi is exact (i.e., not a lower bound).
Z_curr_avg = (1. / (self.k + 1.)) * (
tf.exp(log_ZS) +
tf.exp(log_a_z_r + tf.stop_gradient(log_pi_z_r - log_q_data)))
else:
Z_curr_avg = tf.exp(log_Z_curr_avg)
self.Z_smooth = (
self.ema_decay * self.Z_ema + (1 - self.ema_decay) * Z_curr_avg)
# In forward pass, log Z is log of the smoothed ema version of Z
# In backward pass it is the current estimate of log Z, log_Z_curr_avg
Z = Z_curr_avg + tf.stop_gradient(self.Z_smooth - Z_curr_avg)
tf.summary.scalar("Z", tf.reduce_mean(Z))
else:
Z = self.Z_estimate # pylint: disable=invalid-name
# pylint: enable=invalid-name
alpha = tf.pow(1. - Z, self.T - 1)
log_prob = log_pi_z_r + tf.log(tf.exp(log_a_z_r) * (1. - alpha) / Z + alpha)
return log_prob
def post_train_op(self):
# Set up EMA of Z (EMA is updated after gradient step).
return tf.assign(self.Z_ema, tf.reduce_mean(self.Z_smooth))
def compute_Z(self, num_samples): # pylint: disable=invalid-name
r"""Returns log(\sum_i a(z_i) / num_samples)."""
z_s = self.proposal.sample(num_samples) # [num_samples, data_dim]
# Compute log a(z) for zs sampled from proposal
log_a_z_s = tf.squeeze(self.accept_fn(z_s - self.data_mean),
axis=-1) # [num_samples]
log_Z = tf.reduce_logsumexp(log_a_z_s) - tf.log( # pylint: disable=invalid-name
tf.to_float(num_samples)) # []
return log_Z
def sample(self, num_samples=1):
"""Sample from the model."""
def while_body(t, z, accept):
"""Truncated rejection sampling."""
new_z = self.proposal.sample(num_samples)
accept_prob = tf.squeeze(tf.exp(self.accept_fn(new_z - self.data_mean)),
axis=-1)
new_accept = tf.math.less_equal(
tf.random_uniform(shape=[num_samples], minval=0., maxval=1.),
accept_prob)
force_accept = tf.math.greater_equal(
tf.to_float(t),
tf.to_float(self.T) - 1.)
new_accept = tf.math.logical_or(new_accept, force_accept)
accepted = tf.logical_or(accept, new_accept)
swap = tf.math.logical_and(tf.math.logical_not(accept), new_accept)
z = tf.where(swap, new_z, z)
return t + 1, z, accepted
def while_cond(unused_t, unused_z, accept):
return tf.reduce_any(tf.logical_not(accept))
shape = [num_samples] + self.data_dim
z0 = tf.zeros(shape, dtype=self.dtype)
accept0 = tf.constant(False, shape=[num_samples])
_, zs, _ = tf.while_loop(while_cond, while_body, loop_vars=(0, z0, accept0))
return zs
| apache-2.0 | -7,277,091,270,182,323,000 | 36.685535 | 92 | 0.609312 | false | 3.17037 | false | false | false |
rrader/sens-o-matic | sensomatic/ui/server.py | 1 | 1143 | import asyncio
import logging
from aiohttp import web
from sensomatic.sources import defaults
from sensomatic.ui.data_stream import DataStreamHandler
from sensomatic.ui.static_utils import static_handler
class Server:
def __init__(self):
self.app = self.create_app()
self.handler = self.app.make_handler()
self.srv = self.create_http_server()
def create_app(self):
app = web.Application()
app.router.add_route('GET', '/updates', DataStreamHandler)
app.router.add_route('GET', '/ui/{path:.*}', static_handler)
return app
def create_http_server(self):
loop = asyncio.get_event_loop()
srv = loop.run_until_complete(
loop.create_server(self.handler, '0.0.0.0', 8081)
)
logging.info('will be serving on {}'.format(srv.sockets[0].getsockname()))
return srv
def finalize(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self.handler.finish_connections(1.0))
self.srv.close()
loop.run_until_complete(self.srv.wait_closed())
loop.run_until_complete(self.app.finish())
| mit | -3,038,707,784,990,555,600 | 31.657143 | 82 | 0.64392 | false | 3.617089 | false | false | false |
pdasigi/onto-lstm | encoders.py | 1 | 7096 | import sys
import numpy
from overrides import overrides
from keras.layers import Embedding, Dropout, LSTM, Bidirectional
from onto_attention import OntoAttentionLSTM
from embedding import OntoAwareEmbedding
class Encoder(object):
'''
Encoder is an abstract class that defines a get_encoded_phrase method.
'''
def __init__(self, data_processor=None, embed_dim=50, bidirectional=False, tune_embedding=False,
return_sequences=True, **kwargs):
self.embed_dim = embed_dim
self.data_processor = data_processor
self.bidirectional = bidirectional
self.tune_embedding = tune_embedding
self.return_sequences = return_sequences
self.embedding_layer = None
self.encoder_layer = None
def get_encoded_phrase(self, phrase_input_layer, dropout={}, embedding=None):
'''
Takes a Keras input layer, dropout and returns the output of the encoder as a Keras object.
Arguments:
phrase_input_layer (Input): Keras Input layer of the appropriate shape.
dropout (dict [str -> float]): Dict containing dropout values applied after
`embedding` and `encoder`.
embedding_file (str): Optional gzipped embedding file to use as initialization
for embedding layer.
'''
embedding_layer = self._get_embedding_layer(embedding)
embedded_phrase = embedding_layer(phrase_input_layer)
embedding_dropout = dropout.pop("embedding", 0.0)
if embedding_dropout > 0:
embedded_phrase = Dropout(embedding_dropout)(embedded_phrase)
encoder = self._get_encoder_layer()
encoded_phrase = encoder(embedded_phrase)
encoder_dropout = dropout.pop("encoder", 0.0)
if encoder_dropout > 0:
encoded_phrase = Dropout(encoder_dropout)(encoded_phrase)
return encoded_phrase
def _get_embedding_layer(self, embedding_file=None):
'''
Checks if an embedding layer is defined. If so, returns it. Or else, makes one.
'''
raise NotImplementedError
def _get_encoder_layer(self):
'''
Checks if an encoder layer is defined. If so, returns it. Or else, makes one.
'''
raise NotImplementedError
@staticmethod
def get_custom_objects():
return {}
class LSTMEncoder(Encoder):
@overrides
def _get_embedding_layer(self, embedding_file=None):
if self.embedding_layer is None:
if embedding_file is None:
if not self.tune_embedding:
print >>sys.stderr, "Pretrained embedding is not given. Setting tune_embedding to True."
self.tune_embedding = True
embedding = None
else:
# Put the embedding in a list for Keras to treat it as initiali weights of the embedding
# layer.
embedding = [self.data_processor.get_embedding_matrix(embedding_file, onto_aware=False)]
vocab_size = self.data_processor.get_vocab_size(onto_aware=False)
self.embedding_layer = Embedding(input_dim=vocab_size, output_dim=self.embed_dim,
weights=embedding, trainable=self.tune_embedding,
mask_zero=True, name="embedding")
return self.embedding_layer
@overrides
def _get_encoder_layer(self):
if self.encoder_layer is None:
self.encoder_layer = LSTM(input_dim=self.embed_dim, output_dim=self.embed_dim,
return_sequences=self.return_sequences, name="encoder")
if self.bidirectional:
self.encoder_layer = Bidirectional(self.encoder_layer, name="encoder")
return self.encoder_layer
class OntoLSTMEncoder(Encoder):
def __init__(self, num_senses, num_hyps, use_attention, set_sense_priors, **kwargs):
self.num_senses = num_senses
self.num_hyps = num_hyps
self.use_attention = use_attention
self.set_sense_priors = set_sense_priors
super(OntoLSTMEncoder, self).__init__(**kwargs)
@overrides
def _get_embedding_layer(self, embedding_file=None):
if self.embedding_layer is None:
word_vocab_size = self.data_processor.get_vocab_size(onto_aware=False)
synset_vocab_size = self.data_processor.get_vocab_size(onto_aware=True)
if embedding_file is None:
if not self.tune_embedding:
print >>sys.stderr, "Pretrained embedding is not given. Setting tune_embedding to True."
self.tune_embedding = True
embedding_weights = None
else:
# TODO: Other sources for prior initialization
embedding = self.data_processor.get_embedding_matrix(embedding_file, onto_aware=True)
# Put the embedding in a list for Keras to treat it as weights of the embedding layer.
embedding_weights = [embedding]
if self.set_sense_priors:
initial_sense_prior_parameters = numpy.random.uniform(low=0.01, high=0.99,
size=(word_vocab_size, 1))
# While setting weights, Keras wants trainable weights first, and then the non trainable
# weights. If we are not tuning the embedding, we need to keep the sense priors first.
if not self.tune_embedding:
embedding_weights = [initial_sense_prior_parameters] + embedding_weights
else:
embedding_weights.append(initial_sense_prior_parameters)
self.embedding_layer = OntoAwareEmbedding(word_vocab_size, synset_vocab_size, self.embed_dim,
weights=embedding_weights, mask_zero=True,
set_sense_priors=self.set_sense_priors,
tune_embedding=self.tune_embedding,
name="embedding")
return self.embedding_layer
@overrides
def _get_encoder_layer(self):
if self.encoder_layer is None:
self.encoder_layer = OntoAttentionLSTM(input_dim=self.embed_dim, output_dim=self.embed_dim,
num_senses=self.num_senses, num_hyps=self.num_hyps,
use_attention=self.use_attention, consume_less="gpu",
return_sequences=self.return_sequences, name="onto_lstm")
if self.bidirectional:
self.encoder_layer = Bidirectional(self.encoder_layer, name="onto_lstm")
return self.encoder_layer
@staticmethod
def get_custom_objects():
return {"OntoAttentionLSTM": OntoAttentionLSTM,
"OntoAwareEmbedding": OntoAwareEmbedding}
| apache-2.0 | -5,314,260,022,083,649,000 | 47.272109 | 108 | 0.592024 | false | 4.499683 | false | false | false |
bgris/ODL_bgris | lib/python3.5/site-packages/spyder/plugins/maininterpreter.py | 1 | 11069 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Shortcut management"""
# Standard library imports
from __future__ import print_function
import os
import os.path as osp
import sys
# Third party imports
from qtpy.QtWidgets import (QButtonGroup, QGroupBox, QInputDialog, QLabel,
QLineEdit, QMessageBox, QPushButton, QVBoxLayout)
# Local imports
from spyder.config.base import _
from spyder.plugins.configdialog import GeneralConfigPage
from spyder.py3compat import PY2, is_text_string, to_text_string
from spyder.utils import icon_manager as ima
from spyder.utils.misc import get_python_executable
from spyder.utils import programs
class MainInterpreterConfigPage(GeneralConfigPage):
CONF_SECTION = "main_interpreter"
NAME = _("Python interpreter")
ICON = ima.icon('python')
def __init__(self, parent, main):
GeneralConfigPage.__init__(self, parent, main)
self.cus_exec_radio = None
self.pyexec_edit = None
# Python executable selection (initializing default values as well)
executable = self.get_option('executable', get_python_executable())
if self.get_option('default'):
executable = get_python_executable()
if not osp.isfile(executable):
# This is absolutely necessary, in case the Python interpreter
# executable has been moved since last Spyder execution (following
# a Python distribution upgrade for example)
self.set_option('executable', get_python_executable())
elif executable.endswith('pythonw.exe'):
# That should not be necessary because this case is already taken
# care of by the `get_python_executable` function but, this was
# implemented too late, so we have to fix it here too, in case
# the Python executable has already been set with pythonw.exe:
self.set_option('executable',
executable.replace("pythonw.exe", "python.exe"))
def initialize(self):
GeneralConfigPage.initialize(self)
self.pyexec_edit.textChanged.connect(self.python_executable_changed)
self.cus_exec_radio.toggled.connect(self.python_executable_switched)
def setup_page(self):
newcb = self.create_checkbox
# Python executable Group
pyexec_group = QGroupBox(_("Python interpreter"))
pyexec_bg = QButtonGroup(pyexec_group)
pyexec_label = QLabel(_("Select the Python interpreter for all Spyder "
"consoles"))
def_exec_radio = self.create_radiobutton(
_("Default (i.e. the same as Spyder's)"),
'default', button_group=pyexec_bg)
self.cus_exec_radio = self.create_radiobutton(
_("Use the following Python interpreter:"),
'custom', button_group=pyexec_bg)
if os.name == 'nt':
filters = _("Executables")+" (*.exe)"
else:
filters = None
pyexec_file = self.create_browsefile('', 'executable', filters=filters)
for le in self.lineedits:
if self.lineedits[le][0] == 'executable':
self.pyexec_edit = le
def_exec_radio.toggled.connect(pyexec_file.setDisabled)
self.cus_exec_radio.toggled.connect(pyexec_file.setEnabled)
pyexec_layout = QVBoxLayout()
pyexec_layout.addWidget(pyexec_label)
pyexec_layout.addWidget(def_exec_radio)
pyexec_layout.addWidget(self.cus_exec_radio)
pyexec_layout.addWidget(pyexec_file)
pyexec_group.setLayout(pyexec_layout)
# UMR Group
umr_group = QGroupBox(_("User Module Reloader (UMR)"))
umr_label = QLabel(_("UMR forces Python to reload modules which were "
"imported when executing a file in a Python or "
"IPython console with the <i>runfile</i> "
"function."))
umr_label.setWordWrap(True)
umr_enabled_box = newcb(_("Enable UMR"), 'umr/enabled',
msg_if_enabled=True, msg_warning=_(
"This option will enable the User Module Reloader (UMR) "
"in Python/IPython consoles. UMR forces Python to "
"reload deeply modules during import when running a "
"Python script using the Spyder's builtin function "
"<b>runfile</b>."
"<br><br><b>1.</b> UMR may require to restart the "
"console in which it will be called "
"(otherwise only newly imported modules will be "
"reloaded when executing files)."
"<br><br><b>2.</b> If errors occur when re-running a "
"PyQt-based program, please check that the Qt objects "
"are properly destroyed (e.g. you may have to use the "
"attribute <b>Qt.WA_DeleteOnClose</b> on your main "
"window, using the <b>setAttribute</b> method)"),
)
umr_verbose_box = newcb(_("Show reloaded modules list"),
'umr/verbose', msg_info=_(
"Please note that these changes will "
"be applied only to new consoles"))
umr_namelist_btn = QPushButton(
_("Set UMR excluded (not reloaded) modules"))
umr_namelist_btn.clicked.connect(self.set_umr_namelist)
umr_layout = QVBoxLayout()
umr_layout.addWidget(umr_label)
umr_layout.addWidget(umr_enabled_box)
umr_layout.addWidget(umr_verbose_box)
umr_layout.addWidget(umr_namelist_btn)
umr_group.setLayout(umr_layout)
vlayout = QVBoxLayout()
vlayout.addWidget(pyexec_group)
vlayout.addWidget(umr_group)
vlayout.addStretch(1)
self.setLayout(vlayout)
def python_executable_changed(self, pyexec):
"""Custom Python executable value has been changed"""
if not self.cus_exec_radio.isChecked():
return
if not is_text_string(pyexec):
pyexec = to_text_string(pyexec.toUtf8(), 'utf-8')
if programs.is_python_interpreter(pyexec):
self.warn_python_compatibility(pyexec)
else:
QMessageBox.warning(self, _('Warning'),
_("You selected an invalid Python interpreter for the "
"console so the previous interpreter will stay. Please "
"make sure to select a valid one."), QMessageBox.Ok)
self.pyexec_edit.setText(get_python_executable())
return
def python_executable_switched(self, custom):
"""Python executable default/custom radio button has been toggled"""
def_pyexec = get_python_executable()
cust_pyexec = self.pyexec_edit.text()
if not is_text_string(cust_pyexec):
cust_pyexec = to_text_string(cust_pyexec.toUtf8(), 'utf-8')
if def_pyexec != cust_pyexec:
if custom:
self.warn_python_compatibility(cust_pyexec)
def warn_python_compatibility(self, pyexec):
if not osp.isfile(pyexec):
return
spyder_version = sys.version_info[0]
try:
args = ["-c", "import sys; print(sys.version_info[0])"]
proc = programs.run_program(pyexec, args)
console_version = int(proc.communicate()[0])
except IOError:
console_version = spyder_version
if spyder_version != console_version:
QMessageBox.warning(self, _('Warning'),
_("You selected a <b>Python %d</b> interpreter for the console "
"but Spyder is running on <b>Python %d</b>!.<br><br>"
"Although this is possible, we recommend you to install and "
"run Spyder directly with your selected interpreter, to avoid "
"seeing false warnings and errors due to the incompatible "
"syntax between these two Python versions."
) % (console_version, spyder_version), QMessageBox.Ok)
def set_umr_namelist(self):
"""Set UMR excluded modules name list"""
arguments, valid = QInputDialog.getText(self, _('UMR'),
_("Set the list of excluded modules as "
"this: <i>numpy, scipy</i>"),
QLineEdit.Normal,
", ".join(self.get_option('umr/namelist')))
if valid:
arguments = to_text_string(arguments)
if arguments:
namelist = arguments.replace(' ', '').split(',')
fixed_namelist = []
non_ascii_namelist = []
for module_name in namelist:
if PY2:
if all(ord(c) < 128 for c in module_name):
if programs.is_module_installed(module_name):
fixed_namelist.append(module_name)
else:
QMessageBox.warning(self, _('Warning'),
_("You are working with Python 2, this means that "
"you can not import a module that contains non-"
"ascii characters."), QMessageBox.Ok)
non_ascii_namelist.append(module_name)
elif programs.is_module_installed(module_name):
fixed_namelist.append(module_name)
invalid = ", ".join(set(namelist)-set(fixed_namelist)-
set(non_ascii_namelist))
if invalid:
QMessageBox.warning(self, _('UMR'),
_("The following modules are not "
"installed on your machine:\n%s"
) % invalid, QMessageBox.Ok)
QMessageBox.information(self, _('UMR'),
_("Please note that these changes will "
"be applied only to new Python/IPython "
"consoles"), QMessageBox.Ok)
else:
fixed_namelist = []
self.set_option('umr/namelist', fixed_namelist)
def apply_settings(self, options):
self.main.apply_settings()
| gpl-3.0 | -9,007,455,793,979,388,000 | 46.973451 | 81 | 0.541652 | false | 4.491883 | true | false | false |
amschaal/bioshare | bioshareX/utils.py | 1 | 14503 | from django.shortcuts import redirect, render
from django.core.urlresolvers import reverse
import os
import json
from functools import wraps
from django.http.response import JsonResponse
from django.conf import settings
from django.template import Context, Template
from rest_framework import status
from django.db.models.query_utils import Q
import subprocess
from scandir import scandir
import re
import datetime
from bioshareX.file_utils import istext
from os import path
class JSONDecorator(object):
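    """Decorator that loads a 'json' parameter from the request (POST/GET) or DRF request.data and passes it to the wrapped view as the `json` keyword argument."""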
def __init__(self, orig_func):
self.orig_func = orig_func
def __call__(self, *args, **kwargs):
import json
json_arg = args[0].POST.get('json',args[0].GET.get('json',None))
if json_arg is not None:
kwargs['json'] = json.loads(json_arg)
elif hasattr(args[0], 'data'):
kwargs['json'] = args[0].data
return self.orig_func(*args, **kwargs)
def share_access_decorator_old(perms,share_param='share'):
def wrap(f):
def wrapped_f(*args,**kwargs):
from bioshareX.models import Share
share = Share.objects.get(id=kwargs[share_param])
kwargs[share_param]=share
            return f(*args,**kwargs)
return wrapped_f
return wrap
def ajax_login_required(view_func):
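    """Like login_required, but returns a JSON 401 response instead of redirecting when the user is not authenticated."""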
@wraps(view_func)
def wrapper(request, *args, **kwargs):
if request.user.is_authenticated():
return view_func(request, *args, **kwargs)
return JsonResponse({'status':'error','unauthenticated':True,'errors':['You do not have access to this resource.']},status=status.HTTP_401_UNAUTHORIZED)
return wrapper
class share_access_decorator(object):
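    """View decorator that resolves the share slug/id argument to a Share instance and enforces the listed permissions, returning JSON errors for AJAX requests and redirects otherwise."""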
def __init__(self, perms,share_param='share'):
"""
If there are decorator arguments, the function
to be decorated is not passed to the constructor!
"""
self.perms = perms
self.share_param = share_param
def __call__(self, f):
"""
If there are decorator arguments, __call__() is only called
once, as part of the decoration process! You can only give
it a single argument, which is the function object.
"""
def wrapped_f(*args,**kwargs):
from bioshareX.models import Share
try:
share = Share.get_by_slug_or_id(kwargs[self.share_param])
except Share.DoesNotExist:
return render(args[0],'errors/message.html', {'message':'No share with that ID exists.'},status=500)
kwargs[self.share_param]=share
request = args[0]
user_permissions = share.get_user_permissions(request.user)
for perm in self.perms:
if not share.secure and perm in ['view_share_files','download_share_files']:
continue
if not perm in user_permissions:
if request.is_ajax():
if not request.user.is_authenticated():
return JsonResponse({'status':'error','unauthenticated':True,'errors':['You do not have access to this resource.']},status=status.HTTP_401_UNAUTHORIZED)
return json_error({'status':'error','unauthenticated':True,'errors':['You do not have access to this resource.']})
else:
return json_error(['You do not have access to this resource.'])
else:
if not request.user.is_authenticated():
url = reverse('login') + '?next=%s' % request.get_full_path()
return redirect(url)
return redirect('forbidden')
return f(*args,**kwargs)
return wrapped_f
class safe_path_decorator(object):
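    """View decorator for share/subpath arguments; rejects traversal characters and any resolved path outside settings.DIRECTORY_WHITELIST."""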
def __init__(self, share_param='share',path_param='subpath'):
"""
If there are decorator arguments, the function
to be decorated is not passed to the constructor!
"""
self.share_param = share_param
self.path_param = path_param
def __call__(self, f):
"""
If there are decorator arguments, __call__() is only called
once, as part of the decoration process! You can only give
it a single argument, which is the function object.
"""
def wrapped_f(*args,**kwargs):
from bioshareX.models import Share
share = kwargs.get(self.share_param,None)
if share:
if not isinstance(kwargs[self.share_param], Share):
try:
share = Share.get_by_slug_or_id(share)
except Share.DoesNotExist:
return render(args[0],'errors/message.html', {'message':'No share with that ID exists.'},status=500)
if not paths_contain(settings.DIRECTORY_WHITELIST,share.get_realpath()):
raise Exception('Share has an invalid root path: %s'%share.get_realpath())
path = kwargs.get(self.path_param,None)
if path is not None:
test_path(path)
if share:
full_path = os.path.join(share.get_path(),path)
if not paths_contain(settings.DIRECTORY_WHITELIST,full_path):
raise Exception('Illegal path encountered, %s, %s'%(share.get_path(),path))
return f(*args,**kwargs)
return wrapped_f
class safe_path_decorator_old(object):
def __init__(self, path_param='subpath'):
"""
If there are decorator arguments, the function
to be decorated is not passed to the constructor!
"""
self.path_param = path_param
def __call__(self, f):
"""
If there are decorator arguments, __call__() is only called
once, as part of the decoration process! You can only give
it a single argument, which is the function object.
"""
def wrapped_f(*args,**kwargs):
path = kwargs[self.path_param]
if path is not None:
test_path(path)
return f(*args,**kwargs)
return wrapped_f
def get_setting(key, default=None):
return getattr(settings, key, default)
def test_path(path,allow_absolute=False,share=None):
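    """Raise an Exception for path traversal ('..', '*'), disallowed absolute or '~' paths, and (when a share is given) paths outside the whitelisted directories."""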
illegals = ['..','*']
for illegal in illegals:
if illegal in path:
raise Exception('Illegal path encountered')
if path.startswith('/') and not allow_absolute:
raise Exception('Subpath may not start with slash')
if path.startswith('~') and not allow_absolute:
raise Exception('Subpath may not start with a "~"')
if share:
full_path = os.path.join(share.get_path(),path)
if not paths_contain(settings.DIRECTORY_WHITELIST,full_path):
raise Exception('Illegal path encountered, %s, %s'%(share.get_path(),path))
def path_contains(parent_path,child_path,real_path=True):
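    """Return True if child_path lies within parent_path, comparing realpath-resolved paths unless real_path=False."""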
if real_path:
return os.path.join(os.path.realpath(child_path),'').startswith(os.path.join(os.path.realpath(parent_path),''))
else:
return os.path.join(child_path,'').startswith(os.path.join(parent_path,''))
def paths_contain(paths,child_path, get_path=False):
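    """Return True if child_path lies under any of the given paths; with get_path=True, return the matching parent path instead."""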
for path in paths:
if path_contains(path, child_path):
return path if get_path else True
return False
def json_response(dict):
from django.http.response import HttpResponse
import json
return HttpResponse(json.dumps(dict), content_type='application/json')
def json_error(messages,http_status=None):
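    """Return a {'status': 'error', 'errors': [...]} JSON response, defaulting to HTTP 400."""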
http_status = http_status or status.HTTP_400_BAD_REQUEST
return JsonResponse({'status':'error','errors':messages},status=http_status)
# return json_response({'status':'error','errors':messages})
def dictfetchall(sql,args=[]):
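    """Execute a raw SQL query and return all rows as dicts keyed by column name."""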
from django.db import connection
cursor = connection.cursor()
cursor.execute(sql, args)
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def fetchall(sql,args=[]):
from django.db import connection
cursor = connection.cursor()
cursor.execute(sql, args)
return cursor.fetchall()
def find_python(pattern, path):
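    """Recursively search `path` for files whose names match the glob `pattern`."""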
import fnmatch
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
def find_in_shares(shares, pattern):
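    """Run `find -name pattern` over the root paths of the given shares and return the matching absolute paths."""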
import subprocess
paths = [share.get_path() for share in shares]
output = subprocess.check_output(['find']+paths+['-name',pattern])
return output.split('\n')
def find(share, pattern, subdir=None,prepend_share_id=True):
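    """Run `find -name pattern` under the share (optionally within subdir) and return matches relative to the share root, prefixed with '/<share id>' when prepend_share_id is True."""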
import subprocess, os
path = share.get_path() if subdir is None else os.path.join(share.get_path(),subdir)
base_path = os.path.realpath(path)
output = subprocess.Popen(['find',base_path,'-name',pattern], stdout=subprocess.PIPE).communicate()[0]
# output = subprocess.check_output(['find',path,'-name',pattern])
paths = output.split('\n')
# return paths
results=[]
for path in paths:
result = path.split(base_path)
if len(result) == 2:
# print os.path.join(share.id,result[1])
if prepend_share_id:
results.append('/'+share.id+result[1])
else:
results.append(result[1][1:])
return results
def validate_email( email ):
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
try:
validate_email( email )
return True
except ValidationError:
return False
def email_users(users, subject_template=None, body_template=None, ctx_dict={},subject=None,body=None, from_email=settings.DEFAULT_FROM_EMAIL,content_subtype = "html"):
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
if subject:
t = Template(subject)
subject = t.render(Context(ctx_dict))
else:
subject = render_to_string(subject_template,ctx_dict)
subject = ''.join(subject.splitlines())
if body:
t = Template(body)
message = t.render(Context(ctx_dict))
else:
message = render_to_string(body_template, ctx_dict)
msg = EmailMessage(subject, message, from_email, [u.email for u in users])
msg.content_subtype = content_subtype # Main content is now text/html
msg.send(fail_silently=False)
#
# def get_file_info(path):
# from os.path import basename
# from os import stat
# import datetime
# (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = stat(path)
# return {'name':file.name,'size':size, 'modified':datetime.datetime.fromtimestamp(mtime).strftime("%b %d, %Y %H:%M")}
def sizeof_fmt(num):
num /= 1024.0 #function takes bytes, convert to KB
for x in ['KB','MB','GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
def zipdir(base, path, zip):
from os.path import relpath
for root, dirs, files in os.walk(path):
for file in files:
file_path = os.path.join(root, file)
rel_path = relpath(path=file_path, start=base)
zip.write(file_path,arcname=rel_path)
def get_size(path):
total_size = 0
if os.path.isfile(path):
return os.path.getsize(path)
elif os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def get_share_stats(share):
path = os.path.abspath(share.get_path())
total_size = 0
if not share.parent: # don't count subshares
ZFS_PATH = share.get_zfs_path()
if ZFS_PATH:
ZFS_PATH = share.get_path()
total_size = subprocess.check_output(['zfs', 'get', '-H', '-o', 'value', '-p', 'used', ZFS_PATH])
else:
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return {'size':int(total_size)}
def get_total_size(paths=[]):
total_size = 0
for path in paths:
total_size += get_size(path)
return total_size
def du(path):
"""disk usage in human readable format (e.g. '2,1GB')"""
return subprocess.check_output(['du','-shL', path]).split()[0].decode('utf-8')
def list_share_dir(share,subdir=None,ajax=False):
from bioshareX.models import MetaData
PATH = share.get_path()
if subdir is not None:
PATH = os.path.join(PATH,subdir)
file_list=[]
directories={}
    regex = r'^%s[^/]+/?' % ('' if subdir is None else re.escape(os.path.normpath(subdir)) + '/')
metadatas = {}
for md in MetaData.objects.prefetch_related('tags').filter(share=share,subpath__regex=regex):
metadatas[md.subpath]= md if not ajax else md.json()
for entry in scandir(PATH):
subpath= entry.name if subdir is None else os.path.join(subdir,entry.name)
        metadata = metadatas.get(subpath, {})
if entry.is_file():
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = entry.stat()
file={'name':entry.name,'extension':entry.name.split('.').pop() if '.' in entry.name else None,'size':sizeof_fmt(size),'bytes':size,'modified':datetime.datetime.fromtimestamp(mtime).strftime("%m/%d/%Y %H:%M"),'metadata':metadata,'isText':True}
file_list.append(file)
else:
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = entry.stat()
dir={'name':entry.name,'size':None,'metadata':metadata,'modified':datetime.datetime.fromtimestamp(mtime).strftime("%m/%d/%Y %H:%M")}
directories[os.path.realpath(entry.path)]=dir
return (file_list,directories)
def md5sum(path):
output = subprocess.check_output([settings.MD5SUM_COMMAND,path]) #Much more efficient than reading file contents into python and using hashlib
#IE: output = 4968966191e485885a0ed8854c591720 /tmp/Project/Undetermined_S0_L002_R2_001.fastq.gz
return re.findall(r'([0-9a-fA-F]{32})',output)[0]
| mit | -4,075,465,211,365,027,300 | 40.201705 | 255 | 0.60884 | false | 3.866436 | false | false | false |
aio-libs/aiodocker | tests/conftest.py | 1 | 5056 | import asyncio
import os
import sys
import traceback
import uuid
from distutils.version import StrictVersion
from typing import Any, Dict
import pytest
from aiodocker.docker import Docker
from aiodocker.exceptions import DockerError
API_VERSIONS = {
"17.06": "v1.30",
"17.09": "v1.32",
"17.12": "v1.35",
"18.02": "v1.36",
"18.03": "v1.37",
"18.06": "v1.38",
"18.09": "v1.39",
}
if sys.platform == "win32":
if sys.version_info < (3, 7):
# Python 3.6 has no WindowsProactorEventLoopPolicy class
from asyncio import events
class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
_loop_factory = asyncio.ProactorEventLoop
else:
WindowsProactorEventLoopPolicy = asyncio.WindowsProactorEventLoopPolicy
@pytest.fixture
def event_loop(request):
"""Create an instance of the default event loop for each test case."""
if sys.platform == "win32":
asyncio.set_event_loop_policy(WindowsProactorEventLoopPolicy())
loop = asyncio.new_event_loop()
yield loop
loop.close()
def _random_name():
return "aiodocker-" + uuid.uuid4().hex[:7]
@pytest.fixture(scope="session")
def random_name():
yield _random_name
# If some test cases have used randomly-named temporary images,
    # we need to clean them up!
    if os.environ.get("CI", "") == "true":
        # But inside the CI server, we don't need to clean up!
return
event_loop = asyncio.get_event_loop()
async def _clean():
docker = Docker()
images = await docker.images.list()
for img in images:
if img["RepoTags"] is None:
continue
try:
if img["RepoTags"][0].startswith("aiodocker-"):
print("Deleting image id: {0}".format(img["Id"]))
await docker.images.delete(img["Id"], force=True)
except DockerError:
traceback.print_exc()
await docker.close()
event_loop.run_until_complete(_clean())
@pytest.fixture(scope="session")
def image_name() -> str:
if sys.platform == "win32":
return "python:latest"
else:
return "python:alpine"
@pytest.fixture(scope="session")
def testing_images(image_name: str) -> None:
# Prepare a small Linux image shared by most test cases.
event_loop = asyncio.get_event_loop()
async def _pull():
docker = Docker()
required_images = [image_name]
if image_name != "python:latest":
required_images.append("python:latest")
for img in required_images:
try:
await docker.images.inspect(img)
except DockerError as e:
assert e.status == 404
print('Pulling "{img}" for the testing session...'.format(img=img))
await docker.pull(img)
await docker.close()
event_loop.run_until_complete(_pull())
@pytest.fixture
def docker(event_loop, testing_images):
kwargs = {}
version = os.environ.get("DOCKER_VERSION")
if version:
for k, v in API_VERSIONS.items():
if version.startswith(k):
kwargs["api_version"] = v
break
else:
raise RuntimeError(f"Cannot find docker API version for {version}")
async def _make_docker():
return Docker(**kwargs)
docker = event_loop.run_until_complete(_make_docker())
yield docker
async def _finalize():
await docker.close()
event_loop.run_until_complete(_finalize())
@pytest.fixture
async def requires_api_version(docker):
# Update version info from auto to the real value
await docker.version()
def check(version, reason):
if StrictVersion(docker.api_version[1:]) < StrictVersion(version[1:]):
pytest.skip(reason)
yield check
@pytest.fixture
def swarm(event_loop, docker):
if sys.platform == "win32":
pytest.skip("swarm commands dont work on Windows")
assert event_loop.run_until_complete(docker.swarm.init())
yield docker
assert event_loop.run_until_complete(docker.swarm.leave(force=True))
@pytest.fixture
def make_container(event_loop, docker):
container = None
async def _spawn(config: Dict[str, Any], name=None):
nonlocal container
container = await docker.containers.create_or_replace(config=config, name=name)
await container.start()
return container
yield _spawn
async def _delete():
nonlocal container
if container is not None:
await container.delete(force=True)
event_loop.run_until_complete(_delete())
@pytest.fixture
async def shell_container(event_loop, docker, make_container, image_name):
config = {
"Cmd": ["python"],
"Image": image_name,
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"Tty": True,
"OpenStdin": True,
}
return await make_container(config, name="aiodocker-testing-shell")
| apache-2.0 | 9,078,576,240,145,594,000 | 26.037433 | 87 | 0.620649 | false | 3.877301 | true | false | false |
FermiDirak/RandomForest | main.py | 1 | 2875 | import math
import numpy as np
import matplotlib.pyplot as plt
from RandomForest import RandomForest
from DecisionTree import Tree
from NN import NN
number_of_points = 100 #number of data points per class
number_of_classes = 3 #number of classes in dataset
# data generation: creates a spiral dataset with number_of_classes classes and number_of_points samples each
def generateData(number_of_points, number_of_classes):
data = np.empty([3, number_of_classes * number_of_points])
for i in range(0, number_of_classes):
data[0, i*number_of_points : (i+1) * number_of_points] = np.float(i) #np.matrix(np.ones(numberOfPoints));
radius = np.linspace(0.05, 0.9, number_of_points)
theta = np.linspace(i*2*math.pi/number_of_classes, \
i*2*math.pi/number_of_classes + 3*math.pi/2, number_of_points) +\
np.random.normal(0, .1, number_of_points)
x = radius * np.cos(theta)
y = radius * np.sin(theta)
datum = np.matrix(np.transpose(np.column_stack((x, y))))
data[1:, i*number_of_points:(i+1)*number_of_points] = datum
return data
def display(data, hists):
display_decision_boundary(hists)
display_training_data(data)
plt.show()
#displays training data for classification
def display_training_data(data):
colors = ['green', 'blue', 'red', 'yellow', 'orange']
for i in range(0, number_of_classes):
plt.scatter(data[1, i*number_of_points:(i+1)*number_of_points], data[2, i*number_of_points:(i+1)*number_of_points], c=colors[i], s=40)
def display_decision_boundary(hists):
plt.imshow(hists, interpolation='nearest', extent=[-1,1,-1,1])
#returns histograms in range -1,1 -1,1
def train_random_forest(data, size):
return RandomForest(data, size, 7, number_of_classes)
# creates a decision boundary represented as a size x size x 3 matrix
def create_decision_boundary(forest, size):
def scale_to_grid(i, size):
return -1 + 2 * (i / size)
hists = np.zeros([size, size, 3])
for i in range(0, size):
for j in range(0, size):
hists[i, j] = forest.test_point(np.transpose(np.matrix([scale_to_grid(i, size), scale_to_grid(j, size)])))
return hists
def train_nn(data):
# print(data.T, np.shape(data))
print(data.T[:, 1:3].shape)
# print(data.T[range(400), 0].shape)
nn = NN(data.T[: ,1:], data.T[:, 0])
nn.train()
nn.display()
if __name__ == '__main__':
data = generateData(number_of_points, number_of_classes)
# testing if master different now! w
# train_softmax(data)
# train_nn(data)
print('creating forest')
forest = train_random_forest(data, 200)
print('forest created')
    print('creating decision boundary')
hists = create_decision_boundary(forest, 25)
print('decision boundary created')
print('displaying data and decision boundary')
display(data, hists)
| mit | -7,192,878,935,116,275,000 | 30.944444 | 142 | 0.653913 | false | 3.18031 | false | false | false |
AFFogarty/SEP-Bot | public/sep_search/scraper/article_list_scraper.py | 1 | 1699 | from lxml import html
import re
import requests
SEP_URL = "http://plato.stanford.edu/"
class ArticleListScraper():
query = None
results = None
def __init__(self, query):
self.set_query(query)
def set_query(self, query):
# query_no_accents = remove_accents(query)
query_no_posessives = re.sub("'s", '', query)
pattern = re.compile('[^a-zA-Z\d\s]')
stripped_query = re.sub(pattern, ' ', query_no_posessives)
        # stop_word_filter = StopWordFilter()
        # self.query = stop_word_filter.filter(str(stripped_query).lower().split())
        # Fall back to the plain token list, since the stop-word filter above is disabled
        self.query = str(stripped_query).lower().split()
@property
def url(self):
url = SEP_URL + "search/searcher.py?query="
for word in self.query:
url += word + "+"
print url
return url
def request_results(self):
page = requests.get(self.url)
# Remvoe bold tags
text_no_bold = re.sub('</? ?b>', '', page.text)
text_no_newlines = re.sub('\n', '', text_no_bold)
tree = html.fromstring(text_no_newlines.encode('utf-8'))
titles = tree.xpath("//div[@class='result_title']/a/text()")
urls = tree.xpath("//div[@class='result_title']/a/@href")
# Figure out how many results to return
result_length = 0
if len(titles) > 5:
result_length = 5
else:
result_length = len(titles)
# Build the output tuples
output = []
for i in range(result_length):
output.append(
{
"title": titles[i],
"url": SEP_URL + urls[i].lstrip("../")
}
)
self.results = output
return output
| mit | -4,051,904,433,752,750,600 | 30.462963 | 83 | 0.532078 | false | 3.685466 | false | false | false |
ominux/personfinder | tools/validate_merge.py | 16 | 2793 | #!/usr/bin/python2.7
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify the messages in the en .po files after merging.
Usage:
From the root directory: tools/verify_translation.py
Verify_translations takes no arguments. To use:
1. run find_missing_translations to generate a template .po file:
find_missing_translations --format=po
This will generate a .po file with just the translated messages in order.
2. Use the english person_finder.xml file and the template from step 1 to
'merge' the english translations into the english .po file. This should
generate a .po file with the msg string set to the msg id value for each
newly translated string. Example command:
'merge_translations translations/en/person_finder.xml app/locale/en/LC_MESSAGES/django'
3. run verify_translations to verify that the strings are actually the same.
command: 'verify_translation'
 4. revert the app/locale/en changes (e.g., don't check in the msgstrs
    in the english files).
PO file format:
http://www.gnu.org/software/hello/manual/gettext/PO-Files.html
"""
from babel.messages import pofile
from find_missing_translations import get_po_filename
from test_pfif import text_diff
if __name__ == '__main__':
filename = get_po_filename('en')
english = pofile.read_po(open(filename))
count = 0
def printsep():
if count > 0:
print '-------------------------------------'
for msg in english:
# Each newly translated string will have msg.string set
# to the 'translated' english value.
if msg.id and msg.string and msg.string != msg.id:
if isinstance(msg.id, tuple):
# TODO(lschumacher): deal with plurals properly,
if msg.string[0] or msg.string[1]:
printsep()
print 'msg id: %s\nmsgstr: %s' % (msg.id, msg.string)
count += 1
else:
printsep()
print text_diff(msg.id, msg.string)
count += 1
if count:
printsep()
print 'Found %s bad translations' % count
else:
print 'Translation OK'
| apache-2.0 | 6,933,954,318,731,710,000 | 37.791667 | 107 | 0.637666 | false | 4.125554 | false | false | false |
simontakite/sysadmin | pythonscripts/pythonnetworkingcoookbook/chapter5/5_6_send_email_from_gmail.py | 2 | 1870 | #!/usr/bin/env python
# Python Network Programming Cookbook -- Chapter - 5
# This program requires Python 2.7 or any later version
import argparse
import os
import getpass
import re
import sys
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
SMTP_SERVER = 'smtp.gmail.com'
SMTP_PORT = 587
def send_email(sender, recipient):
""" Send email message """
msg = MIMEMultipart()
    msg['Subject'] = 'Python Email Test'
    msg['To'] = recipient
    msg['From'] = sender
    subject = 'Python Email Test'
message = 'Images attached.'
    # attach image files
files = os.listdir(os.getcwd())
gifsearch = re.compile(".gif", re.IGNORECASE)
files = filter(gifsearch.search, files)
for filename in files:
path = os.path.join(os.getcwd(), filename)
if not os.path.isfile(path):
continue
img = MIMEImage(open(path, 'rb').read(), _subtype="gif")
img.add_header('Content-Disposition', 'attachment', filename=filename)
msg.attach(img)
part = MIMEText('text', "plain")
part.set_payload(message)
msg.attach(part)
# create smtp session
session = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
session.ehlo()
session.starttls()
    session.ehlo()
password = getpass.getpass(prompt="Enter your Google password: ")
session.login(sender, password)
session.sendmail(sender, recipient, msg.as_string())
print "Email sent."
session.quit()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Email Sending Example')
parser.add_argument('--sender', action="store", dest="sender")
parser.add_argument('--recipient', action="store", dest="recipient")
given_args = parser.parse_args()
send_email(given_args.sender, given_args.recipient)
| gpl-2.0 | -5,088,516,377,796,062,000 | 30.694915 | 78 | 0.671658 | false | 3.762575 | false | false | false |
Inboxen/Inboxen | inboxen/cms/admin_urls.py | 1 | 1698 | ##
# Copyright (C) 2017 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django.conf import urls
from inboxen.cms import views
urlpatterns = [
urls.re_path(r'^$', views.index, name='index'),
urls.re_path(r'^(?P<page_pk>\d+)/$', views.index, name='index'),
urls.re_path(r'^choose_new_page/(?P<parent_pk>\d+)/$', views.choose_page_type, name='choose-page-type'),
urls.re_path(r'^create_page/(?P<model>[A-Za-z]+)/(?P<parent_pk>\d+)/$', views.create_page, name='create-page'),
urls.re_path(r'^edit_page/(?P<page_pk>\d+)/$', views.edit_page, name='edit-page'),
urls.re_path(r'^delete_page/(?P<page_pk>\d+)/$', views.delete_page, name='delete-page'),
urls.re_path(r'^blog/', urls.include(("inboxen.blog.admin_urls", "blog"), namespace="blog")),
urls.re_path(r'^questions/', urls.include(("inboxen.tickets.admin_urls", "tickets"), namespace="tickets")),
urls.re_path(r'^domains/', urls.include(("inboxen.admin_urls.domains", "inboxen"), namespace="domains")),
]
| agpl-3.0 | 63,411,465,265,730,070 | 47.514286 | 115 | 0.676678 | false | 3.240458 | false | false | false |
michealcarrerweb/LHVent_app | operation_finance/migrations/0033_auto_20170720_2348.py | 1 | 1814 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-20 23:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('operation_finance', '0032_auto_20170720_1815'),
]
operations = [
migrations.CreateModel(
name='VendorConflict',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(blank=True, max_length=150)),
('origin', models.DateTimeField(auto_now_add=True, null=True)),
('last_modified', models.DateTimeField(auto_now=True, null=True)),
('conflict_description', models.CharField(max_length=300, verbose_name='Conflict description')),
('conflict_resolution', models.CharField(blank=True, max_length=300, null=True, verbose_name='Conflict resolution')),
],
options={
'abstract': False,
},
),
migrations.RemoveField(
model_name='invoice',
name='conflict',
),
migrations.RemoveField(
model_name='invoice',
name='conflict_description',
),
migrations.AddField(
model_name='invoice',
name='file',
field=models.FileField(blank=True, null=True, upload_to='uploads/operations_invoice/', verbose_name='File'),
),
migrations.AddField(
model_name='vendorconflict',
name='invoice',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='conflict', to='operation_finance.Invoice'),
),
]
| mit | 1,598,525,994,893,387,000 | 36.791667 | 138 | 0.583793 | false | 4.392252 | false | false | false |
ifduyue/sentry | tests/sentry/api/serializers/test_group.py | 2 | 10647 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
import six
from datetime import timedelta
from django.utils import timezone
from mock import patch
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import StreamGroupSerializer
from sentry.models import (
Environment, GroupResolution, GroupSnooze, GroupStatus,
GroupSubscription, UserOption, UserOptionValue
)
from sentry.testutils import TestCase
class GroupSerializerTest(TestCase):
def test_is_ignored_with_expired_snooze(self):
now = timezone.now().replace(microsecond=0)
user = self.create_user()
group = self.create_group(
status=GroupStatus.IGNORED,
)
GroupSnooze.objects.create(
group=group,
until=now - timedelta(minutes=1),
)
result = serialize(group, user)
assert result['status'] == 'unresolved'
assert result['statusDetails'] == {}
def test_is_ignored_with_valid_snooze(self):
now = timezone.now().replace(microsecond=0)
user = self.create_user()
group = self.create_group(
status=GroupStatus.IGNORED,
)
snooze = GroupSnooze.objects.create(
group=group,
until=now + timedelta(minutes=1),
)
result = serialize(group, user)
assert result['status'] == 'ignored'
assert result['statusDetails']['ignoreCount'] == snooze.count
assert result['statusDetails']['ignoreWindow'] == snooze.window
assert result['statusDetails']['ignoreUserCount'] == snooze.user_count
assert result['statusDetails']['ignoreUserWindow'] == snooze.user_window
assert result['statusDetails']['ignoreUntil'] == snooze.until
assert result['statusDetails']['actor'] is None
def test_is_ignored_with_valid_snooze_and_actor(self):
now = timezone.now().replace(microsecond=0)
user = self.create_user()
group = self.create_group(
status=GroupStatus.IGNORED,
)
GroupSnooze.objects.create(
group=group,
until=now + timedelta(minutes=1),
actor_id=user.id,
)
result = serialize(group, user)
assert result['status'] == 'ignored'
assert result['statusDetails']['actor']['id'] == six.text_type(user.id)
def test_resolved_in_next_release(self):
release = self.create_release(project=self.project, version='a')
user = self.create_user()
group = self.create_group(
status=GroupStatus.RESOLVED,
)
GroupResolution.objects.create(
group=group,
release=release,
type=GroupResolution.Type.in_next_release,
)
result = serialize(group, user)
assert result['status'] == 'resolved'
assert result['statusDetails'] == {'inNextRelease': True, 'actor': None}
def test_resolved_in_release(self):
release = self.create_release(project=self.project, version='a')
user = self.create_user()
group = self.create_group(
status=GroupStatus.RESOLVED,
)
GroupResolution.objects.create(
group=group,
release=release,
type=GroupResolution.Type.in_release,
)
result = serialize(group, user)
assert result['status'] == 'resolved'
assert result['statusDetails'] == {'inRelease': 'a', 'actor': None}
def test_resolved_with_actor(self):
release = self.create_release(project=self.project, version='a')
user = self.create_user()
group = self.create_group(
status=GroupStatus.RESOLVED,
)
GroupResolution.objects.create(
group=group,
release=release,
type=GroupResolution.Type.in_release,
actor_id=user.id,
)
result = serialize(group, user)
assert result['status'] == 'resolved'
assert result['statusDetails']['actor']['id'] == six.text_type(user.id)
@patch('sentry.models.Group.is_over_resolve_age')
def test_auto_resolved(self, mock_is_over_resolve_age):
mock_is_over_resolve_age.return_value = True
user = self.create_user()
group = self.create_group(
status=GroupStatus.UNRESOLVED,
)
result = serialize(group, user)
assert result['status'] == 'resolved'
assert result['statusDetails'] == {'autoResolved': True}
def test_subscribed(self):
user = self.create_user()
group = self.create_group()
GroupSubscription.objects.create(
user=user,
group=group,
project=group.project,
is_active=True,
)
result = serialize(group, user)
assert result['isSubscribed']
assert result['subscriptionDetails'] == {
'reason': 'unknown',
}
def test_explicit_unsubscribed(self):
user = self.create_user()
group = self.create_group()
GroupSubscription.objects.create(
user=user,
group=group,
project=group.project,
is_active=False,
)
result = serialize(group, user)
assert not result['isSubscribed']
assert not result['subscriptionDetails']
def test_implicit_subscribed(self):
user = self.create_user()
group = self.create_group()
combinations = (
# ((default, project), (subscribed, details))
((None, None), (True, None)),
((UserOptionValue.all_conversations, None), (True, None)),
((UserOptionValue.all_conversations, UserOptionValue.all_conversations), (True, None)),
((UserOptionValue.all_conversations, UserOptionValue.participating_only), (False, None)),
((UserOptionValue.all_conversations, UserOptionValue.no_conversations),
(False, {'disabled': True})),
((UserOptionValue.participating_only, None), (False, None)),
((UserOptionValue.participating_only, UserOptionValue.all_conversations), (True, None)),
((UserOptionValue.participating_only, UserOptionValue.participating_only), (False, None)),
((UserOptionValue.participating_only, UserOptionValue.no_conversations),
(False, {'disabled': True})),
((UserOptionValue.no_conversations, None), (False, {'disabled': True})),
((UserOptionValue.no_conversations, UserOptionValue.all_conversations), (True, None)),
((UserOptionValue.no_conversations, UserOptionValue.participating_only), (False, None)),
((UserOptionValue.no_conversations, UserOptionValue.no_conversations),
(False, {'disabled': True})),
)
def maybe_set_value(project, value):
if value is not None:
UserOption.objects.set_value(
user=user,
project=project,
key='workflow:notifications',
value=value,
)
else:
UserOption.objects.unset_value(
user=user,
project=project,
key='workflow:notifications',
)
for options, (is_subscribed, subscription_details) in combinations:
default_value, project_value = options
UserOption.objects.clear_local_cache()
maybe_set_value(None, default_value)
maybe_set_value(group.project, project_value)
result = serialize(group, user)
assert result['isSubscribed'] is is_subscribed
assert result.get('subscriptionDetails') == subscription_details
def test_global_no_conversations_overrides_group_subscription(self):
user = self.create_user()
group = self.create_group()
GroupSubscription.objects.create(
user=user,
group=group,
project=group.project,
is_active=True,
)
UserOption.objects.set_value(
user=user,
project=None,
key='workflow:notifications',
value=UserOptionValue.no_conversations,
)
result = serialize(group, user)
assert not result['isSubscribed']
assert result['subscriptionDetails'] == {
'disabled': True,
}
def test_project_no_conversations_overrides_group_subscription(self):
user = self.create_user()
group = self.create_group()
GroupSubscription.objects.create(
user=user,
group=group,
project=group.project,
is_active=True,
)
UserOption.objects.set_value(
user=user,
project=group.project,
key='workflow:notifications',
value=UserOptionValue.no_conversations,
)
result = serialize(group, user)
assert not result['isSubscribed']
assert result['subscriptionDetails'] == {
'disabled': True,
}
def test_no_user_unsubscribed(self):
group = self.create_group()
result = serialize(group)
assert not result['isSubscribed']
class StreamGroupSerializerTestCase(TestCase):
def test_environment(self):
group = self.group
environment = Environment.get_or_create(group.project, 'production')
from sentry.api.serializers.models.group import tsdb
with mock.patch(
'sentry.api.serializers.models.group.tsdb.get_range',
side_effect=tsdb.get_range) as get_range:
serialize(
[group],
serializer=StreamGroupSerializer(
environment_func=lambda: environment,
stats_period='14d',
),
)
assert get_range.call_count == 1
for args, kwargs in get_range.call_args_list:
assert kwargs['environment_id'] == environment.id
def get_invalid_environment():
raise Environment.DoesNotExist()
with mock.patch(
'sentry.api.serializers.models.group.tsdb.make_series',
side_effect=tsdb.make_series) as make_series:
serialize(
[group],
serializer=StreamGroupSerializer(
environment_func=get_invalid_environment,
stats_period='14d',
)
)
assert make_series.call_count == 1
| bsd-3-clause | -2,061,487,176,131,183,900 | 33.456311 | 102 | 0.58655 | false | 4.277622 | true | false | false |
mambocab/cassandra-dtest | snitch_test.py | 1 | 4269 | import os
import socket
from dtest import Tester, debug
from tools import since
@since('2.2.5')
class TestGossipingPropertyFileSnitch(Tester):
# Throws connection refused if cannot connect
def _test_connect(self, address, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
s.connect((address, port))
s.close()
def test_prefer_local_reconnect_on_listen_address(self):
"""
@jira_ticket CASSANDRA-9748
@jira_ticket CASSANDRA-8084
Test that it's possible to connect over the broadcast_address when
listen_on_broadcast_address=true and that GossipingPropertyFileSnitch
reconnect via listen_address when prefer_local=true
"""
NODE1_LISTEN_ADDRESS = '127.0.0.1'
NODE1_BROADCAST_ADDRESS = '127.0.0.3'
NODE2_LISTEN_ADDRESS = '127.0.0.2'
NODE2_BROADCAST_ADDRESS = '127.0.0.4'
STORAGE_PORT = 7000
cluster = self.cluster
cluster.populate(2)
node1, node2 = cluster.nodelist()
cluster.seeds = [NODE1_BROADCAST_ADDRESS]
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch',
'listen_on_broadcast_address': 'true'})
node1.set_configuration_options(values={'broadcast_address': NODE1_BROADCAST_ADDRESS})
node2.auto_bootstrap = True
node2.set_configuration_options(values={'broadcast_address': NODE2_BROADCAST_ADDRESS})
for node in cluster.nodelist():
with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as snitch_file:
snitch_file.write("dc=dc1" + os.linesep)
snitch_file.write("rack=rack1" + os.linesep)
snitch_file.write("prefer_local=true" + os.linesep)
node1.start(wait_for_binary_proto=True)
node1.watch_log_for("Starting Messaging Service on /{}:{}".format(NODE1_LISTEN_ADDRESS, STORAGE_PORT), timeout=60)
node1.watch_log_for("Starting Messaging Service on /{}:{}".format(NODE1_BROADCAST_ADDRESS, STORAGE_PORT), timeout=60)
self._test_connect(NODE1_LISTEN_ADDRESS, STORAGE_PORT)
self._test_connect(NODE1_BROADCAST_ADDRESS, STORAGE_PORT)
# write some data to node1
node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8'])
session = self.patient_cql_connection(node1)
stress_table = 'keyspace1.standard1'
original_rows = list(session.execute("SELECT * FROM {}".format(stress_table)))
node2.start(wait_for_binary_proto=True, wait_other_notice=False)
node2.watch_log_for("Starting Messaging Service on /{}:{}".format(NODE2_LISTEN_ADDRESS, STORAGE_PORT), timeout=60)
node2.watch_log_for("Starting Messaging Service on /{}:{}".format(NODE2_BROADCAST_ADDRESS, STORAGE_PORT), timeout=60)
self._test_connect(NODE2_LISTEN_ADDRESS, STORAGE_PORT)
self._test_connect(NODE2_BROADCAST_ADDRESS, STORAGE_PORT)
node1.watch_log_for("Intiated reconnect to an Internal IP /{} for the /{}".format(NODE2_LISTEN_ADDRESS,
NODE2_BROADCAST_ADDRESS), filename='debug.log', timeout=60)
node2.watch_log_for("Intiated reconnect to an Internal IP /{} for the /{}".format(NODE1_LISTEN_ADDRESS,
NODE1_BROADCAST_ADDRESS), filename='debug.log', timeout=60)
# read data from node2 just to make sure data and connectivity is OK
session = self.patient_exclusive_cql_connection(node2)
new_rows = list(session.execute("SELECT * FROM {}".format(stress_table)))
self.assertEquals(original_rows, new_rows)
out, err = node1.nodetool('gossipinfo')
self.assertEqual(0, len(err), err)
debug(out)
self.assertIn("/{}".format(NODE1_BROADCAST_ADDRESS), out)
self.assertIn("INTERNAL_IP:6:{}".format(NODE1_LISTEN_ADDRESS), out)
self.assertIn("/{}".format(NODE2_BROADCAST_ADDRESS), out)
self.assertIn("INTERNAL_IP:6:{}".format(NODE2_LISTEN_ADDRESS), out)
| apache-2.0 | 3,012,660,398,633,485,300 | 46.966292 | 149 | 0.62989 | false | 3.575377 | true | false | false |
rapidpro/ureport-web-participation | features/environment.py | 1 | 1127 | import time
import uuid
from splinter.browser import Browser
from django.contrib.auth.models import User
from webparticipation.apps.ureporter.models import Ureporter
from webparticipation.apps.ureport_auth.models import PasswordReset
def before_all(context):
context.browser = Browser('chrome')
time.sleep(5)
def before_scenario(context, scenario):
email = '[email protected]'
username = 'user999999999'
password = 'password'
email1 = '[email protected]'
username1 = 'user999999991'
uid = uuid.uuid4()
uid1 = uuid.uuid4()
Ureporter.objects.create(uuid=uid,
user=User.objects.create_user(username=username, email=email, password=password))
Ureporter.objects.create(uuid=uid1,
user=User.objects.create_user(username=username1, email=email1, password=password))
def after_scenario(context, scenario):
User.objects.all().delete()
Ureporter.objects.all().delete()
PasswordReset.objects.all().delete()
def after_all(context):
context.browser.quit()
context.browser = None
context.server = None | agpl-3.0 | 1,241,663,468,293,540,000 | 31.228571 | 112 | 0.703638 | false | 3.707237 | false | false | false |
kennedyshead/home-assistant | tests/components/sun/test_trigger.py | 2 | 5420 | """The tests for the sun automation."""
from datetime import datetime
from unittest.mock import patch
import pytest
from homeassistant.components import sun
import homeassistant.components.automation as automation
from homeassistant.const import (
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, async_mock_service, mock_component
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
ORIG_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
mock_component(hass, "group")
hass.config.set_time_zone(hass.config.time_zone)
hass.loop.run_until_complete(
async_setup_component(hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
)
def teardown():
"""Restore."""
dt_util.set_default_time_zone(ORIG_TIME_ZONE)
async def test_sunset_trigger(hass, calls, legacy_patchable_time):
"""Test the sunset trigger."""
now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 2, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "sun", "event": SUN_EVENT_SUNSET},
"action": {
"service": "test.automation",
"data_template": {"id": "{{ trigger.id}}"},
},
}
},
)
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 0
with patch("homeassistant.util.dt.utcnow", return_value=now):
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["id"] == 0
async def test_sunrise_trigger(hass, calls, legacy_patchable_time):
"""Test the sunrise trigger."""
now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 14, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "sun", "event": SUN_EVENT_SUNRISE},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_sunset_trigger_with_offset(hass, calls, legacy_patchable_time):
"""Test the sunset trigger with offset."""
now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 2, 30, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "sun",
"event": SUN_EVENT_SUNSET,
"offset": "0:30:00",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event", "offset"))
},
},
}
},
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "sun - sunset - 0:30:00"
async def test_sunrise_trigger_with_offset(hass, calls, legacy_patchable_time):
"""Test the sunrise trigger with offset."""
now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 13, 30, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "sun",
"event": SUN_EVENT_SUNRISE,
"offset": "-0:30:00",
},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
| apache-2.0 | 166,584,854,498,084,400 | 30.882353 | 86 | 0.55369 | false | 3.888092 | true | false | false |
SVoxel/R7800 | git_home/samba.git/python/samba/ndr.py | 9 | 4875 | # -*- coding: utf-8 -*-
# Unix SMB/CIFS implementation.
# Copyright © Jelmer Vernooij <[email protected]> 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Network Data Representation (NDR) marshalling and unmarshalling."""
def ndr_pack(object):
"""Pack a NDR object.
:param object: Object to pack
:return: String object with marshalled object.
"""
ndr_pack = getattr(object, "__ndr_pack__", None)
if ndr_pack is None:
raise TypeError("%r is not a NDR object" % object)
return ndr_pack()
def ndr_unpack(cls, data, allow_remaining=False):
"""NDR unpack an object.
:param cls: Class of the object to unpack
:param data: Buffer to unpack
:param allow_remaining: allows remaining data at the end (default=False)
:return: Unpacked object
"""
object = cls()
ndr_unpack = getattr(object, "__ndr_unpack__", None)
if ndr_unpack is None:
raise TypeError("%r is not a NDR object" % object)
ndr_unpack(data, allow_remaining=allow_remaining)
return object
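# Illustrative round trip (not part of the original module). misc.GUID is used here
# only as an example of a generated NDR class; any such class works the same way:
#   from samba.dcerpc import misc
#   blob = ndr_pack(misc.GUID('00000000-0000-0000-0000-000000000000'))
#   guid = ndr_unpack(misc.GUID, blob)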
def ndr_print(object):
ndr_print = getattr(object, "__ndr_print__", None)
if ndr_print is None:
raise TypeError("%r is not a NDR object" % object)
return ndr_print()
def ndr_pack_in(object, bigendian=False, ndr64=False):
"""Pack the input of an NDR function object.
:param object: Object to pack
:param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False)
:param ndr64: use LIBNDR_FLAG_NDR64 (default=False)
:return: String object with marshalled object.
"""
ndr_pack_in_fn = getattr(object, "__ndr_pack_in__", None)
if ndr_pack_in_fn is None:
raise TypeError("%r is not a NDR function object" % object)
return ndr_pack_in_fn(bigendian=bigendian, ndr64=ndr64)
def ndr_unpack_in(object, data, bigendian=False, ndr64=False, allow_remaining=False):
"""Unpack the input of an NDR function object.
:param cls: Class of the object to unpack
:param data: Buffer to unpack
:param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False)
:param ndr64: use LIBNDR_FLAG_NDR64 (default=False)
:param allow_remaining: allows remaining data at the end (default=False)
:return: Unpacked object
"""
ndr_unpack_in_fn = getattr(object, "__ndr_unpack_in__", None)
if ndr_unpack_in_fn is None:
raise TypeError("%r is not a NDR function object" % object)
ndr_unpack_in_fn(data, bigendian=bigendian, ndr64=ndr64,
allow_remaining=allow_remaining)
return object
def ndr_print_in(object):
ndr_print_in_fn = getattr(object, "__ndr_print_in__", None)
if ndr_print_in_fn is None:
raise TypeError("%r is not a NDR function object" % object)
return ndr_print_in_fn()
def ndr_pack_out(object, bigendian=False, ndr64=False):
"""Pack the output of an NDR function object.
:param object: Object to pack
:param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False)
:param ndr64: use LIBNDR_FLAG_NDR64 (default=False)
:return: String object with marshalled object.
"""
ndr_pack_out_fn = getattr(object, "__ndr_pack_out__", None)
if ndr_pack_out_fn is None:
raise TypeError("%r is not a NDR function object" % object)
return ndr_pack_out_fn(bigendian=bigendian, ndr64=ndr64)
def ndr_unpack_out(object, data, bigendian=False, ndr64=False, allow_remaining=False):
"""Unpack the output of an NDR function object.
:param cls: Class of the object to unpack
:param data: Buffer to unpack
:param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False)
:param ndr64: use LIBNDR_FLAG_NDR64 (default=False)
:param allow_remaining: allows remaining data at the end (default=False)
:return: Unpacked object
"""
ndr_unpack_out_fn = getattr(object, "__ndr_unpack_out__", None)
if ndr_unpack_out_fn is None:
raise TypeError("%r is not a NDR function object" % object)
ndr_unpack_out_fn(data, bigendian=bigendian, ndr64=ndr64,
allow_remaining=allow_remaining)
return object
def ndr_print_out(object):
ndr_print_out_fn = getattr(object, "__ndr_print_out__", None)
if ndr_print_out_fn is None:
raise TypeError("%r is not a NDR function object" % object)
return ndr_print_out_fn()
| gpl-2.0 | 9,124,250,025,311,887,000 | 35.646617 | 86 | 0.681165 | false | 3.361379 | false | false | false |
llasram/puzzles | shipz/transforms.py | 1 | 3773 | #! /usr/bin/python
import sys
import math
from math import sin, cos
import cPickle as pickle
import struct
import numpy
from numpy import matrix, array
from itertools import izip
from support import *
BITSET_TEMPLATE = \
" %s = (%s & ~(1L << %d)) | (((%s >> %d) & 1L) << %d);"
def build_transform_func(i, cube, pattern):
output = [
'inline unused cube_t *',
'cube_transform_%i(cube_t *dst, cube_t *src)' % (i,),
'{',
' uint64_t low = src->low;',
' uint64_t high = src->high;',
' dst->low = low;',
' dst->high = high;']
for c1, c2 in izip(cube, pattern):
if c1 == c2:
continue
src = coord_bit(*c1)
if src < 64:
srcs = "low"
else:
srcs = "high"
src -= 64
dst = coord_bit(*c2)
if dst < 64:
dsts = "dst->low"
else:
dsts = "dst->high"
dst -= 64
output.append(BITSET_TEMPLATE % \
(dsts, dsts, dst, srcs, src, dst))
output.extend([' return dst;', '}', ''])
return '\n'.join(output)
def build_transform_group(group, isomorphs, stable, refs=None):
output = [
'',
'inline unused cube_t *',
'cube_transform_group%d(cube_t *dst, cube_t *src, int index)' % \
(group,),
'{',
' switch(index) {']
cube = list(isomorphs[0])
stable = cube.index(stable)
index = 0
for i, isomorph in enumerate(isomorphs):
if cube[stable] != isomorph[stable]:
continue
output.extend([
' case %d:' % (index,),
' return cube_transform_%d(dst, src);' % (i,)])
index += 1
output.insert(0, '#define CUBE_GROUP%d_TRANSFORMS (%d)' % (group, index))
output.extend([
' default:',
' return NULL;',
' }',
'}',
''])
if refs:
ref1s = cube.index(refs[0])
ref1d = cube.index(refs[1])
ref2s = cube.index(refs[2])
ref2d = cube.index(refs[3])
for i, isomorph in enumerate(isomorphs):
if cube[ref1s] != isomorph[ref1d] or \
cube[ref2s] != isomorph[ref2d]:
continue
output.extend([
'#define cube_reflect_group%d cube_transform_%d' % (group, i),
''])
break
return '\n'.join(output)
def main(argv=sys.argv):
shift = matrix([2, 2, 2])
cube = []
mirror = []
for x, y, z in product(xrange(MAXX), repeat=3):
cube.append((x, y, z))
mirror.append(((MAXX - 1) - x, y, z))
base1 = matrix(cube) - shift
base2 = matrix(mirror) - shift
isomorphs = set()
for x, y, z in product(ANGLES, repeat=3):
for base in (base1, base2):
r = (base * rot_matrix(x, y, z)).round(0)
r += shift
r = tuple(tuple(int(i) for i in j) for j in r.tolist())
isomorphs.add(r)
isomorphs = sorted(isomorphs)
print "#ifndef TRANSFORMS_H"
print "#define TRANSFORMS_H\n"
for i, pattern in enumerate(isomorphs):
print build_transform_func(i, cube, pattern)
print build_transform_group(3, isomorphs, (0, 0, 0),
[(0, 2, 0), (0, 2, 4), (4, 2, 0), (4, 2, 4)])
print build_transform_group(6, isomorphs, (0, 0, 2),
[(0, 0, 2), (4, 0, 2), (0, 4, 2), (4, 4, 2)])
print build_transform_group(12, isomorphs, (2, 0, 2),
[(0, 0, 2), (0, 4, 2), (4, 0, 2), (4, 4, 2)])
print build_transform_group(24, isomorphs, (2, 2, 2))
print "#endif /* TRANSFORMS_H */"
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | -6,583,522,729,625,080,000 | 31.247863 | 78 | 0.482905 | false | 3.252586 | false | false | false |
Avira/pootle | pootle/apps/reports/migrations/0001_initial.py | 1 | 1272 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
# Django uses this attribute to recognize squashed migrations, but we are
# abusing it to tell Django that this migration replaces a migration
# already run and recorded with a different app name.
replaces = [(b'evernote_reports', '0001_initial')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='PaidTask',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('task_type', models.PositiveSmallIntegerField(default=0, db_index=True, verbose_name='Type', choices=[(0, 'Translation'), (1, 'Review'), (2, 'Hourly Work'), (3, 'Correction')])),
('amount', models.FloatField(default=0, verbose_name='Amount')),
('rate', models.FloatField(default=0)),
('datetime', models.DateTimeField(verbose_name='Date', db_index=True)),
('description', models.TextField(null=True, verbose_name='Description')),
],
options={
},
bases=(models.Model,),
),
]
| gpl-3.0 | -2,941,673,925,234,581,500 | 38.75 | 195 | 0.599843 | false | 4.386207 | false | false | false |
endlos99/xdt99 | test/utils.py | 1 | 21693 | import sys
import os
import re
from subprocess import call
from config import xdm_py, xhm_py, xvm_py, xas_py, xda_py, xga_py, xdg_py, xbas_py
# Utility functions
def ordw(word):
"""word ord"""
return (word[0] << 8) | word[1]
def chrw(word):
"""word chr"""
return bytes((word >> 8, word & 0xff))
def xint(s):
"""return hex or decimal value"""
return int(s.lstrip('>'), 16 if s[:2] == '0x' or s[:1] == '>' else 10)
def sinc(s, i):
"""return string increased by i"""
return s[:-1] + chr(ord(s[-1]) + i)
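# Illustrative values for the helpers above (not part of the original module):
#   ordw(b'\x12\x34') -> 0x1234
#   chrw(0x1234)      -> b'\x12\x34'
#   xint('>1A') == xint('0x1a') == 26
#   sinc('R5', 2)     -> 'R7'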
# Test management functions
def xdm(*args, **kargs):
"""invoke Disk Manager"""
print('DM:', args)
if kargs.get('shell'):
rc = call(' '.join(xdm_py + list(args)), shell=True)
else:
rc = call(xdm_py + list(args), stdin=kargs.get('stdin'),
stdout=kargs.get('stdout'), stderr=kargs.get('stderr'))
if rc != kargs.get('rc', 0):
error('OS', 'xdm99 call returned with failure code ' + str(rc))
def xhm(*args, **kargs):
"""invoke HFE Manager"""
print('HM:', args)
if kargs.get('shell'):
rc = call(' '.join(xhm_py + list(args)), shell=True)
else:
rc = call(xhm_py + list(args),
stdout=kargs.get('stdout'), stderr=kargs.get('stderr'))
if rc != kargs.get('rc', 0):
error('OS', 'xhm99 call returned with failure code ' + str(rc))
def xvm(*args, **kargs):
"""invoke Volume Manager"""
print('VM:', args)
if kargs.get('shell'):
rc = call(' '.join(xvm_py + list(args)), shell=True)
else:
rc = call(xvm_py + list(args),
stdout=kargs.get('stdout'), stderr=kargs.get('stderr'))
if rc != kargs.get('rc', 0):
error('OS', 'xvm99 call returned with failure code ' + str(rc))
def xas(*args, **kargs):
"""invoke assembler"""
print('AS:', args)
rc = call(xas_py + list(args),
stdout=kargs.get('stdout'), stderr=kargs.get('stderr'))
if rc != kargs.get('rc', 0):
error('OS', 'xas99 call returned with failure code ' + str(rc))
def xda(*args, **kargs):
"""invoke disassembler"""
print('DA:', args)
rc = call(xda_py + list(args),
stdout=kargs.get('stdout'), stderr=kargs.get('stderr'))
if rc != kargs.get('rc', 0):
error('OS', 'xda99 call returned with failure code ' + str(rc))
def xga(*args, **kargs):
"""invoke GPL assembler"""
print('GA:', args)
rc = call(xga_py + list(args),
stdout=kargs.get('stdout'), stderr=kargs.get('stderr'))
if rc != kargs.get('rc', 0):
error('OS', 'xga99 call returned with failure code ' + str(rc))
def xdg(*args, **kargs):
"""invoke GPL disssembler"""
print('DG:', args)
rc = call(xdg_py + list(args),
stdout=kargs.get('stdout'), stderr=kargs.get('stderr'))
if rc != kargs.get('rc', 0):
error('OS', 'xdg99 call returned with failure code ' + str(rc))
def xbas(*args, **kargs):
"""invoke TI BASIC tool"""
print('BAS:', args)
rc = call(xbas_py + list(args),
stdout=kargs.get('stdout'), stderr=kargs.get('stderr'))
if rc != kargs.get('rc', 0):
error('OS', 'xbas99 call returned with failure code ' + str(rc))
def error(tid, msg):
"""report test error"""
sys.exit('ERROR: ' + tid + ': ' + msg)
# Common check functions
def content(fn, mode='rb'):
"""return contents of file"""
with open(fn, mode) as f:
data = f.read()
return data
def content_lines(fn):
"""return lines of file"""
with open(fn, 'r') as f:
lines = ' '.join(f.readlines())
return lines
def content_len(fn):
"""return length of file"""
return os.path.getsize(fn)
def check_file_exists(fn):
"""check if given file exists"""
return os.path.isfile(fn)
def check_file_empty(fn):
"""return if file is empty"""
return os.path.getsize(fn) == 0
def concat(flist, out):
"""concatenate file contents"""
with open(out, 'wb') as fout:
for fn in flist:
with open(fn, 'rb') as fin:
data = fin.read()
fout.write(data)
# Common check functions: xdm99
def check_files_eq(tid, infile, reffile, fmt, mask=None):
if fmt[0] == 'D':
if 'V' in fmt:
check_text_files_eq(tid, infile, reffile)
else:
check_binary_files_eq(tid, infile, reffile, [])
else:
check_binary_files_eq(tid, infile, reffile, mask or [])
def check_text_files_eq(tid, infile, reffile, skip=0):
"""check if file matches reference file"""
with open(infile, 'r') as fin, open(reffile, 'r') as fref:
if fin.readlines()[skip:] != fref.readlines()[skip:]:
error(tid, '%s: File contents mismatch' % infile)
def check_text_lines_eq(tid, infile, reffile, fmt):
"""check if text files are equal modulo trailing spaces"""
reclen = int(re.search('\d+', fmt).group(0))
with open(infile, 'r') as fin, open(reffile, 'r') as fref:
reflines = [line[:-1] + ' ' * (reclen - len(line) + 1) + '\n'
for line in fref.readlines()]
if fin.readlines() != reflines:
error(tid, '%s: File contents mismatch' % infile)
def check_binary_files_eq(tid, infile, reffile, mask=()):
"""check if binary files are equal modulo mask"""
with open(infile, 'rb') as fin, open(reffile, 'rb') as fref:
indata = fin.read()
refdata = fref.read()
cutlen = 0
for i, j in mask:
assert cutlen <= i <= j
indata = indata[:i - cutlen] + indata[j - cutlen:]
refdata = refdata[:i - cutlen] + refdata[j - cutlen:]
cutlen += j - i
if indata != refdata:
error(tid, '%s: File contents mismatch' % infile)
def check_bin_text_eq(tid, infile, reffile):
"""check if DISPLAY files with binary parts are equal"""
with open(infile, 'rb') as fin, open(reffile, 'rb') as fref:
indata = fin.read()
refdata = fref.read()
if indata == refdata:
return
# replace line separators by 0xff
indata_norm = indata.replace(b'\x0d\x0a', b'\xff').replace(b'\x0a', b'\xff').replace(b'\x0d', b'\xff')
refdata_norm = refdata.replace(b'\x0d\x0a', b'\xff').replace(b'\x0a', b'\xff').replace(b'\x0d', b'\xff')
if indata_norm != refdata_norm:
error(tid, 'Normalized file contents mismatch')
def check_file_matches(infile, matches):
"""check if text file contents match regular expressions"""
try:
with open(infile, 'r') as f:
contents = f.readlines()
except IOError:
error('CLI', '%s: File not found' % infile)
for line, pattern in matches:
try:
if not re.search(pattern, contents[line]):
error('CLI',
'%s: Line %d does not match' % (infile, line))
except IndexError:
error('CLI', '%s: Line %d missing' % (infile, line))
# Common check functions: xas99
def _tags(objcode, compressed, filter=None):
taglen = 3 if compressed else 5
refdefs = []
if compressed: # ignore :id line
objcode = b''.join([objcode[i:i + 80].rstrip() for i in range(0, len(objcode) - 80, 80)]) # ignore :id line
else: # ignore line numbers and :id line
objcode = b''.join([objcode[i:i + 76].rstrip() for i in range(0, len(objcode) - 80, 80)])
yield objcode[:taglen] # rec count
objcode = objcode[taglen + 8:].lstrip() # skip to first tag
while objcode:
if objcode[:1] in b'3456': # those might not be in the same order for xas99
refdefs.append(objcode[:taglen + 6]) # include name
objcode = objcode[taglen + 6:]
elif objcode[:1] == b'7' and refdefs: # ignore checksum in refdefs
objcode = objcode[taglen:].lstrip()
elif objcode[:1] == b'F':
objcode = objcode[1:] # just skip end of record marker
else:
if filter is None or objcode[0] in filter:
yield objcode[:taglen]
objcode = objcode[taglen:]
for tag in sorted(refdefs):
yield tag
def check_obj_code_eq(infile, reffile, compressed=False, tagfilter=None):
"""check if object code files are equal modulo id tag"""
with open(infile, 'rb') as fin, open(reffile, 'rb') as fref:
indata = fin.read()
refdata = fref.read()
for i, (intag, reftag) in enumerate(zip(_tags(indata, compressed, tagfilter),
_tags(refdata, compressed, tagfilter))):
if intag != reftag:
error('Object code', 'Mismatch for tag no. {:d}: {}/{}'.format(i, str(intag), str(reftag)))
def check_image_files_eq(genfile, reffile, ignore=()):
"""check if non-zero bytes in binary files are equal"""
with open(genfile, 'rb') as fg, open(reffile, 'rb') as fr:
genimage = fg.read()
refimage = fr.read()
for imin, imax in ignore: # must be in decreasing order!
genimage = genimage[:imin] + genimage[imax:]
refimage = refimage[:imin] + refimage[imax:]
if not 0 <= len(genimage) - len(refimage) <= 1:
error('Image', 'Image length mismatch')
if (genimage[:2] != refimage[:2] or
not (0 <= ordw(genimage[2:4]) - ordw(refimage[2:4]) <= 1) or
genimage[4:6] != refimage[4:6]):
error('Image', 'Image header mismatch')
# TI-generated images may contain arbitrary bytes in BSS segments
for i in range(4, len(refimage)):
if genimage[i] and genimage[i] != refimage[i]:
error('Image', 'Image contents mismatch @ ' + hex(i))
def check_image_set_eq(gendata, refdata):
"""check if genfile is outfile, module padding at outfile"""
if any(data[:2] != b'\xff\xff' for data in gendata[:-1]):
error('image', 'Bad continuation marker')
if gendata[-1][:2] != b'\x00\x00':
error('image', 'Missing >0000 end marker')
sortedgendata = sorted(gendata, key=lambda d: ordw(d[4:6]))
sortedrefdata = sorted(refdata, key=lambda d: ordw(d[4:6]))
if len(sortedgendata) != len(sortedrefdata):
error('image', 'Image file count mismatch')
for genimage, refimage in zip(sortedgendata, sortedrefdata):
padlen = len(refimage) - len(genimage)
if not 0 <= padlen <= 1:
error('Image', 'Image length mismatch')
if not (ordw(refimage[2:4]) - ordw(genimage[2:4]) == padlen) or genimage[4:6] != refimage[4:6]:
error('Image', 'Image header mismatch')
# TI-generated images may contain arbitrary bytes in BSS segments
for i in range(4, len(refimage) - padlen):
if genimage[i] and genimage[i] != refimage[i]:
error('Image', 'Image contents mismatch @ ' + hex(i))
def check_list_files_eq(genfile, reffile, ignore_lino=False):
"""check if listing files are equivalent
    ignores symbol listing at end of reffile by checking up to the end of genfile
"""
with open(genfile, 'r') as fg, open(reffile, 'r') as fr:
genlist = [(l[:16] + l[19:]).rstrip() for l in fg.readlines()
if l.strip() and l[5:9] != '****' and l[19] != '<']
reflist = [l[2:].rstrip() for l in fr.readlines() if l[:2] == ' ']
gi, ri = 1, 0 # skip assembler header note
min_col, max_col = 4 if ignore_lino else 0, 74
while gi < len(genlist):
gl, rl = genlist[gi], reflist[ri]
# ignore deliberate changes
try:
if gl[10] in '.X':
rl = rl[:10] + gl[10:15] + rl[15:] # no data
if gl[14] == 'r' and rl[14] == "'": # reloc
rl = rl[:14] + 'r' + rl[15:]
if gl[14] == 'e': # reloc or external (REF)
rl = rl[:10] + '0000e' + rl[15:] # remove REF chain
if 'ORG' in rl[16:] or 'BES' in rl[16:]:
rl = rl[:5] + gl[5:9] + rl[9:] # no address
gl = gl.replace(';', '*') # unify comments
# ignore listing directives
if 'TITL' in gl[16:] or 'PAGE' in gl[16:] or 'UNL' in gl[16:] or 'LIST' in gl[16:]:
gi += 1
continue
# ignore BYTE sections
if gl[16:] == rl[16:] and ('BYTE' in gl[16:] or 'TEXT' in gl[16:]):
gi, ri = gi + 1, ri + 1
while genlist[gi][:4] == ' ':
gi += 1
while reflist[ri][:4] == ' ':
ri += 1
continue
except IndexError:
pass
if gl[min_col:max_col] != rl[min_col:max_col]:
error('List file', f'Line mismatch in {gi}/{ri}:\n{gl}\n{rl}')
gi, ri = gi + 1, ri + 1
def check_list_against_binary(listfile, binfile):
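    """check that the byte/word values in the listing match the binary file contents"""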
with open(listfile, 'r') as fl, open(binfile, 'rb') as fb:
lines = fl.readlines()
blob = fb.read()
mem = {}
for line in lines:
try:
addr = int(line[5:9].strip(), 16)
except ValueError:
continue
word = line[10:14].strip()
try:
value = int(word, 16)
except ValueError:
if word == 'XXXX':
value = 0
else:
continue
if len(word) == 2:
mem[addr] = value
elif len(word) == 4:
mem[addr] = value >> 8
mem[addr + 1] = value & 0xff
lc = min(mem)
for b in blob:
listb = mem.get(lc, 0)
if listb != b:
error('bin list', f'Unexpected byte @>{lc:04X}: >{listb:02X}, expected >{b:02X}')
lc += 1
def check_dat_file_eq(datfile, binfile):
"""check that textual representation matches binary file"""
with open(datfile, 'r') as fd, open(binfile, 'rb') as fb:
dat = ''.join(fd.readlines()[1:])
bin = fb.read()
values = [xint(x) for x in re.findall(r'>\w{2}', dat)]
if bytes(values) != bin:
error('dat', 'Values and binary file mismatch')
# common check functions: xda99/xdg99
def check_bytes(outfile, reffile):
"""check that outfile has not more data than reffile"""
outbytes, cntbytes = count_bytes(outfile), count_bytes(reffile)
if outbytes > cntbytes:
error('BYTEs', 'Too many BYTEs/DATAs: %d instead of %d' % (outbytes, cntbytes))
def count_bytes(fn):
"""count bytes declared by directives in source"""
byte_count = 0
with open(fn, 'r') as fin:
source = fin.readlines()
for line in source:
# get rid of quoted single quotes ''
line = re.sub(r"'(?:[^']|'')*'",
lambda x: ','.join(['z'] * (len(x.group(0)) - 2 - x.group(0)[1:-1].count("''"))),
line)
# get instruction parts
parts = re.split(r'\s+', line, maxsplit=2)
if len(parts) > 2 and parts[1].lower() in ('byte', 'data', 'stri', 'text'):
# get all args
args = [x.strip() for x in parts[2].split(',') if x.strip()]
# know what you count
if parts[1].lower() == 'data':
byte_count += len(args) * 2
elif parts[1].lower() == 'text':
byte_count += sum([len(a) // 2 if a[0] == '>' else 1
for a in args])
elif parts[1].lower() == 'stri':
byte_count += sum([len(a) // 2 if a[0] == '>' else 1
for a in args]) + 1 # len byte
else:
byte_count += len(args)
return byte_count
def check_indent(fn, blocks):
"""check if first lines are indented correctly"""
with open(fn, 'r') as fin:
source = fin.readlines()
indents = []
for line in source:
if not line:
continue
if line[0] == ' ':
indents.append(re.match(r'\s+(\w)', line).start(1))
else:
try:
indents.append(
re.match(r'(?:[\w?!~]+\s+){%d}(\w)' % blocks, line).start(
1))
except AttributeError:
pass
if len(indents) < 3:
error('indent', 'Too few indent values: %d' % len(indents))
return all([i == indents[0] for i in indents[1:]])
def count_mnemonics(fn, offset=0, wanted=None):
"""build dict of all occurring mnemonics"""
with open(fn, 'r') as fin:
source = [l[offset:] for l in fin.readlines()]
mnems = {}
for line in source:
parts = re.split(r'\s+', line.rstrip(), maxsplit=2)
if len(parts) < 2:
continue
mnem = parts[1].lower()
if wanted is not None and wanted != mnem:
continue
n = mnems.setdefault(mnem, 0)
mnems[parts[1].lower()] = n + 1
return mnems.get(wanted, 0) if wanted is not None else mnems
def check_source(outfile, reffile):
"""compare sources"""
with open(outfile, 'r') as fout, open(reffile, 'r') as fref:
out = fout.readlines()
ref = fref.readlines()
j = -1
for i, oline in enumerate(out):
# split output instruction (generated source)
oinstr = re.split(r'\s+', re.sub(';.*$', '', oline.rstrip()).lower(),
2)
if len(oinstr) < 2 or oinstr[1] == 'equ':
continue # no instruction
oargs = [a.strip().upper() for a in oinstr[2].split(',')] if len(
oinstr) > 2 else []
rline, rinstr, urargs = '', (), ()
while True:
j += 1
rline = re.sub(';.*$', '', ref[j]).rstrip()
if rline[:1] == '*':
continue # ignore comments
if 'IGNORE' in rline:
break # don't compare two corresponding lines
# split reference instruction (original source)
rinstr = re.split(r'\s+', rline.lower(), 2)
rargs = [a.strip().upper() for a in rinstr[2].split(',')] if len(
rinstr) > 2 else []
# uniform numerical arguments >XXXX, except if they're
# already >XX (for xdg99)
urargs = [('>%04X' % xint(a)) if (a[0] == '>' and len(
a) != 3) or a.isdigit() else a
for a in rargs]
if rline and rinstr[0][-1:] != ':' and rinstr[1] != 'equ':
break
if 'IGNORE' not in rline and (
oinstr[1] != rinstr[1] or oargs != urargs):
error('source', 'Mismatch in line %d:\n(R) %s\n(O) %s' % (
i, rline, oline))
def check_origins(fn, origins):
"""check origins in source"""
with open(fn, 'r') as fin:
source = fin.readlines()
ocnt = 0
for line in source:
m = re.match(r'^(\w+)\s[^;]*; <-(.*)$', line)
if m:
addr = int(m.group(1), 16)
anns = [int(a.strip()[1:], 16) for a in m.group(2).split(',')]
if addr in origins:
if origins[addr] == anns:
ocnt += 1
else:
error('origin', 'Origin mismatch @%04X' % addr)
if ocnt != len(origins):
error('origin', 'Origin count mismatch: %d/%d' % (ocnt, len(origins)))
def read_stderr(fn, include_warnings=False):
"""read stderr output"""
errors = []
with open(fn, 'r') as f:
lines = f.readlines()
for error, line in zip(lines[::2], lines[1::2]):
if 'Warning' in line:
if include_warnings:
warn = re.search(r'<.>\s+(\d+|\*+)\s+-', error)
if warn:
errors.append(warn.group(1))
else:
continue # ignore warnings
else:
err = re.search(r'<.>\s+(\d+)', error)
if err:
errors.append(err.group(1))
return errors
def get_source_markers(source, tag):
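    """collect expected error markers from source lines matching tag (explicit marker if captured, else 4-digit line number)"""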
ref_errors = []
with open(source, 'r') as f:
for i, line in enumerate(f):
m = re.search(tag, line)
if m:
try:
if m.group(1):
ref_errors.append(m.group(1)[1:])
continue
except IndexError:
pass
ref_errors.append(f'{i + 1:04d}')
return ref_errors
def check_errors(ref, actual):
"""compare two dicts for key equality"""
for err in ref:
if err not in actual:
error('Error messages', 'Missing error of line ' + err)
for err in actual:
if err[0] == '*':
continue
if err not in ref:
error('Error messages', 'Extraneous error in line ' + err)
def check_ellipsis(fn, skip=0):
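    """check that '....' lines appear between, and only between, non-contiguous address segments"""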
with open(fn, 'r') as fin:
addrs = [None if line[0] == '.' else int(line[:4], 16) for line in fin.readlines()[skip:]]
for i, a in enumerate(addrs):
if a is None:
continue
try:
if addrs[i + 1] is None:
if addrs[i + 2] - a <= 2:
error('concise', "Badly placed '....' within address segment")
else:
if addrs[i + 1] - a > 2:
error('concise', "Missing '....' between two address segments")
except IndexError:
pass
# common check functions xga99
def check_gbc_files_eq(name, genfile, reffile, offset=0):
"""check if non-zero bytes in binary files are equal"""
with open(genfile, 'rb') as fgen, open(reffile, 'rb') as fref:
genimage = fgen.read()
refimage = fref.read()[6:]
if genimage[offset:] != refimage and genimage[offset:] != refimage[:-1]:
error('GPL image', 'Image mismatch: ' + name)
if genimage[:offset] != bytes(offset):
error('GPL image', 'Non-zero offset prefix')
| gpl-2.0 | -7,286,043,714,069,134,000 | 34.504092 | 116 | 0.528558 | false | 3.423229 | false | false | false |
luisfcorreia/rtd2660 | programmer/serial1.py | 1 | 1109 | # Serial1 Class
import logging
import serial  # pyserial, needed for serial.Serial below
from threading import Thread
class Serial1(Thread):
    def __init__(self, serialPort, serialBaud):
        Thread.__init__(self)
        self.daemon = True
        self.running = True
        self.name = 'Serial Thread'
        # store the port settings so run() can open the port later
        self.serialPort = serialPort
        self.serialBaud = serialBaud
        self.start()
def kill(self):
self.running = False
def write(self,data):
try:
self.port.write(data)
except:
pass
def readline(self):
buf = ''
while True:
char = self.port.read()
if char == 'B':
buf = char
else:
buf += char
if char == '\r' or char == '' and buf[:3] == 'BEG' and buf[-4:] == 'END\r':
return buf
def run(self):
try:
            self.port = serial.Serial(self.serialPort, self.serialBaud)
logging.debug('starting')
except:
pass
while True:
try:
data = self.readline()
logging.debug(data)
except:
pass
if not self.running:
break
| gpl-2.0 | -4,518,309,322,066,550,000 | 21.18 | 87 | 0.450857 | false | 4.436 | false | false | false |
KonradBreitsprecher/espresso | src/python/espressomd/io/writer/h5md.py | 1 | 3069 | """Interface module for the H5md core implementation."""
import sys
from six import iteritems
from ...script_interface import PScriptInterface # pylint: disable=import
class H5md(object):
"""H5md file object.
Used for accessing the H5MD core implementation via the
PScriptInterface.
.. note::
Bonds will be written to the file automatically if they exist.
Parameters
----------
filename : :obj:`str`
Name of the trajectory file.
write_pos : :obj:`bool`, optional
If positions should be written.
write_vel : :obj:`bool`, optional
If velocities should be written.
write_force : :obj:`bool`, optional
If forces should be written.
write_species : :obj:`bool`, optional
If types (called 'species' in the H5MD specification) should be written.
write_mass : :obj:`bool`, optional
If masses should be written.
    write_charge : :obj:`bool`, optional
If charges should be written.
write_ordered : :obj:`bool`, optional
If particle properties should be ordered according to
ids.
"""
def __init__(self, write_ordered=True, **kwargs):
self.valid_params = ['filename', "write_ordered"]
if 'filename' not in kwargs:
raise ValueError("'filename' parameter missing.")
self.what = {'write_pos': 1 << 0,
'write_vel': 1 << 1,
'write_force': 1 << 2,
'write_species': 1 << 3,
'write_mass': 1 << 4,
'write_charge': 1 << 5}
        self.valid_params.extend(self.what.keys())
self.what_bin = 0
for i, j in iteritems(kwargs):
if i in self.what.keys():
if isinstance(j, bool):
if j:
self.what_bin += self.what[i]
else:
raise ValueError("{} has to be a bool value.".format(i))
elif i not in self.valid_params:
raise ValueError(
"Unknown parameter {} for H5MD writer.".format(i))
self.h5md_instance = PScriptInterface(
"ScriptInterface::Writer::H5mdScript")
self.h5md_instance.set_params(filename=kwargs['filename'],
what=self.what_bin,
scriptname=sys.argv[0],
write_ordered=write_ordered)
self.h5md_instance.call_method("init_file")
def get_params(self):
"""Get the parameters from the scriptinterface."""
return self.h5md_instance.get_params()
def write(self):
"""Call the H5md write method."""
self.h5md_instance.call_method("write")
def flush(self):
"""Call the H5md flush method."""
self.h5md_instance.call_method("flush")
def close(self):
"""Close the H5md file."""
self.h5md_instance.call_method("close")
| gpl-3.0 | 8,727,909,045,723,869,000 | 34.275862 | 89 | 0.539264 | false | 4.353191 | false | false | false |
jrsix/py-dc | src/platform/org/db.py | 12 | 2046 | # -*- coding:utf-8 -*-
import MySQLdb
import MySQLdb.cursors
import conf
class newdb(object):
    '''Database access helper'''
    def __init__(self,returnDict=False):
        'Create a data access object; with returnDict=True the cursor returns rows as dicts'
self.conn=MySQLdb.connect(
host=conf.db_host,
user=conf.db_user,
passwd=conf.db_pwd,
db=conf.db_name,
port=conf.db_port,
charset='utf8'
)
if returnDict:
self.cursor=self.conn.cursor(cursorclass = MySQLdb.cursors.DictCursor)
else:
self.cursor=self.conn.cursor()
def commit(self):
self.conn.commit()
def close(self):
self.conn.close()
def fetchone(self,sql,args=None):
self.cursor.execute(sql,args)
result=self.cursor.fetchone()
self.cursor.close()
self.close()
return result
def fetchall(self,sql,args=None):
result=None
self.cursor.execute(sql,args)
result=self.cursor.fetchall()
self.cursor.close()
self.close()
return result
    def query(self,sql,args=None):
        self.cursor.execute(sql,args)
        _row=self.cursor.rowcount
        self.commit()
        self.cursor.close()
        self.close()
        return _row
#********* SQLAlchemy *****************#
'''
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
class SA:
'SQLAlchemy'
engine=create_engine('mysql://%s:%s@%s:%s/%s?charset=utf8'%(
conf.db_user,
conf.db_pwd,
conf.db_host,
conf.db_port,
conf.db_name
),echo=True)
Session=sessionmaker(bind=engine)
''' | apache-2.0 | -2,662,783,012,128,413,700 | 28.144928 | 82 | 0.458209 | false | 4.276596 | false | false | false |
obrbot/bot-clean | plugins/ignore.py | 1 | 3045 | import asyncio
from fnmatch import fnmatch
import logging
from obrbot import hook
from obrbot.plugin import HookType
plugin_info = {
"plugin_category": "core",
"command_category_name": "Administration"
}
logger = logging.getLogger("obrbot")
@asyncio.coroutine
@hook.sieve()
def ignore_sieve(event, hook_event):
""" blocks events from ignored channels/hosts
:type event: obrbot.event.Event
:type hook_event: obrbot.event.HookEvent
"""
bot = event.bot
# don't block event hooks
if hook_event.hook.type is HookType.event or hook_event.hook.type is HookType.irc_raw:
return event
# don't block server messages
if event.mask is None:
return event
# don't block an event that could be un-ignoring
if hook_event.hook.type is HookType.command and hook_event.hook.function_name == 'unignore':
return event
ignore_list = yield from event.async(bot.db.smembers, 'plugins:ignore:ignored')
mask = event.mask.lower()
for pattern in ignore_list:
pattern = pattern.decode()
if pattern.startswith('#'):
if fnmatch(event.chan_name, pattern):
logger.info("Ignoring {}: Skipping hook {}".format(event.chan_name, hook_event.hook.description))
return None
else:
if fnmatch(mask, pattern):
logger.info("Ignoring {}: Skipping hook {}".format(event.mask, hook_event.hook.description))
return None
return event
@asyncio.coroutine
@hook.command(autohelp=False, permissions=['ignored.view'])
def ignored(notice, async, db):
"""- lists all channels and users I'm ignoring"""
ignore_list = yield from async(db.smembers, 'plugins:ignore:ignored')
if ignore_list:
notice("Ignored users: {}".format(", ".join(b.decode() for b in ignore_list)))
else:
notice("No users are currently ignored.")
return
@asyncio.coroutine
@hook.command(permissions=['ignored.manage'])
def ignore(text, async, db):
"""<nick|user-mask> - adds <channel|nick> to my ignore list
:type db: redis.StrictRedis
"""
target = text.lower()
if ('!' not in target or '@' not in target) and not target.startswith('#'):
target = '{}!*@*'.format(target)
added = yield from async(db.sadd, 'plugins:ignore:ignored', target)
if added > 0:
return "{} has been ignored.".format(target)
else:
return "{} is already ignored.".format(target)
@asyncio.coroutine
@hook.command(permissions=['ignored.manage'])
def unignore(text, async, db):
"""<nick|user-mask> - removes <nick|user-mask> from my ignore list
:type db: redis.StrictRedis
"""
target = text.lower()
if ('!' not in target or '@' not in target) and not target.startswith('#'):
target = '{}!*@*'.format(target)
removed = yield from async(db.srem, 'plugins:ignore:ignored', target)
if removed > 0:
return "{} has been unignored.".format(target)
else:
return "{} was not ignored.".format(target)
| gpl-3.0 | 8,997,325,847,389,618,000 | 29.757576 | 113 | 0.644007 | false | 3.811014 | false | false | false |
SEL-Columbia/commcare-hq | corehq/apps/domainsync/management/commands/copy_domain.py | 1 | 10743 | from multiprocessing import Process, Queue
import sys
import os
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from casexml.apps.stock.models import StockTransaction, StockReport, DocDomainMapping
from corehq.apps.domain.models import Domain
from dimagi.utils.couch.database import get_db, iter_docs
from corehq.apps.domainsync.config import DocumentTransform, save
from couchdbkit.client import Database
from optparse import make_option
from datetime import datetime
# doctypes we want to be careful not to copy, which must be explicitly
# specified with --include
DEFAULT_EXCLUDE_TYPES = [
'ReportNotification',
'WeeklyNotification',
'DailyNotification'
]
NUM_PROCESSES = 8
class Command(BaseCommand):
help = "Copies the contents of a domain to another database."
args = '<sourcedb> <domain>'
option_list = BaseCommand.option_list + (
make_option('--include',
action='store',
dest='doc_types',
default='',
help='Comma-separated list of Document Types to copy'),
make_option('--exclude',
action='store',
dest='doc_types_exclude',
default='',
help='Comma-separated list of Document Types to NOT copy.'),
make_option('--since',
action='store',
dest='since',
default='',
help='Only copy documents newer than this date. Format: yyyy-MM-dd. Only '),
make_option('--list-types',
action='store_true',
dest='list_types',
default=False,
help='Don\'t copy anything, just list all the available document types.'),
make_option('--simulate',
action='store_true',
dest='simulate',
default=False,
help='Don\'t copy anything, print what would be copied.'),
make_option('--id-file',
action='store',
dest='id_file',
default='',
help="File containing one document ID per line. Only docs with these ID's will be copied"),
make_option('--postgres-db',
action='store',
dest='postgres_db',
default='',
help="Name of postgres database to pull additional data from. This should map to a "
"key in settings.DATABASES. If not specified no additional postgres data will be "
"copied. This is currently used to pull CommTrack models."),
make_option('--postgres-password',
action='store',
dest='postgres_password',
default='',
help="Password for postgres database to pull additional data from. If not specified will "
"default to the value in settings.DATABASES")
)
def handle(self, *args, **options):
if len(args) != 2:
raise CommandError('Usage is copy_domain %s' % self.args)
sourcedb = Database(args[0])
domain = args[1].strip()
simulate = options['simulate']
since = datetime.strptime(options['since'], '%Y-%m-%d').isoformat() if options['since'] else None
if options['list_types']:
self.list_types(sourcedb, domain, since)
sys.exit(0)
if simulate:
print "\nSimulated run, no data will be copied.\n"
if options['postgres_db'] and options['postgres_password']:
settings.DATABASES[options['postgres_db']]['PASSWORD'] = options['postgres_password']
self.targetdb = get_db()
domain_doc = Domain.get_by_name(domain)
if domain_doc is None:
self.copy_domain(sourcedb, domain)
if options['doc_types']:
doc_types = options['doc_types'].split(',')
for type in doc_types:
startkey = [x for x in [domain, type, since] if x is not None]
endkey = [x for x in [domain, type, {}] if x is not None]
self.copy_docs(sourcedb, domain, simulate, startkey, endkey, type=type, since=since,
postgres_db=options['postgres_db'])
elif options['id_file']:
path = options['id_file']
if not os.path.isfile(path):
print "Path '%s' does not exist or is not a file" % path
sys.exit(1)
with open(path) as input:
doc_ids = [line.rstrip('\n') for line in input]
if not doc_ids:
print "Path '%s' does not contain any document ID's" % path
sys.exit(1)
self.copy_docs(sourcedb, domain, simulate, doc_ids=doc_ids, postgres_db=options['postgres_db'])
else:
startkey = [domain]
endkey = [domain, {}]
exclude_types = DEFAULT_EXCLUDE_TYPES + options['doc_types_exclude'].split(',')
self.copy_docs(sourcedb, domain, simulate, startkey, endkey, exclude_types=exclude_types,
postgres_db=options['postgres_db'])
def list_types(self, sourcedb, domain, since):
doc_types = sourcedb.view("domain/docs", startkey=[domain],
endkey=[domain, {}], reduce=True, group=True, group_level=2)
doc_count = dict([(row['key'][1], row['value']) for row in doc_types])
if since:
for doc_type in sorted(doc_count.iterkeys()):
num_since = sourcedb.view("domain/docs", startkey=[domain, doc_type, since],
endkey=[domain, doc_type, {}], reduce=True).all()
num = num_since[0]['value'] if num_since else 0
print "{0:<30}- {1:<6} total {2}".format(doc_type, num, doc_count[doc_type])
else:
for doc_type in sorted(doc_count.iterkeys()):
print "{0:<30}- {1}".format(doc_type, doc_count[doc_type])
def copy_docs(self, sourcedb, domain, simulate, startkey=None, endkey=None, doc_ids=None,
type=None, since=None, exclude_types=None, postgres_db=None):
if not doc_ids:
doc_ids = [result["id"] for result in sourcedb.view("domain/docs", startkey=startkey,
endkey=endkey, reduce=False)]
total = len(doc_ids)
count = 0
msg = "Found %s matching documents in domain: %s" % (total, domain)
msg += " of type: %s" % (type) if type else ""
msg += " since: %s" % (since) if since else ""
print msg
err_log = self._get_err_log()
queue = Queue(150)
for i in range(NUM_PROCESSES):
Worker(queue, sourcedb, self.targetdb, exclude_types, total, simulate, err_log).start()
for doc in iter_docs(sourcedb, doc_ids, chunksize=100):
count += 1
queue.put((doc, count))
# shutdown workers
for i in range(NUM_PROCESSES):
queue.put(None)
err_log.close()
if os.stat(err_log.name)[6] == 0:
os.remove(err_log.name)
else:
print 'Failed document IDs written to %s' % err_log.name
if postgres_db:
self.copy_postgres_data(sourcedb, domain, postgres_db, doc_ids=doc_ids, simulate=simulate)
def copy_domain(self, sourcedb, domain):
print "Copying domain doc"
result = sourcedb.view(
"domain/domains",
key=domain,
reduce=False,
include_docs=True
).first()
if result and 'doc' in result:
domain_doc = Domain.wrap(result['doc'])
dt = DocumentTransform(domain_doc, sourcedb)
save(dt, self.targetdb)
else:
print "Domain doc not found for domain %s." % domain
def copy_postgres_data(self, sourcedb, domain, postgres_slug, simulate, doc_ids):
# can make this more configurable or less hard coded eventually
# also note that ordering here is important for foreign key dependencies
postgres_models = [
(StockReport, 'form_id'),
(StockTransaction, 'case_id'),
(DocDomainMapping, 'doc_id'),
# StockState objects are "derived" and get created by StockTransaction post_save signal.
# We may want to directly port these over in the future.
# (StockState, 'case_id'),
]
for model, doc_field in postgres_models:
query_set = model.objects.using(postgres_slug).filter(
**{'{}__in'.format(doc_field): doc_ids}
)
count = query_set.count()
print "Copying {} models ({})".format(model.__name__, count)
if not simulate:
for i, item in enumerate(query_set):
# this can cause primary key conflicts to overwrite local data I think. Oh well?
item.save(using='default')
print 'Synced {}/{} {}'.format(i, count, model.__name__)
def _get_err_log(self):
name = 'copy_domain.err.%s'
for i in range(1000): # arbitrarily large number
candidate = name % i
if not os.path.isfile(candidate):
return open(candidate, 'a', buffering=1)
class Worker(Process):
def __init__(self, queue, sourcedb, targetdb, exclude_types, total, simulate, err_log):
super(Worker, self).__init__()
self.queue = queue
self.sourcedb = sourcedb
self.targetdb = targetdb
self.exclude_types = exclude_types
self.total = total
self.simulate = simulate
self.err_log = err_log
def run(self):
for doc, count in iter(self.queue.get, None):
try:
if self.exclude_types and doc["doc_type"] in self.exclude_types:
print " SKIPPED (excluded type: %s). Synced %s/%s docs (%s: %s)" % \
(doc["doc_type"], count, self.total, doc["doc_type"], doc["_id"])
else:
if not self.simulate:
dt = DocumentTransform(doc, self.sourcedb)
save(dt, self.targetdb)
print " Synced %s/%s docs (%s: %s)" % (count, self.total, doc["doc_type"], doc["_id"])
except Exception, e:
self.err_log.write('%s\n' % doc["_id"])
print " Document %s failed! Error is: %s" % (doc["_id"], e)
| bsd-3-clause | -8,145,945,174,143,193,000 | 41.462451 | 111 | 0.544634 | false | 4.256339 | false | false | false |
softdev-projects/mta-smart-alerts | login.py | 1 | 3018 | """Helpers for user login, logout, and account creation stored in MongoDB"""
from pymongo import Connection
"""Json Object Values"""
nameKey = 'name'
passwordKey = 'password'
authenticatedKey = 'authenticated'
phoneKey = 'phone'
def login(name, password, dbname="users", dbCollectionName="people"):
"""string name, string password, string dbname="users",
string collection="people"
sets authenticated to True for a given user"""
success = False
conn = Connection()
db = conn[dbname]
people = db[dbCollectionName]
if (isInDatabase(name, dbname, dbCollectionName)):
# should only loop through once
for user in people.find({nameKey: name}):
if (user[passwordKey] == password):
success = updateUser(name, True, dbname, dbCollectionName)
return success
def logout(name, dbname="users", dbCollectionName="people"):
"""sets authenticated to False for a given user"""
success = updateUser(name, False, dbname, dbCollectionName)
return success
def updateUser(name, authenticated, dbname="users", dbCollectionName="people"):
"""string name, Boolean authenticated, string dbname, string dbCollectioName
Logs the user in if authenticated is True
Logs the user out if authenticated is False
Returns True if successful or False if not successful"""
success = True
conn = Connection()
db = conn[dbname]
people = db[dbCollectionName]
if (isInDatabase(name, dbname, dbCollectionName)):
people.update({nameKey: name},
{"$set": {authenticatedKey: authenticated}},
False)
else:
success = False
return success
def addUser(name, password, phone, dbname="users", dbCollectionName="people"):
"""string name, string password, string phone,
string dbname, string dbCollectionName
    adds user to the database and returns False if the username already exists
automatically logs the user in after creating the account"""
success = True
conn = Connection()
db = conn[dbname]
if (not isInDatabase(name, dbname, dbCollectionName)):
# Jsonifies the User, authenticated True means the user is logged in
user = {nameKey: name,
phoneKey: phone,
passwordKey: password,
authenticatedKey: True}
people = db[dbCollectionName]
people.insert(user)
else:
success = False
return success
def isInDatabase(name, dbname="users", dbCollectionName="people"):
"""takes string name, string dbname, string dbCollectionName
checks if user is already in the database and returns False if username
already exists"""
conn = Connection()
db = conn[dbname]
# returns collection of users
people = db[dbCollectionName]
# there should be at most one instance of the user in the database
success = (people.find({nameKey: name}).count() == 1)
return success
def main():
pass
if __name__ == '__main__':
main()
| isc | -7,034,176,726,047,326,000 | 26.944444 | 80 | 0.662691 | false | 4.471111 | false | false | false |
pachyderm/pfs | examples/ml/iris/python/iris-train-python-lda/pytrain.py | 2 | 1093 | import pandas as pd
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.externals import joblib
import argparse
import os
# command line arguments
parser = argparse.ArgumentParser(description='Train a model for iris classification.')
parser.add_argument('indir', type=str, help='Input directory containing the training set')
parser.add_argument('outdir', type=str, help='Output directory for the trained model')
args = parser.parse_args()
# training set column names
cols = [
"Sepal_Length",
"Sepal_Width",
"Petal_Length",
"Petal_Width",
"Species"
]
features = [
"Sepal_Length",
"Sepal_Width",
"Petal_Length",
"Petal_Width"
]
# import the iris training set
irisDF = pd.read_csv(os.path.join(args.indir, "iris.csv"), names=cols)
# fit the model
lda = LinearDiscriminantAnalysis().fit(irisDF[features], irisDF["Species"])
# output a text description of the model
f = open(os.path.join(args.outdir, 'model.txt'), 'w')
f.write(str(lda))
f.close()
# persist the model
joblib.dump(lda, os.path.join(args.outdir, 'model.pkl'))
| apache-2.0 | -8,617,461,932,297,405,000 | 25.658537 | 90 | 0.722781 | false | 3.195906 | false | false | false |
bmazin/ARCONS-pipeline | cosmic/CosmicRun.py | 1 | 5451 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from util.ObsFile import ObsFile
from util.FileName import FileName
from util import utils
from util import hgPlot
from cosmic.Cosmic import Cosmic
import tables
from scipy.optimize import curve_fit
from hotpix import hotPixels
import pickle
from interval import interval, inf, imath
from cosmic import tsBinner
import os
import sys
import logging
class CosmicRun:
def __init__(self, path):
print "begin path=",path
os.chdir(path)
file = open("settings.par", 'r')
self.s = {}
for line in file:
temp = line.split("=")
if len(temp) > 1:
self.s[temp[0].strip()] = temp[1].strip()
print temp[0].strip(), '=', temp[1].strip()
file.close()
def findv1(self):
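        """run cosmic ray detection for each sequence, pickling the cosmic time lists and writing cosmic mask .h5 files"""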
populationMax=2000
ySum = np.zeros(populationMax)
frameSum = 'none'
seq5 = self.s['seq5'].split()
for seq in seq5:
print "seq=",seq
outfileName = "cosmicTimeList-"+seq+".pkl"
if not os.path.exists(outfileName):
fn = FileName(self.s['run'],
self.s['sundownDate'],
self.s['obsDate']+"-"+str(seq))
cosmic = Cosmic(fn,
beginTime=self.s['beginTime'],
endTime=self.s['endTime'],
loggingLevel = logging.INFO)
fc = cosmic.findCosmics(stride=int(self.s['stride']),
threshold=int(self.s['threshold']),
populationMax=populationMax,
nSigma=float(self.s['nSigma']))
outfile = open(outfileName, "wb")
pickle.dump(fc['cosmicTimeList'],outfile)
pickle.dump(fc['binContents'],outfile)
outfile.close()
cfn = "cosmicMask-%s.h5"%seq
ObsFile.writeCosmicIntervalToFile(fc['interval'],1.0e6, cfn,
self.s['beginTime'],
self.s['endTime'],
int(self.s['stride']),
int(self.s['threshold']),
float(self.s['nSigma']),
populationMax)
del cosmic
def makemovie1(self):
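        """plot the photon time histogram around each masked cosmic interval and assemble the saved frames into cp.gif"""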
run = self.s['run']
sundownDate = self.s['sundownDate']
obsDate = self.s['obsDate']
stride = int(self.s['stride'])
seq5 = self.s['seq5'].split()
for seq in seq5:
inFile = open("cosmicTimeList-%s.pkl"%(seq),"rb")
cosmicTimeList = pickle.load(inFile)
binContents = pickle.load(inFile)
cfn = "cosmicMask-%s.h5"%seq
intervals = ObsFile.readCosmicIntervalFromFile(cfn)
for interval in intervals:
print "interval=",interval
fn = FileName(run, sundownDate,obsDate+"-"+seq)
obsFile = ObsFile(fn.obs())
obsFile.loadTimeAdjustmentFile(fn.timeAdjustments())
i0=interval[0]
i1=interval[1]
intervalTime = i1-i0
dt = intervalTime/2
beginTime = max(0,i0-0.000200)
endTime = beginTime + 0.001
integrationTime = endTime-beginTime
nBins = int(np.round(obsFile.ticksPerSec*(endTime-beginTime)+1))
timeHgValues = np.zeros(nBins, dtype=np.int64)
ymax = sys.float_info.max/100.0
for iRow in range(obsFile.nRow):
for iCol in range(obsFile.nCol):
gtpl = obsFile.getTimedPacketList(iRow,iCol,
beginTime,integrationTime)
ts = (gtpl['timestamps'] - beginTime)*obsFile.ticksPerSec
ts64 = np.round(ts).astype(np.uint64)
tsBinner.tsBinner(ts64, timeHgValues)
plt.clf()
plt.plot(timeHgValues, label="data")
x0 = (i0-beginTime)*obsFile.ticksPerSec
x1 = (i1-beginTime)*obsFile.ticksPerSec
plt.fill_between((x0,x1),(0,0), (ymax,ymax), alpha=0.2, color='red')
plt.yscale("symlog",linthreshy=0.9)
plt.xlim(0,1000)
plt.ylim(-0.1,300)
tick0 = int(np.round(i0*obsFile.ticksPerSec))
plotfn = "cp-%05d-%s-%s-%s-%09d"%(timeHgValues.sum(),run,obsDate,seq,tick0)
plt.title(plotfn)
plt.legend()
                plt.xlabel("nSigma=%d stride=%d threshold=%d"%(int(self.s['nSigma']),int(self.s['stride']),int(self.s['threshold'])))
                plt.savefig(plotfn+".png")
print "plotfn=",plotfn
os.system("convert -delay 0 `ls -r cp*png` cp.gif")
if __name__ == '__main__':
if len(sys.argv) >1:
path = sys.argv[1]
else:
path = "."
cosmicRun = CosmicRun(path)
cosmicRun.findv1()
print "now call makemovie1"
cosmicRun.makemovie1()
print "glorious success"
| gpl-2.0 | -4,076,501,517,738,674,700 | 39.377778 | 133 | 0.490185 | false | 3.95 | false | false | false |
vipul-tm/DAG | dags-ttpl/subdags/service.py | 1 | 6081 | from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime, timedelta
from airflow.operators import PythonOperator
from airflow.hooks import RedisHook
from airflow.models import Variable
import logging
import itertools
import traceback
import socket
from etl_tasks_functions import get_time
from etl_tasks_functions import subtract_time
import sys
import time
default_args = {
'owner': 'wireless',
'depends_on_past': False,
'start_date': datetime.now() - timedelta(minutes=2),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=1),
'provide_context': True,
'catchup': False,
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
OKGREEN = '\033[92m'
NC='\033[0m'
redis_hook_4 = RedisHook(redis_conn_id="redis_hook_4")
def service_etl(parent_dag_name, child_dag_name, start_date, schedule_interval,celery_queue):
config = eval(Variable.get('system_config'))
dag_subdag = DAG(
dag_id="%s.%s" % (parent_dag_name, child_dag_name),
schedule_interval=schedule_interval,
start_date=start_date,
)
#TODO: Create hook for using socket with Pool
def get_from_socket(site_name,query,socket_ip,socket_port):
"""
Function_name : get_from_socket (collect the query data from the socket)
Args: site_name (poller on which monitoring data is to be collected)
Kwargs: query (query for which data to be collectes from nagios.)
Return : None
raise
Exception: SyntaxError,socket error
"""
#socket_path = "/omd/sites/%s/tmp/run/live" % site_name
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
machine = site_name[:-8]
s.connect((socket_ip, socket_port))
#s.connect(socket_path)
s.send(query)
s.shutdown(socket.SHUT_WR)
output = ''
wait_string= ''
while True:
try:
out = s.recv(100000000)
except socket.timeout,e:
err=e.args[0]
print 'socket timeout ..Exiting'
if err == 'timed out':
sys.exit(1)
if not len(out):
break;
output += out
return output
def extract_and_distribute(*args,**kwargs):
st = get_time()
try:
service_query = Variable.get('service_query') #to get LQL to extract service
device_slot = Variable.get("device_slot_service") #the number of devices to be made into 1 slot
site_ip = kwargs.get('params').get("ip")
site_port = kwargs.get('params').get("port")
except ValueError:
logging.info("Unable to fetch Service Query Failing Task")
return 1
task_site = kwargs.get('task_instance_key_str').split('_')[4:7]
site_name = "_".join(task_site)
start_time = float(Variable.get("data_service_extracted_till_%s"%site_name))
end_time = time.time()
service_query = "GET services\nColumns: host_name host_address service_description service_state "+\
"last_check service_last_state_change host_state service_perf_data\nFilter: service_description ~ _invent\n"+\
"Filter: service_description ~ _status\n"+\
"Filter: service_description ~ Check_MK\n"+\
"Filter: service_description ~ PING\n"+\
"Filter: service_description ~ .*_kpi\n"+\
"Filter: service_description ~ wimax_ss_port_params\n"+\
"Filter: service_description ~ wimax_bs_ss_params\n"+\
"Filter: service_description ~ wimax_aggregate_bs_params\n"+\
"Filter: service_description ~ wimax_bs_ss_vlantag\n"+\
"Filter: service_description ~ wimax_topology\n"+\
"Filter: service_description ~ cambium_topology_discover\n"+\
"Filter: service_description ~ mrotek_port_values\n"+\
"Filter: service_description ~ rici_port_values\n"+\
"Filter: service_description ~ rad5k_topology_discover\n"+\
"Or: 14\nNegate:\n"+\
"Filter: last_check >= %s\n" % start_time+\
"Filter: last_check < %s\n" % end_time+\
"OutputFormat: python\n"
try:
start_time = get_time()
service_data = eval(get_from_socket(site_name, service_query,site_ip,site_port))
logging.info("Fetch Time %s"%subtract_time(start_time))
Variable.set("data_service_extracted_till_%s"%site_name,end_time)
#for x in service_data:
# if x[1] == '10.175.161.2':
# print x
except Exception:
logging.error(OKGREEN+"Unable to fetch the data from Socket")
logging.error('SITE:'+str(site_name)+"\n PORT : "+str(site_port )+ "\n IP: " +str(site_ip)+NC)
service_data = []
traceback.print_exc()
if len(service_data) > 0:
logging.info("The length of Data recieved " + str(len(service_data)))
group_iter = [iter(service_data)]*int(device_slot)
device_slot_data = list(([e for e in t if e !=None] for t in itertools.izip_longest(*group_iter)))
i=1;
logging.info("Service Slot created in redis -> " + str(len(device_slot_data)))
for slot in device_slot_data:
redis_hook_4.rpush("sv_"+site_name+"_slot_"+str(i),slot)
logging.info("Pushing %s"%("sv_"+site_name+"_slot_"+str(i)))
i+=1
Variable.set("sv_%s_slots"%(site_name),str(len(device_slot_data)))
logging.info("Total Time %s"%subtract_time(st))
else:
logging.info("Unable to extract data for time %s to %s "%(start_time,end_time))
for machine in config:
for site in machine.get('sites'):
PythonOperator(
task_id="Service_extract_%s"%(site.get('name')),
provide_context=True,
python_callable=extract_and_distribute,
params={"ip":machine.get('ip'),"port":site.get('port')},
dag=dag_subdag,
queue = celery_queue
)
return dag_subdag
| bsd-3-clause | -1,639,601,173,176,181,000 | 36.537037 | 138 | 0.621608 | false | 3.269355 | false | false | false |
FrankGrimm/text-insights | web/ti/management/commands/fbutils.py | 1 | 12885 | from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import *
from ti import models
import facebook
import logging
from dateutil import parser
import urllib
import urlparse
import cgi
import subprocess
import warnings
import time
import random
import string
import datetime
# hide facebook deprecation warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
# global logging setup
logging.basicConfig(level=logging.INFO)
class PageCrawler(object):
def __init__(self, graph):
self._log = logging.getLogger('crawler')
self._log.info("Initializing")
self.maxpages = 20
self.pagecount = 0
self.graph = graph
self.posts = []
def retrievePageContent(self, pageid, anon):
self.abort = False
self.anon = anon
graph = self.graph
log = self._log
pageinfo = graph.get_object(pageid)
log.info("Processing page \"%s\" (id %s, category %s, likes: %s)" % (pageinfo["username"], pageinfo["id"], pageinfo["category"], pageinfo["likes"]))
try:
pagefeed = graph.get_object(pageid + "/feed")
self.processFeed(pagefeed)
except Exception, e:
self._log.warn(e)
raise e
log.info("Pages processed: %s" % self.pagecount)
log.info("Posts: %s" % len(self.posts))
texts = []
types = []
ccount = 0
clikes = 0
for post in self.posts:
ccount = ccount + len(post.comments)
clikes = clikes + post.likecount
for comment in post.comments:
texts.append(comment.content)
clikes = clikes + comment.likecount
if not post.type in types:
types.append(post.type)
log.info("Comments: %s" % ccount)
log.info("Post types: %s" % ','.join(types))
textcharcount = 0
wordcount = 0
to_be_removed = ".,:!"
for text in texts:
textcharcount = textcharcount + len(text)
s = text
for c in to_be_removed:
s = s.replace(c, '')
wordcount = wordcount + len(s.split())
log.info("Average comment length: %s" % (float(textcharcount) / float(len(texts))))
log.info("Average words per comment: %s" % (float(wordcount) / float(len(texts))))
log.info("Unique commenters: %s" % len(anon.usedaliases))
log.info("Trying to populate db")
# page owner
p_owner, created = models.User.objects.get_or_create(id=long(pageinfo["id"]), defaults={'fullname':pageinfo["name"], 'alias':("page-%s" % pageinfo["username"])} )
p_owner.save()
if created:
log.info("Created user entry for the page. %s" % pageinfo["id"])
else:
log.info("Using existing page user entry. %s" % pageinfo["id"])
# page
p = None
try:
p = models.Page.objects.get(fb_page_id=pageinfo["id"])
log.info("Page entry already exists.")
except ObjectDoesNotExist:
log.info("New page entry required. Creating.")
p = models.Page.objects.create(fb_page_id=pageinfo["id"], fb_page_name=pageinfo["name"], last_updated=datetime.datetime.today(), owner=p_owner)
p.save()
# users
for user_id in self.anon.userlist:
userinfo = self.anon.userlist[user_id]
userobj, created = models.User.objects.get_or_create(id=long(user_id), defaults={'fullname': userinfo["name"], 'alias':userinfo["alias"]})
if created:
userobj.save()
log.info("Created new user #%s (alias: %s)" % (userobj.id, userobj.alias))
# posts
for post in self.posts:
postts = parser.parse(post.timestamp).replace(tzinfo=None)
postuser = models.User.objects.get(id=long(post.user["id"]))
postobj = None
created = False
try:
postobj, created = models.Post.objects.get_or_create(fb_post_id=post.id, defaults={'posttype': post.type, 'text': post.content, 'createtime': postts, 'parent': None, 'page': p, 'createuser': postuser, 'likes': post.likecount})
except Exception, e: # ignore UTF-(>8) postings
log.warn("Failed to import post")
log.warn(e)
if created:
postobj.save()
log.info("Post %s saved to database" % post.id)
else:
log.info("Post %s already stored" % post.id)
for comment in post.comments:
commentts = parser.parse(comment.timestamp).replace(tzinfo=None)
commentuser = models.User.objects.get(id=long(comment.user["id"]))
commentobj = None
created = False
try:
commentobj, created = models.Post.objects.get_or_create(fb_post_id=comment.id, defaults={'posttype': comment.type, 'text': comment.content, 'createtime': commentts, 'parent': postobj, 'page': p, 'createuser': commentuser, 'likes': comment.likecount})
except Exception, e: # ignore UTF-(>8) postings
log.warn("Failed to import comment")
log.warn(e)
if created:
commentobj.save()
log.info("Comment %s saved to database" % comment.id)
else:
log.info("Comment %s already stored" % comment.id)
def processComments(self, pagedata, targetlist, postdata, isPage=True):
graph = self.graph
log = self._log
if isPage:
if len(postdata['comments']) == 0:
log.info("Post %s does not have comments. Aborting." % post['id'])
return
# add comments that are already contained in the page feed
self.addData(postdata["comments"]["data"], targetlist)
log.info("Added comments from page feed (length: %s)" % len(targetlist))
if len(postdata['comments']) == 0:
return
else:
if 'data' in pagedata:
self.addData(pagedata['data'], targetlist)
log.info("Added data for comment page (new length: %s)" % len(targetlist))
log.info("Post %s contains %s comments." % (postdata['id'], len(postdata['comments'])))
if isPage:
parent = postdata['comments']
else:
parent = pagedata
if 'paging' in parent and 'next' in parent['paging']:
nextpage = parent['paging']['next']
nextpage, nextpage_args = self.getGraphRequest(nextpage)
log.info('Found comment paging link: %s' % nextpage)
commentfeed = graph.request(nextpage, nextpage_args)
time.sleep(1)
self.processComments(commentfeed, targetlist, postdata, isPage=False)
def getGraphRequest(self, nextpage):
if nextpage.startswith("https://graph.facebook.com/"):
print nextpage
nextpage = urlparse.urlparse(nextpage)
qs = cgi.parse_qs(nextpage.query)
print qs
#del qs['access_token']
nextpage = nextpage.path #+ "?" + urllib.urlencode(qs, True)
nextpage = nextpage[1:]
nextpage_args = qs
return nextpage, nextpage_args
def processFeed(self, pagefeed):
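        """add the current page of feed data and keep requesting 'next' pages (1 s apart) until the feed is exhausted or maxpages is reached"""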
graph = self.graph
log = self._log
self.maxpages = self.maxpages - 1
if self.maxpages <= 0:
self.abort = True
log.info("Not fetching more pages. Maximum exceeded.")
self.pagecount = self.pagecount + 1
try:
nextpage = pagefeed["paging"]["next"]
nextpage, nextpage_args = self.getGraphRequest(nextpage)
except KeyError:
# no next page
log.info("Hit last page. Aborting.")
self.abort = True
pagedata = pagefeed["data"]
lpd = len(pagedata)
log.info("Processing %s feed items" % lpd)
self.addData(pagedata, self.posts)
if lpd == 0:
log.info("Hit empty data response. Aborting.")
self.abort = True
if not self.abort:
log.info("Requesting next page of data <%s>" % nextpage)
pagefeed = graph.request(nextpage, nextpage_args)
time.sleep(1)
self.processFeed(pagefeed)
def addData(self, data, target):
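        """wrap each raw feed entry in a Post (registering its author with the anonymizer), pull in its comments, and append it to target"""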
for postdata in data:
id = postdata["id"]
try:
type = postdata["type"]
except:
type = "comment"
user = dict(id=postdata["from"]["id"], name=postdata["from"]["name"])
self.anon.getUserId(user) # add to userlist
content = ""
try:
content = postdata["message"]
except:
pass
try:
content = postdata["story"]
except:
pass
timestamp = postdata["created_time"]
likecount = 0
try:
likecount = len(postdata["likes"]["data"])
except:
pass
p = Post(id, type, user, content, timestamp, likecount, self.anon)
comments = None
try:
comments = postdata["comments"]["data"]
except:
pass
if comments is not None:
self.processComments(data, p.comments, postdata)
for comment in p.comments:
comment.post = p
target.append(p)
def gatherUserData(self, user):
log = self._log
graph = self.graph
if user.gender is None or user.gender != '':
return # already gathered
try:
userinfo = graph.get_object("/" + str(user.id))
if 'gender' in userinfo:
user.gender = userinfo['gender']
else:
user.gender = 'unknown'
if 'locale' in userinfo:
user.locale = userinfo['locale']
except ValueError:
print "Invalid data."
user.save()
def retrievePageUsers(self, pageid):
self.abort = False
graph = self.graph
log = self._log
page = models.Page.objects.get(id=pageid)
pageinfo = graph.get_object("/" + page.fb_page_id)
log.info("Processing page \"%s\" (id %s, category %s, likes: %s)" % (pageinfo["username"], pageinfo["id"], pageinfo["category"], pageinfo["likes"]))
pageuser_ids = models.Post.objects.filter(page__exact=page).values('createuser').distinct()
pageusers = models.User.objects.filter(id__in=pageuser_ids)
idx = 0
pageusercount = pageusers.count()
for user in pageusers:
idx = idx + 1
print "[%s/%s] User id %s" % (idx, pageusercount, user.id)
self.gatherUserData(user)
class AnonymizeUsers(object):
def __init__(self):
self.userlist = dict()
self.usedaliases = []
def getUserById(self, user_id):
if user_id in self.userlist:
return self.userlist[user_id]
def getUserByName(self, user_name):
for user_key in self.userlist:
user = self.userlist[user_key]
if user["name"] == user_name:
return user
return None
def getUserId(self, user):
if not user["id"] in self.userlist:
self.userlist[user["id"]] = dict(id = user["id"], name=user["name"], alias=None)
newalias = None
while newalias is None or newalias in self.usedaliases:
newalias = self.generateAlias()
self.userlist[user["id"]]["alias"] = newalias
self.usedaliases.append(newalias)
return self.userlist[user["id"]]["alias"]
def generateAlias(self):
#http://stackoverflow.com/questions/2257441/python-random-string-generation-with-upper-case-letters-and-digits
newalias = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(7))
return newalias
class Post(object):
def __init__(self, id, type, user, content, timestamp, likecount, anoninstance):
self.id = id
self.user = user
self.type = type
self.content = content
self.timestamp = timestamp
self.likecount = likecount
self.comments = []
self.post = None
self.anon = anoninstance
def __str__(self):
return "Post[id=%s;type=%s;user=%s(%s):%s:;ts=%s;likes=%s;comments=%s]:\r\n%s" % (self.id, self.type, self.user["name"], self.user["id"], self.anon.getUserId(self.user), self.timestamp, self.likecount, len(self.comments), self.content)
| mit | -4,785,154,007,406,689,000 | 35.19382 | 270 | 0.557548 | false | 4.017774 | false | false | false |
aemerick/galaxy_analysis | analysis/spatial_acf.py | 1 | 2265 | import yt
import numpy as np
from galaxy_analysis import Galaxy
from galaxy_analysis.utilities import utilities
from astroML.time_series import ACF_EK
from astroML.time_series import ACF_scargle
from matplotlib import rc
fsize = 17
rc('text', usetex=False)
rc('font', size=fsize)#, ftype=42)
line_width = 3
point_size = 30
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def simple_plot(x,y,name):
fig, ax = plt.subplots()
ax.plot(x,y, lw = 3 , color = 'black')
plt.minorticks_on()
plt.tight_layout()
fig.set_size_inches(8,8)
plt.savefig(name)
plt.close()
return
#
# testing script for now
#
gal = Galaxy('DD0126')
# select a random SF region
n = gal.df['number_density']
T = gal.df['temperature']
select = (n >= 200.0) * (T < 200.0)
x,y,z = gal.df['x'][select], gal.df['y'][select], gal.df['z'][select]
pos = np.array([x[0],y[0],z[0]])
rmax = 50.0
dr = 5.0
sphere = gal.ds.sphere(pos, (rmax, 'pc')) # do small for now
x = sphere['spherical_radius'].convert_to_units('pc').value
y = sphere['Fe_Fraction'].value
p = yt.ProfilePlot(sphere, "radius", ["Fe_Fraction",'Fe_over_H','O_over_Fe'],
weight_field = 'cell_volume', accumulation = False)
p.set_unit('radius','pc')
p.save()
bins = np.arange(0.0, rmax+dr, dr)
#acf, bins = ACF_scargle(x, y, dy = 0.0000001, n_omega = 2**12, omega_max = np.pi/5.0) #, bins = bins)
acf, err, bins = ACF_EK(x,y,dy=1.0E-8, bins = bins)
print(acf)
print(bins)
simple_plot(0.5*(bins[1:]+bins[:-1]), acf, 'Fe_Fraction_acf.png')
print('----------------------------------------------------------')
print('----------------------------------------------------------')
x = sphere['spherical_radius'].convert_to_units('pc').value
y = sphere['Fe_over_H'].value
bins = np.arange(0.0, rmax+dr, dr)
#acf, bins = ACF_scargle(x, y, dy = 0.0001, n_omega = 2**12, omega_max = np.pi/5.0) #, bins = bins)
acf, err, bins = ACF_EK(x, y, dy = 0.00001, bins = bins)
simple_plot(0.5*(bins[1:]+bins[:-1]), acf, 'Fe_over_H_acf.png')
print(acf)
print(bins)
print("-------------------------------------------------------------------")
print("-------------------------------------------------------------------")
| mit | -1,958,499,865,761,079,600 | 24.738636 | 102 | 0.550552 | false | 2.775735 | false | false | false |
t8m8/SwitchSettings | switch_settings_core.py | 1 | 2779 | import sublime, sublime_plugin
class SettingsWrapper():
SETTINGS_FILE_NAME = 'SwitchSettings.sublime-settings'
SS_CURRENT_SETTINGS_NAME = 'current_settings_name'
SS_SETTINGS_NAMES = 'settings_names'
SS_SETTINGS_CONTENTS = 'settings_contents'
def __init__(self):
self._buf = None
self.settings = sublime.load_settings(SettingsWrapper.SETTINGS_FILE_NAME)
self.settings.add_on_change(SettingsWrapper.SS_CURRENT_SETTINGS_NAME, self.on_change)
def __save(self):
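        """save the current Preferences.sublime-settings content under the settings name stored in _buf"""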
import os
if self._buf is None: return False
preferences = os.path.join(sublime.packages_path(), 'User', 'Preferences.sublime-settings')
if not os.path.exists(preferences):
with open(preferences, mode='w') as f:
pass
with open(preferences, mode='r') as f:
preferences_settings = sublime.decode_value(f.read())
contents = self.get_settings_contents()
contents[self._buf] = preferences_settings
self.settings.set(SettingsWrapper.SS_SETTINGS_CONTENTS, contents)
self._buf = None
return True
def __overwrite(self):
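        """overwrite Preferences.sublime-settings with the content stored for the currently selected settings name"""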
import os
cur_name = self.get_current_settings()
contents = self.get_settings_contents()
current_content = contents[cur_name]
preferences = os.path.join(sublime.packages_path(), 'User', 'Preferences.sublime-settings')
with open(preferences, mode='w') as f:
f.write(sublime.encode_value(current_content, True))
return True
def on_change(self):
from . import switch_settings_util as ss_util
if self.__save() and self.__overwrite():
self.save_ss_settings()
ss_util.reboot()
def save_ss_settings(self):
sublime.save_settings(SettingsWrapper.SETTINGS_FILE_NAME)
def set_buffer(self, buf):
self._buf = buf
def get_current_settings(self):
return self.settings.get(SettingsWrapper.SS_CURRENT_SETTINGS_NAME, 'Switch_Settings_Default')
def set_current_settings(self, name):
self.settings.set(SettingsWrapper.SS_CURRENT_SETTINGS_NAME, name)
def get_settings(self):
return self.settings.get(SettingsWrapper.SS_SETTINGS_NAMES, [])
def add_settings(self, name):
names = self.get_settings()
names.append(name)
self.settings.set(SettingsWrapper.SS_SETTINGS_NAMES, names);
def remove_settings(self, name):
names = self.get_settings()
names.remove(name)
self.settings.set(SettingsWrapper.SS_SETTINGS_NAMES, names)
def get_settings_contents(self):
return self.settings.get(SettingsWrapper.SS_SETTINGS_CONTENTS, {})
def add_settings_content(self, name, item):
contents = self.get_settings_contents()
contents[name] = item
self.settings.set(SettingsWrapper.SS_SETTINGS_CONTENTS, contents)
def pop_settings_content(self, name):
contents = self.get_settings_contents()
tmp = contents.pop(name)
self.settings.set(SettingsWrapper.SS_SETTINGS_CONTENTS, contents)
return tmp
| mit | 2,663,929,249,421,894,700 | 25.730769 | 95 | 0.731918 | false | 3.201613 | false | false | false |
Casmo888/NTHUOJ_web | install.py | 1 | 1598 | '''
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import getpass
# Setting nthuoj.ini
host = raw_input("Mysql host: ")
db = raw_input("Mysql database: ")
user = raw_input("Please input your mysql user: ")
pwd = getpass.getpass()
# Re-write nthuoj.ini file
iniFile = open("nthuoj.ini", "w")
iniFile.write("[client]\n")
iniFile.write("host = %s\n" % host)
iniFile.write("database = %s\n" % db)
iniFile.write("user = %s\n" % user)
iniFile.write("password = %s\n" % pwd)
iniFile.write("default-character-set = utf8\n")
iniFile.close()
# Install needed library
| mit | -5,447,285,099,431,688,000 | 34.511111 | 78 | 0.759074 | false | 3.733645 | false | false | false |
optimuspaul/pyorient | pyorient/messages/cluster.py | 2 | 9843 | # -*- coding: utf-8 -*-
__author__ = 'Ostico <[email protected]>'
from pyorient.exceptions import PyOrientBadMethodCallException
from ..constants import CLUSTER_TYPE_PHYSICAL, DATA_CLUSTER_ADD_OP, \
DATA_CLUSTER_COUNT_OP, FIELD_BOOLEAN, FIELD_BYTE, FIELD_LONG, FIELD_SHORT, \
FIELD_STRING, DATA_CLUSTER_DATA_RANGE_OP, DATA_CLUSTER_DROP_OP, CLUSTER_TYPES
from ..utils import need_db_opened
from .base import BaseMessage
#
# DATACLUSTER ADD
#
# Add a new data cluster.
#
# Request: (name:string)(cluster-id:short - since 1.6 snapshot)
# Response: (new-cluster:short)
#
# Where: type is one of "PHYSICAL" or "MEMORY". If cluster-id is -1 (recommended value)
# new cluster id will be generated.
#
class DataClusterAddMessage(BaseMessage):
def __init__(self, _orient_socket ):
super( DataClusterAddMessage, self ).__init__(_orient_socket)
self._cluster_name = ''
self._cluster_type = CLUSTER_TYPE_PHYSICAL
self._cluster_location = 'default'
self._datasegment_name = 'default'
self._new_cluster_id = -1
# order matters
self._append( ( FIELD_BYTE, DATA_CLUSTER_ADD_OP ) )
@need_db_opened
def prepare(self, params=None):
try:
# mandatory if not passed by method
self._cluster_name = params[0]
# mandatory if not passed by method
self.set_cluster_type( params[1] )
self._cluster_location = params[2]
self._datasegment_name = params[3]
except( IndexError, TypeError ):
# Use default for non existent indexes
pass
except ValueError:
raise PyOrientBadMethodCallException(
params[1] + ' is not a valid data cluster type', []
)
if self.get_protocol() < 24:
self._append( ( FIELD_STRING, self._cluster_type ) )
self._append( ( FIELD_STRING, self._cluster_name ) )
self._append( ( FIELD_STRING, self._cluster_location ) )
self._append( ( FIELD_STRING, self._datasegment_name ) )
else:
self._append( ( FIELD_STRING, self._cluster_name ) )
if self.get_protocol() >= 18:
self._append( ( FIELD_SHORT, self._new_cluster_id ) )
return super( DataClusterAddMessage, self ).prepare()
def fetch_response(self):
self._append( FIELD_SHORT )
return super( DataClusterAddMessage, self ).fetch_response()[0]
def set_cluster_name(self, _cluster_name):
self._cluster_name = _cluster_name
return self
def set_cluster_type(self, _cluster_type):
if _cluster_type in CLUSTER_TYPES:
# user choice storage if present
self._cluster_type = _cluster_type
else:
raise PyOrientBadMethodCallException(
_cluster_type + ' is not a valid cluster type', []
)
return self
def set_cluster_location(self, _cluster_location):
self._cluster_location = _cluster_location
return self
def set_datasegment_name(self, _datasegment_name):
self._datasegment_name = _datasegment_name
return self
def set_cluster_id(self, _new_cluster_id):
self._new_cluster_id = _new_cluster_id
return self
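# Usage sketch for DataClusterAddMessage. `connection` is assumed to be an
# OrientSocket on which a database has already been opened elsewhere (the
# @need_db_opened decorator enforces this); the cluster name is a placeholder.
def _example_add_cluster(connection):
    msg = DataClusterAddMessage(connection)
    msg.set_cluster_name("example_cluster")
    msg.set_cluster_type(CLUSTER_TYPE_PHYSICAL)
    msg.prepare()
    msg.send()
    return msg.fetch_response()  # short id of the newly created cluster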
#
# DATA CLUSTER COUNT
#
# Returns the number of records in one or more clusters.
#
# Request: (cluster-count:short)(cluster-number:short)*(count-tombstones:byte)
# Response: (records-in-clusters:long)
# Where:
#
# cluster-count the number of requested clusters
# cluster-number the cluster id of each single cluster
# count-tombstones the flag which indicates whether deleted records
#     should be taken into account. It is applicable for autosharded storage only,
# otherwise it is ignored.
# records-in-clusters is the total number of records found in the requested clusters
#
class DataClusterCountMessage(BaseMessage):
def __init__(self, _orient_socket ):
super( DataClusterCountMessage, self ).__init__(_orient_socket)
self._cluster_ids = []
self._count_tombstones = 0
# order matters
self._append( ( FIELD_BYTE, DATA_CLUSTER_COUNT_OP ) )
@need_db_opened
def prepare(self, params=None):
if isinstance( params, tuple ) or isinstance( params, list ):
try:
# mandatory if not passed by method
# raise Exception if None
if isinstance( params[0], tuple ) or isinstance( params[0], list ):
self._cluster_ids = params[0]
else:
raise PyOrientBadMethodCallException(
"Cluster IDs param must be an instance of Tuple or List.", []
)
self._count_tombstones = params[1]
except( IndexError, TypeError ):
# Use default for non existent indexes
pass
self._append( ( FIELD_SHORT, len(self._cluster_ids) ) )
for x in self._cluster_ids:
self._append( ( FIELD_SHORT, x ) )
self._append( ( FIELD_BOOLEAN, self._count_tombstones ) )
return super( DataClusterCountMessage, self ).prepare()
def fetch_response(self):
self._append( FIELD_LONG )
return super( DataClusterCountMessage, self ).fetch_response()[0]
def set_cluster_ids(self, _cluster_ids):
self._cluster_ids = _cluster_ids
return self
def set_count_tombstones(self, _count_tombstones):
self._count_tombstones = _count_tombstones
return self
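# Usage sketch for DataClusterCountMessage, under the same assumption of an
# already-opened database; the cluster ids below are placeholders.
def _example_count_records(connection):
    msg = DataClusterCountMessage(connection)
    msg.set_cluster_ids([3, 4, 5])
    msg.set_count_tombstones(0)
    msg.prepare()
    msg.send()
    return msg.fetch_response()  # total number of records in those clusters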
#
# DATA CLUSTER DATA RANGE
#
# Returns the range of record ids for a cluster.
#
# Request: (cluster-number:short)
# Response: (begin:long)(end:long)
#
class DataClusterDataRangeMessage(BaseMessage):
def __init__(self, _orient_socket ):
super( DataClusterDataRangeMessage, self ).__init__(_orient_socket)
self._cluster_id = 0
self._count_tombstones = 0
# order matters
self._append( ( FIELD_BYTE, DATA_CLUSTER_DATA_RANGE_OP ) )
@need_db_opened
def prepare(self, params=None):
if isinstance( params, int ):
# mandatory if not passed by method
self._cluster_id = params
self._append( ( FIELD_SHORT, self._cluster_id ) )
return super( DataClusterDataRangeMessage, self ).prepare()
def fetch_response(self):
self._append( FIELD_LONG )
self._append( FIELD_LONG )
return super( DataClusterDataRangeMessage, self ).fetch_response()
def set_cluster_id(self, _cluster_id):
self._cluster_id = _cluster_id
return self
#
# DATA CLUSTER DROP
#
# Remove a cluster.
#
# Request: (cluster-number:short)
# Response: (delete-on-clientside:byte)
#
class DataClusterDropMessage(BaseMessage):
def __init__(self, _orient_socket ):
super( DataClusterDropMessage, self ).__init__(_orient_socket)
self._cluster_id = 0
self._count_tombstones = 0
# order matters
self._append( ( FIELD_BYTE, DATA_CLUSTER_DROP_OP ) )
@need_db_opened
def prepare(self, params=None):
if isinstance( params, int ):
# mandatory if not passed by method
self._cluster_id = params
self._append( ( FIELD_SHORT, self._cluster_id ) )
return super( DataClusterDropMessage, self ).prepare()
def fetch_response(self):
self._append( FIELD_BOOLEAN )
return super( DataClusterDropMessage, self ).fetch_response()[0]
def set_cluster_id(self, _cluster_id):
self._cluster_id = _cluster_id
return self
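# Usage sketch combining the range and drop messages; same assumptions as the
# examples above, with `cluster_id` supplied by the caller.
def _example_range_then_drop(connection, cluster_id):
    range_msg = DataClusterDataRangeMessage(connection)
    range_msg.set_cluster_id(cluster_id)
    range_msg.prepare()
    range_msg.send()
    begin, end = range_msg.fetch_response()  # first and last record positions
    drop_msg = DataClusterDropMessage(connection)
    drop_msg.set_cluster_id(cluster_id)
    drop_msg.prepare()
    drop_msg.send()
    dropped = drop_msg.fetch_response()  # True when the server removed it
    return begin, end, dropped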
class Information(object):
def __iter__(self):
return self
def next(self): # Python 3: def __next__(self)
if self._indexPosition >= len( self.dataClusters ):
raise StopIteration
else:
self._indexPosition += 1
return self.dataClusters[ self._indexPosition -1 ]
def __next__(self):
return self.next()
def __init__( self, params ):
self._indexPosition = 0
self._reverseMap = {}
self._reverseIDMap = {}
self.orientRelease = None
self.version_info = {
'major': None,
'minor': None,
'build': None
}
self.dataClusters = params[0]
for ( position, cluster ) in enumerate( self.dataClusters ):
if not isinstance( cluster[ 'name' ], str ):
cluster[ 'name' ] = cluster[ 'name' ].decode()
self._reverseMap[ str( cluster[ 'name' ] ) ] = [ position, cluster[ 'id' ] ]
self._reverseIDMap[ cluster[ 'id' ] ] = [ position, str( cluster[ 'name' ] ) ]
self.hiAvailabilityList = params[1][0]
self._parse_version( params[1][1] )
def _parse_version( self, param ):
if not isinstance(param, str):
param = param.decode()
self.orientRelease = param
try:
version_info = self.orientRelease.split( "." )
self.version_info[ 'major' ] = int( version_info[0] )
self.version_info[ 'minor' ] = version_info[1]
self.version_info[ 'build' ] = version_info[2]
except IndexError:
pass
if "-" in self.version_info[ 'minor' ]:
_temp = self.version_info[ 'minor' ].split( "-" )
self.version_info[ 'minor' ] = int( _temp[0] )
self.version_info[ 'build' ] = _temp[1]
self.version_info[ 'build' ] = \
self.version_info[ 'build' ].split( " ", 1 )[0]
def get_class_position( self, cluster_name ):
return self._reverseMap[ cluster_name.lower() ][1]
def get_class_name( self, position ):
return self._reverseIDMap[ position ][1]
def __len__( self ):
return len( self.dataClusters )
| apache-2.0 | -5,414,585,741,678,273,000 | 30.649518 | 90 | 0.596972 | false | 3.870625 | false | false | false |
NiclasEriksen/rpg_procgen | utils/scene.py | 2 | 4722 | """
A simple scene system.
This implements a simple scene system, which combines different
scenes or screens and allows you to switch between them.
"""
from .events import EventHandler
__all__ = ["Scene", "SceneManager", "SCENE_ENDED", "SCENE_RUNNING",
"SCENE_PAUSED"
]
SCENE_ENDED = 0
SCENE_RUNNING = 1
SCENE_PAUSED = 2
class SceneManager(object):
"""A scene management system.
The SceneManager takes care of scene transitions, preserving scene
states and everything else to maintain and ensure the control flow
between different scenes.
"""
def __init__(self):
"""Creates a new SceneManager."""
self.scenes = []
self.next = None
self.current = None
self.switched = EventHandler(self)
def push(self, scene):
"""Pushes a new scene to the scene stack.
The current scene will be put on the scene stack for later
execution, while the passed scene will be set as current one.
Once the newly pushed scene has ended or was paused, the
previous scene will continue its execution.
"""
self.next = scene
if self.current:
self.scenes.append(self.current)
def pop(self):
"""Pops a scene from the scene stack, bringing it into place for
being executed on the next update."""
if len(self.scenes) == 0:
return
self.next = self.scenes.pop()
def pause(self):
"""Pauses the currently running scene."""
if self.current:
self.current.pause()
def unpause(self):
"""Unpauses the current scene."""
if self.current:
self.current.unpause()
def update(self):
"""Updates the scene state.
Updates the scene state and switches to the next scene, if any
has been pushed into place.
"""
if self.next:
# A scene is about to be started, finish the old one
if self.current and self.current.is_running:
self.current.end()
self.current.manager = None
self.current = self.next
self.current.manager = self
self.next = None
self.switched()
if self.current and self.current.has_ended:
self.current.start()
class Scene(object):
"""A simple scene state object used to maintain the application workflow
based on the presentation of an application.
"""
def __init__(self, name=None):
"""Creates a new Scene."""
self.name = name
self.manager = None
self.state = SCENE_ENDED
self.started = EventHandler(self)
self.paused = EventHandler(self)
self.unpaused = EventHandler(self)
self.ended = EventHandler(self)
def __repr__(self):
states = ("ENDED", "RUNNING", "PAUSED")
return "Scene(name='%s', state='%s')" % (self.name, states[self.state])
def start(self):
"""Executed, whenever the scene starts.
This is usually invoked by the SceneManager and will update the
scene's internal state and executes the started event.
"""
if self.state not in (SCENE_RUNNING, SCENE_PAUSED):
self.state = SCENE_RUNNING
self.started()
def pause(self):
"""Executed, whenever the scene is paused.
This is usually invoked by the SceneManager and will update the
scene's internal state and executes the paused event.
"""
if self.state == SCENE_RUNNING:
self.state = SCENE_PAUSED
self.paused()
def unpause(self):
"""Executed, whenever the scene is unpaused.
This is usually invoked by the SceneManager and will update the
scene's internal state and executes the unpaused event.
"""
if self.state == SCENE_PAUSED:
self.state = SCENE_RUNNING
self.unpaused()
def end(self):
"""Executed, whenever the scene ends.
This is usually invoked by the SceneManager and will update the
scene's internal state and executes the ended event.
"""
if self.state != SCENE_ENDED:
self.state = SCENE_ENDED
self.ended()
@property
def is_running(self):
"""True, if the scene is currently running, False otherwise."""
return self.state == SCENE_RUNNING
@property
def is_paused(self):
"""True, if the scene is currently paused, False otherwise."""
return self.state == SCENE_PAUSED
@property
def has_ended(self):
"""True, if the scene has ended, False otherwise."""
return self.state == SCENE_ENDED
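# Usage sketch: two scenes driven through the manager the way a hypothetical
# main loop would do it.
def _example_scene_flow():
    manager = SceneManager()
    menu, game = Scene("menu"), Scene("game")
    manager.push(menu)
    manager.update()   # starts "menu"
    manager.push(game)
    manager.update()   # ends "menu", starts "game"
    manager.pop()
    manager.update()   # ends "game", restarts "menu"
    return manager.current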
| cc0-1.0 | -704,396,925,945,017,200 | 30.065789 | 79 | 0.602711 | false | 4.223614 | false | false | false |
stupidamigo/lice-python | lice/tests.py | 1 | 2521 | from core import *
import os
import unittest
class TestPaths(unittest.TestCase):
def test_paths(self):
self.assertEqual(clean_path("."), os.getcwd())
self.assertEqual(clean_path("$HOME"), os.environ["HOME"])
self.assertEqual(clean_path("~"), os.environ["HOME"])
class TestTemplates(unittest.TestCase):
def test_file_template(self):
pwd = os.path.abspath(os.path.dirname(__file__))
for license in LICENSES:
path = os.path.join(pwd, "template-%s.txt" % license)
with open(path) as infile:
self.assertEqual(infile.read(), load_file_template(path))
def test_package_template(self):
pwd = os.path.abspath(os.path.dirname(__file__))
for license in LICENSES:
path = os.path.join(pwd, "template-%s.txt" % license)
with open(path) as infile:
self.assertEqual(infile.read(), load_package_template(license))
def test_extract_vars(self):
for license in LICENSES:
template = """Oh hey, {{ this }} is a {{ template }} test."""
var_list = extract_vars(template)
self.assertEquals(var_list, ["this", "template"])
def test_license(self):
context = {
"year": "1981",
"project": "lice",
"organization": "Awesome Co.",
}
for license in LICENSES:
template = load_package_template(license)
rendered = template.replace("{{ year }}", context["year"])
rendered = rendered.replace("{{ project }}", context["project"])
rendered = rendered.replace("{{ organization }}", context["organization"])
self.assertEqual(rendered, generate_license(template, context))
def test_license_header(self):
context = {
"year": "1981",
"project": "lice",
"organization": "Awesome Co.",
}
for license in LICENSES:
try:
template = load_package_template(license, header=True)
rendered = template.replace("{{ year }}", context["year"])
rendered = rendered.replace("{{ project }}", context["project"])
rendered = rendered.replace("{{ organization }}", context["organization"])
self.assertEqual(rendered, generate_license(template, context))
except IOError:
pass # it's okay to not find templates
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 9,002,063,106,927,393,000 | 30.911392 | 90 | 0.564459 | false | 4.302048 | true | false | false |
DerekSelander/LLDB | lldb_commands/pmodule.py | 1 | 11903 | # MIT License
#
# Copyright (c) 2017 Derek Selander
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import lldb
import shlex
import optparse
import os
import textwrap
from stat import *
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand(
'command script add -f pmodule.pmodule pmodule -h "Generates DTrace script to profile module"')
def pmodule(debugger, command, exe_ctx, result, internal_dict):
'''Creates a custom dtrace script that profiles modules in an executable
based upon its memory layout and ASLR. Provide no arguments w/ '-a' if
you want a count of all the modules firing. Provide a module if you want
to dump all the methods as they occur.
pmodule [[MODULENAME]...]
You have the option to use objc or non-objc (i.e. objc$target or pid$target)
Through the -n argument
Examples:
# Trace all Objective-C code in UIKit
pmodule UIKit
# Trace all non-Objective-C code in libsystem_kernel.dylib (i.e. pid$target:libsystem_kernel.dylib::entry)
pmodule -n libsystem_kernel.dylib
# Dump errrything. Only displays count of function calls from modules after you end the script. Warning slow
pmodule -a
'''
command_args = shlex.split(command)
parser = generate_option_parser()
target = exe_ctx.target
try:
(options, args) = parser.parse_args(command_args)
except:
result.SetError("option parsing failed")
return
pid = exe_ctx.process.id
# module_parirs = get_module_pair(, target)
is_cplusplus = options.non_objectivec
if not args and not (options.all_modules or options.all_modules_output):
result.SetError('Need a module or use the -a option. You can list all modules by "image list -b"')
return
dtrace_script = generate_dtrace_script(target, options, args)
if options.debug:
source = '\n'.join(['# '+ format(idx + 1, '2') +': ' + line for idx, line in enumerate(dtrace_script.split('\n'))])
result.AppendMessage(source)
return
filename = '/tmp/lldb_dtrace_pmodule_' + ''.join(args)
create_or_touch_filepath(filename, dtrace_script)
copycommand = 'echo \"sudo {0} -p {1} 2>/dev/null\" | pbcopy'
os.system(copycommand.format(filename, pid))
result.AppendMessage("Copied to clipboard. Paste in Terminal.")
# 10.12.3 beta broke AppleScript's "do script" API. Dammit. Using pbcopy instead...
# dtraceCommand = 'osascript -e \'tell application \"Terminal\" to activate & do script \"sudo {0} -p {1} \"\' 2>/dev/null'
# os.system(dtraceCommand.format(filename, pid))
# result.AppendMessage("Continuing in different Terminal tab...")
result.SetStatus(lldb.eReturnStatusSuccessFinishNoResult)
def generate_conditional_for_module_name(module_name, target, options):
pair = get_module_pair(module_name, target)
if not options.non_objectivec and options.root_function:
template = '/ ({0} > *(uintptr_t *)copyin(uregs[R_SP], sizeof(uintptr_t)) || *(uintptr_t *)copyin(uregs[R_SP], sizeof(uintptr_t)) > {1}) && {0} <= uregs[R_PC] && uregs[R_PC] <= {1} /\n'
elif options.non_objectivec and not options.root_function:
template = '\n'
elif not options.non_objectivec and not options.root_function:
template = '/ {0} <= uregs[R_PC] && uregs[R_PC] <= {1} /\n'
elif options.non_objectivec and options.root_function:
template = '/ ({0} > *(uintptr_t *)copyin(uregs[R_SP], sizeof(uintptr_t)) || *(uintptr_t *)copyin(uregs[R_SP], sizeof(uintptr_t)) > {1}) /\n'
return template.format(pair[0], pair[1])
def generate_dump_all_module_script(target):
dtrace_script = r'''
this->method_counter = \"Unknown\";
program_counter = uregs[R_PC];
'''
dtrace_template = "this->method_counter = {} <= program_counter && program_counter <= {} ? \"{}\" : this->method_counter;\n"
dtrace_template = textwrap.dedent(dtrace_template)
for module in target.modules:
section = module.FindSection("__TEXT")
lower_bounds = section.GetLoadAddress(target)
upper_bounds = lower_bounds + section.file_size
module_name = module.file.basename
if "_lldb_" not in module_name:
dtrace_script += dtrace_template.format(lower_bounds, upper_bounds, module_name)
return dtrace_script
def create_or_touch_filepath(filepath, dtrace_script):
file = open(filepath, "w")
file.write(dtrace_script)
file.flush()
st = os.stat(filepath)
os.chmod(filepath, st.st_mode | S_IEXEC)
file.close()
def generate_dtrace_script(target, options, args):
is_cplusplus = options.non_objectivec
dtrace_script = '''#!/usr/sbin/dtrace -s
#pragma D option quiet
'''
if options.flow_indent:
dtrace_script += '#pragma D option flowindent'
dtrace_script += '''
dtrace:::BEGIN
{{
printf("Starting... Hit Ctrl-C to end. Observing {} functions in {}\\n");
}}
'''.format('straight up, normal' if is_cplusplus else 'Objective-C', (', ').join(args))
dtrace_template = ''
pid = target.process.id
is_cplusplus = options.non_objectivec
query_template = '{}$target:{}::entry\n'
if options.all_modules or options.all_modules_output:
if is_cplusplus:
dtrace_script += query_template.format('pid', '')
else:
dtrace_script += query_template.format('objc', '')
if options.all_modules_output and not options.non_objectivec:
dtrace_script += '{\nprintf("0x%012p %c[%s %s]\\n", uregs[R_RDI], probefunc[0], probemod, (string)&probefunc[1]);\n}'
elif options.all_modules_output and options.non_objectivec:
dtrace_script += '{\nprintf("0x%012p %s, %s\\n", uregs[R_RDI], probemod, probefunc);\n}'
else:
dtrace_script += '{\nprogram_counter = uregs[R_PC];\nthis->method_counter = \"Unknown\";' # TODO 64 only change to universal arch
dtrace_template += "this->method_counter = {} <= program_counter && program_counter <= {} ? \"{}\" : this->method_counter;\n"
dtrace_template = textwrap.dedent(dtrace_template)
for module in target.modules:
section = module.FindSection("__TEXT")
lower_bounds = section.GetLoadAddress(target)
upper_bounds = lower_bounds + section.file_size
module_name = module.file.basename
if "_lldb_" not in module_name:
dtrace_script += dtrace_template.format(lower_bounds, upper_bounds, module_name)
dtrace_script += "\n@num[this->method_counter] = count();\n}\n"
else:
for module_name in args:
# uregs[R_RDI]
# Objective-C logic: objc$target:::entry / {} <= uregs[R_PC] && uregs[R_PC] <= {} / { }
if not is_cplusplus:
dtrace_script += query_template.format('objc', '')
dtrace_script += generate_conditional_for_module_name(module_name, target, options)
# Non-Objective-C logic: pid$target:Module::entry { }
if is_cplusplus:
dtrace_script += query_template.format('pid', module_name)
dtrace_script += generate_conditional_for_module_name(module_name, target, options)
if options.timestamp:
dtrace_script += '{\n printf("%Y [%s] %s\\n", walltimestamp, probemod, probefunc);\n'
else:
dtrace_script += '{\n printf("[%s] %s\\n", probemod, probefunc);\n'
else:
if options.timestamp:
dtrace_script += '{\n printf("%Y 0x%012p %c[%s %s]\\n", walltimestamp, uregs[R_RDI], probefunc[0], probemod, (string)&probefunc[1]);\n'
else:
dtrace_script += '{\n printf("0x%012p %c[%s %s]\\n", uregs[R_RDI], probefunc[0], probemod, (string)&probefunc[1]);\n'
# Logic to append counting at the termination of script
if options.count:
dtrace_script += ' @numWrites{}[probefunc] = count();\n'.format(os.path.splitext(module_name)[0])
dtrace_script += '}\n'
return dtrace_script
def get_module_pair(module_name, target):
module = target.FindModule(lldb.SBFileSpec(module_name))
    if not module.file.exists:
        # No SBCommandReturnObject is in scope here, so raise instead of
        # referencing an undefined `result`
        raise Exception(
            "Unable to open module name '{}', to see list of images use 'image list -b'".format(module_name))
section = module.FindSection("__TEXT")
lower_bounds = section.GetLoadAddress(target)
upper_bounds = lower_bounds + section.file_size
return (lower_bounds, upper_bounds)
def generate_option_parser():
usage = "usage: %prog [options] arg1 [arg2...]"
parser = optparse.OptionParser(usage=usage, prog='pmodule')
parser.add_option("-n", "--non_objectivec",
action="store_true",
default=False,
dest="non_objectivec",
help="Use pid$target instead of objc$target")
parser.add_option("-c", "--count",
action="store_true",
default=False,
dest="count",
help="Count method calls for framework")
parser.add_option("-a", "--all_modules",
action="store_true",
default=False,
dest="all_modules",
help="Profile all modules. If this is selected, specific modules are ignored and counts are returned when script finishes")
parser.add_option("-A", "--all_modules_output",
action="store_true",
default=False,
dest="all_modules_output",
help="Dumps EVERYTHING. Only execute single commands with this one in lldb")
parser.add_option("-r", "--root_function",
action="store_true",
default=False,
dest="root_function",
help="Only prints the root functions if it's called from another module")
parser.add_option("-f", "--flow_indent",
action="store_true",
default=False,
dest="flow_indent",
help="Adds the flow indent flag")
parser.add_option("-t", "--timestamp",
action="store_true",
default=False,
dest="timestamp",
help="Prints out an approximate timestamp of when the calls were made")
parser.add_option("-g", "--debug",
action="store_true",
default=False,
dest="debug",
help="Doesn't copy the script, just prints it out to stderr")
return parser
| gpl-2.0 | 6,983,881,270,709,104,000 | 41.20922 | 193 | 0.610182 | false | 3.784738 | false | false | false |
sebdelsol/pyload | module/plugins/hoster/EuroshareEu.py | 1 | 2455 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class EuroshareEu(SimpleHoster):
__name__ = "EuroshareEu"
__type__ = "hoster"
__version__ = "0.26"
__pattern__ = r'http://(?:www\.)?euroshare\.(eu|sk|cz|hu|pl)/file/.*'
__description__ = """Euroshare.eu hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]")]
INFO_PATTERN = r'<span style="float: left;"><strong>(?P<N>.+?)</strong> \((?P<S>.+?)\)</span>'
OFFLINE_PATTERN = ur'<h2>S.bor sa nena.iel</h2>|Požadovaná stránka neexistuje!'
FREE_URL_PATTERN = r'<a href="(/file/\d+/[^/]*/download/)"><div class="downloadButton"'
ERR_PARDL_PATTERN = r'<h2>Prebieha s.ahovanie</h2>|<p>Naraz je z jednej IP adresy mo.n. s.ahova. iba jeden s.bor'
ERR_NOT_LOGGED_IN_PATTERN = r'href="/customer-zone/login/"'
URL_REPLACEMENTS = [(r"(http://[^/]*\.)(sk|cz|hu|pl)/", r"\1eu/")]
def setup(self):
self.multiDL = self.resumeDownload = self.premium
self.req.setOption("timeout", 120)
def handlePremium(self):
if self.ERR_NOT_LOGGED_IN_PATTERN in self.html:
self.account.relogin(self.user)
self.retry(reason=_("User not logged in"))
self.download(self.pyfile.url.rstrip('/') + "/download/")
check = self.checkDownload({"login": re.compile(self.ERR_NOT_LOGGED_IN_PATTERN),
"json": re.compile(r'\{"status":"error".*?"message":"(.*?)"')})
if check == "login" or (check == "json" and self.lastCheck.group(1) == "Access token expired"):
self.account.relogin(self.user)
self.retry(reason=_("Access token expired"))
elif check == "json":
self.fail(self.lastCheck.group(1))
def handleFree(self):
if re.search(self.ERR_PARDL_PATTERN, self.html) is not None:
self.longWait(5 * 60, 12)
m = re.search(self.FREE_URL_PATTERN, self.html)
if m is None:
self.error(_("FREE_URL_PATTERN not found"))
parsed_url = "http://euroshare.eu%s" % m.group(1)
self.logDebug("URL", parsed_url)
self.download(parsed_url, disposition=True)
check = self.checkDownload({"multi_dl": re.compile(self.ERR_PARDL_PATTERN)})
if check == "multi_dl":
self.longWait(5 * 60, 12)
getInfo = create_getInfo(EuroshareEu)
| gpl-3.0 | -2,460,588,444,662,003,700 | 35.597015 | 117 | 0.586052 | false | 3.084277 | false | false | false |
tjhunter/phd-thesis-tjhunter | python/build.py | 1 | 1509 | # The definitions of the input and output directories to control where the images are being written.
# Defines some imports for matplotlib as well.
import os
import logging
logging.getLogger().setLevel(logging.INFO)
if "SAVE_DIR" in os.environ:
__path_savefig = os.environ['SAVE_DIR']
logging.info("Saving path: " + __path_savefig)
if "DATA_DIR" in os.environ:
__path_data = os.environ["DATA_DIR"]
logging.info("Data root path: %r", __path_data)
if "MATPLOTLIB_HEADLESS" in os.environ:
print "Configuring matplotlib to headless"
import matplotlib
matplotlib.use("Agg")
from matplotlib import rc
#rc('font',**{'family':'serif','serif':['Times New Roman']})
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
#matplotlib.rcParams['text.usetex'] = True
rc('font', **{'family':'serif'})
def save_path():
return __path_savefig
def data_path():
return __path_data
def data_name(name):
return "%s/%s"%(data_path(),name)
def save_name(name, ensure_dir=True):
fname = "%s/%s"%(save_path(),name)
fdir = os.path.dirname(fname)
logging.debug("Asking for save name %s, (%s)" % (fname, fdir))
if ensure_dir and not os.path.exists(fdir):
logging.info("Creating directory %r ",fdir)
os.makedirs(fdir)
return fname
def save_figure(fig, name, save_pdf=True, save_svg=True):
if save_pdf:
fig.savefig("%s.pdf"%(save_name(name)), bbox_inches='tight')
if save_svg:
fig.savefig("%s.svg"%(save_name(name)), bbox_inches='tight')
| apache-2.0 | 163,665,351,717,856,420 | 31.106383 | 100 | 0.685222 | false | 3.170168 | false | false | false |
essepuntato/opencitations | script/dataset_handler.py | 1 | 9362 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Silvio Peroni <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
__author__ = 'essepuntato'
from datetime import datetime
import re
import os
from rdflib import Graph, Namespace, URIRef
from rdflib.namespace import XSD, RDF, RDFS
from support import create_literal, create_type
from graphlib import GraphSet
from storer import Storer
from reporter import Reporter
class DatasetHandler(object):
DCTERMS = Namespace("http://purl.org/dc/terms/")
DCAT = Namespace("http://www.w3.org/ns/dcat#")
VOID = Namespace("http://rdfs.org/ns/void#")
MTT = Namespace("https://w3id.org/spar/mediatype/text/")
DBR = Namespace("http://dbpedia.org/resource/")
dataset = DCAT.Dataset
datafile = DCAT.Distribution
title = DCTERMS.title
description = DCTERMS.description
issued = DCTERMS.issued
modified = DCTERMS.modified
keyword = DCAT.keyword
subject = DCAT.theme
landing_page = DCAT.landingPage
subset = VOID.subset
sparql_endpoint = VOID.sparqlEndpoint
distribution = DCAT.distribution
license = DCTERMS.license
download_url = DCAT.downloadURL
media_type = DCAT.mediaType
    byte_size = DCAT.byteSize  # the DCAT property is byteSize (camel case)
label = RDFS.label
a = RDF.type
turtle = MTT.turtle
bibliographic_database = DBR.Bibliographic_database
open_access = DBR.Open_access
scholary_communication = DBR.Scholarly_communication
citations = DBR.Citation
def __init__(self, tp_url_real, context_path, context_file_path,
base_iri, base_dir, info_dir, dataset_home, tmp_dir, triplestore_url=None):
self.tp_url = triplestore_url
self.base_iri = base_iri
self.base_dir = base_dir
self.info_dir = info_dir
self.context_path = context_path
self.dataset_home = URIRef(dataset_home)
self.tmp_dir = tmp_dir
self.tp_res = URIRef(tp_url_real)
self.repok = Reporter(prefix="[DatasetHandler: INFO] ")
self.reperr = Reporter(prefix="[DatasetHandler: ERROR] ")
self.st = Storer(context_map={context_path: context_file_path},
repok=self.repok, reperr=self.reperr)
self.st.set_preface_query(
u"DELETE { ?res <%s> ?date } WHERE { ?res a <%s> ; <%s> ?date }" %
(str(DatasetHandler.modified), str(DatasetHandler.dataset), str(DatasetHandler.modified)))
# /START Create Literal
def create_label(self, g, res, string):
return create_literal(g, res, RDFS.label, string)
def create_publication_date(self, g, res, string):
return create_literal(g, res, self.issued, string, XSD.dateTime)
def update_modification_date(self, g, res, string):
g.remove((res, self.modified, None))
return create_literal(g, res, self.modified, string, XSD.dateTime)
def create_title(self, g, res, string):
return create_literal(g, res, self.title, string)
def create_description(self, g, res, string):
return create_literal(g, res, self.description, string)
def create_keyword(self, g, res, string):
return create_literal(g, res, self.keyword, string)
def create_byte_size(self, g, res, string):
return create_literal(g, res, self.byte_size, string, XSD.decimal)
# /END Create Literal
# /START Create Complex Attributes
def has_subject(self, g, res, obj):
g.add((res, self.subject, obj))
def has_landing_page(self, g, res, obj):
g.add((res, self.landing_page, obj))
def has_subset(self, g, res, obj):
g.add((res, self.subset, obj))
def has_sparql_endpoint(self, g, res, obj):
g.add((res, self.sparql_endpoint, obj))
def has_distribution(self, g, res, obj):
g.add((res, self.distribution, obj))
def has_license(self, g, res, obj):
g.add((res, self.license, obj))
def has_download_url(self, g, res, obj):
g.add((res, self.download_url, obj))
def has_media_type(self, g, res, obj):
g.add((res, self.media_type, obj))
# /END Create Complex Attributes
# /START Types
def dataset_type(self, g, res):
create_type(g, res, self.dataset)
def distribution_type(self, g, res):
create_type(g, res, self.datafile)
# /END Types
def update_dataset_info(self, graph_set):
cur_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
subgraphs_to_update = set()
all_graphs = []
for g in graph_set.graphs():
cur_id = g.identifier
if cur_id not in subgraphs_to_update:
subgraphs_to_update.add(cur_id)
cur_dataset_res = URIRef(cur_id)
cur_dataset = self.get_dataset_graph(cur_dataset_res, cur_time)
self.update_modification_date(cur_dataset, cur_dataset_res, cur_time)
all_graphs += [cur_dataset]
if subgraphs_to_update:
cur_occ_res = URIRef(self.base_iri)
cur_occ = self.get_dataset_graph(cur_occ_res, cur_time)
self.update_modification_date(cur_occ, cur_occ_res, cur_time)
for subgraph_id in subgraphs_to_update:
self.has_subset(cur_occ, cur_occ_res, URIRef(subgraph_id))
all_graphs += [cur_occ]
if all_graphs: # Store everything and upload to triplestore
if self.tp_url is None:
self.st.store_all(
self.base_dir, self.base_iri, self.context_path,
self.tmp_dir, all_graphs, True)
else:
self.st.upload_and_store(
self.base_dir, self.tp_url, self.base_iri, self.context_path,
self.tmp_dir, all_graphs, True)
def get_dataset_graph(self, res, cur_time):
dataset_path = self.get_metadata_path_from_resource(res)
if os.path.exists(dataset_path):
return list(self.st.load(dataset_path, tmp_dir=self.tmp_dir).contexts())[0]
else:
dataset_label = "OCC"
dataset_title = "The OpenCitations Corpus"
dataset_description = "The OpenCitations Corpus is an open repository of scholarly " \
"citation data made available under a Creative Commons public " \
"domain dedication, which provides in RDF accurate citation " \
"information (bibliographic references) harvested from the " \
"scholarly literature (described using the SPAR Ontologies) " \
"that others may freely build upon, enhance and reuse for any " \
"purpose, without restriction under copyright or database law."
if re.search("/../$", str(res)) is not None:
g = Graph(identifier=str(res))
dataset_short_name = str(res)[-3:-1]
dataset_name = GraphSet.labels[dataset_short_name]
dataset_title += ": %s dataset" % dataset_name.title()
dataset_description += " This sub-dataset contains all the '%s' resources." % \
dataset_name
dataset_label += " / %s" % dataset_short_name
self.create_keyword(g, res, dataset_name)
else:
g = Graph()
self.has_landing_page(g, res, self.dataset_home)
self.has_sparql_endpoint(g, res, self.tp_res)
self.dataset_type(g, res)
self.create_label(g, res, dataset_label)
self.create_title(g, res, dataset_title)
self.create_description(g, res, dataset_description)
self.create_publication_date(g, res, cur_time)
self.create_keyword(g, res, "OCC")
self.create_keyword(g, res, "OpenCitations")
self.create_keyword(g, res, "OpenCitations Corpus")
self.create_keyword(g, res, "SPAR Ontologies")
self.create_keyword(g, res, "bibliographic references")
self.create_keyword(g, res, "citations")
self.has_subject(g, res, self.bibliographic_database)
self.has_subject(g, res, self.scholary_communication)
self.has_subject(g, res, self.open_access)
self.has_subject(g, res, self.citations)
return g
def get_metadata_path_from_resource(self, dataset_res):
return self.get_metadata_path_from_iri(str(dataset_res))
def get_metadata_path_from_iri(self, dataset_iri):
return re.sub("^%s" % self.base_iri, self.base_dir, dataset_iri) + "index.json"
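# Construction sketch: every IRI and path below is a placeholder, and
# `graph_set` stands for a populated graphlib.GraphSet produced by the rest of
# the pipeline.
def _example_update_dataset(graph_set):
    handler = DatasetHandler(
        tp_url_real="https://example.org/sparql",
        context_path="corpus/context.json",
        context_file_path="/srv/oc/context.json",
        base_iri="https://example.org/corpus/",
        base_dir="/srv/oc/corpus/",
        info_dir="/srv/oc/info/",
        dataset_home="https://example.org/",
        tmp_dir="/tmp/oc",
        triplestore_url="http://localhost:9999/blazegraph/sparql")
    handler.update_dataset_info(graph_set)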
| isc | -1,750,072,562,376,391,200 | 41.554545 | 102 | 0.616535 | false | 3.599385 | false | false | false |
NickRuiz/wikitrans-pootle | local_apps/wt_articles/management/commands/fetch_translations.py | 1 | 3090 | from django.core.management.base import NoArgsCommand, CommandError
from django.conf import settings
#from wt_articles.models import ArticleOfInterest, SourceArticle, TranslationRequest
from wt_translation.models import ServerlandHost
from wt_translation.models import UnsupportedLanguagePair, UndefinedTranslator, ServerlandConfigError
#
#from pootle_project.models import Project
#from pootle_translationproject.models import TranslationProject
#from pootle_language.models import Language
#
#import xmlrpclib
#import json
class Command(NoArgsCommand):
help = "Looks for completed translation requests from all Serverland hosts and updates their corresponding .po files."
def handle_error(self, host, error):
print "An error occurred with Serverland host '%s' (%s):" % (host.shortname, host.url)
print error #.exc_info()
def handle_noargs(self, **options):
# Fetch all of the hosts
hosts = ServerlandHost.objects.all()
for host in hosts:
try:
host.fetch_translations()
except UnsupportedLanguagePair as ex:
self.handle_error(host, ex)
except UndefinedTranslator as ex:
self.handle_error(host, ex)
except ServerlandConfigError as ex:
self.handle_error(host, ex)
except Exception as ex:
self.handle_error(host, ex)
# token = settings.SERVERLAND_TOKEN
#
# print "Using token " + token
# print "xmlrpclib.ServerProxy(%s)" % settings.SERVERLAND_XMLRPC
# proxy = xmlrpclib.ServerProxy(settings.SERVERLAND_XMLRPC)
# print "Connected!"
#
# # Fetch a list of the serverland workers
# # for worker in proxy.list_workers(token):
# # workerName = worker['shortname']
#
# # Check serverland for completed translations
# print "proxy.list_requests('%s')" % token
# requests = proxy.list_requests(token)
#
# print "Call finished"
# # If only one result is retrieved, the dict is not in a list
# if not isinstance(requests, list):
# print "Retrieved only one result"
# requests = [requests]
# else:
# print "Retrieved multiple results"
#
# # Display the translation requests that are "ready"
# completedRequests = [request for request in requests if request['ready']]
#
# print "Showing the completed requests"
# # Process the completed requests
# for completedRequest in completedRequests:
# # Get the result
# result = proxy.list_results(token, completedRequest['request_id'])
#
# # TODO: Save the result
# print result['shortname'], result['request_id']
# print result['result']
#
# # TODO: Delete the request
# # proxy.delete_translation(completedRequest['request_id'])
| gpl-2.0 | 2,376,098,625,678,812,700 | 38.684211 | 122 | 0.612945 | false | 4.098143 | false | false | false |
zepto/webbrowser | webbrowser/functions.py | 1 | 3120 | # This file is part of browser, and contains miscellaneous functions.
#
# Copyright (C) 2009-2010 Josiah Gordon <[email protected]>
#
# browser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import warnings
import subprocess
from time import strftime
from contextlib import contextmanager
import gtk
import glib
debug = True
def print_message(message, color=None, data_color=None):
    """ Print a timestamped message, coloured with ANSI escape codes, when the
    module level 'debug' flag is set.
    """
    if debug:
        date = strftime('%h %e %H:%M:%S')
        if color:
            if not data_color:
                message_list = message.split(':', 1)
                messagestr = "\033[38;5;75m%s \033[0;%sm%s\033[m:%s" % (date, color, message_list[0], ''.join(message_list[1:]))
            else:
                message_list = message.split(':', 2)
                messagestr = "\033[38;5;75m%s \033[0;%sm%s\033[m:\033[%sm%s\033[m:%s" % (date, color, message_list[0], data_color, message_list[1], ''.join(message_list[2:]))
        else:
            messagestr = "\033[38;5;75m%s\033[m %s" % (date, message)
        print(messagestr)
@contextmanager
def redirect_warnings(warning_func):
""" _redirect_warnings(warning_func) -> Setup warning redirector to
redirect warnings to warning_func. Use this function with 'with'
statements.
"""
# Save old warning function
old_showwarning = warnings.showwarning
# Override default warning function
warnings.showwarning = warning_func
try:
# Run commands in 'with' statement
yield
finally:
# After 'with' block exits restore showwarning function
warnings.showwarning = old_showwarning
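def _example_redirect_warnings():
    """ _example_redirect_warnings() -> Collect warnings raised inside the
    'with' block instead of printing them; the collector signature mirrors
    warnings.showwarning.
    """
    collected = []
    def collect(message, category, filename, lineno, file=None, line=None):
        collected.append(str(message))
    with redirect_warnings(collect):
        warnings.warn("example warning")
    return collected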
def extern_load_uri(uri):
""" extern_load_uri(uri) -> First attempts to load the uri with
gtk.show_uri. If that fails it trys xdg-open, gnome-open, and exo-open.
"""
try:
# Try gtk.show_uri.
ret = gtk.show_uri(gtk.gdk.screen_get_default(), uri,
int(glib.get_current_time()))
if ret:
return True
except Exception as err:
print("Error (%s) while loading uri: %s" % (err, uri))
app_list = ['xdg-open', 'gnome-open', 'exo-open']
for app in app_list:
try:
proc_tup = glib.spawn_async([app, uri],
flags=glib.SPAWN_SEARCH_PATH)
except Exception as err:
print("Error (%s) while loading uri (%s) with app (%s)" % \
(err, uri, app))
# Go to the next app if there was an error.
continue
# If it gets here than it spawned without error.
return True
return False
| gpl-3.0 | 1,126,589,959,228,223,400 | 31.842105 | 159 | 0.622436 | false | 3.727599 | false | false | false |
68foxboris/enigma2-openpli-vuplus | lib/python/Screens/ParentalControlSetup.py | 7 | 8615 | from Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import NumberActionMap
from Components.config import config, getConfigListEntry, ConfigNothing, NoSave, configfile
from Components.Sources.StaticText import StaticText
from Screens.MessageBox import MessageBox
from Screens.InputBox import PinInput
from Tools.BoundFunction import boundFunction
class ProtectedScreen:
def __init__(self):
if self.isProtected() and config.ParentalControl.servicepin[0].value:
self.onFirstExecBegin.append(boundFunction(self.session.openWithCallback, self.pinEntered, PinInput, pinList=[x.value for x in config.ParentalControl.servicepin], triesEntry=config.ParentalControl.retries.servicepin, title=_("Please enter the correct pin code"), windowTitle=_("Enter pin code")))
def isProtected(self):
return (config.ParentalControl.servicepinactive.value or config.ParentalControl.setuppinactive.value)
def pinEntered(self, result):
if result is None:
self.closeProtectedScreen()
elif not result:
self.session.openWithCallback(self.closeProtectedScreen, MessageBox, _("The pin code you entered is wrong."), MessageBox.TYPE_ERROR, timeout=3)
def closeProtectedScreen(self, result=None):
self.close(None)
class ParentalControlSetup(Screen, ConfigListScreen, ProtectedScreen):
def __init__(self, session):
Screen.__init__(self, session)
ProtectedScreen.__init__(self)
# for the skin: first try ParentalControlSetup, then Setup, this allows individual skinning
self.skinName = ["ParentalControlSetup", "Setup" ]
self.setup_title = _("Parental control setup")
self.setTitle(self.setup_title)
self.onChangedEntry = [ ]
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
self.createSetup(initial=True)
self["actions"] = NumberActionMap(["SetupActions", "MenuActions"],
{
"cancel": self.keyCancel,
"save": self.keySave,
"menu": self.closeRecursive,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self.recursive = False
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def isProtected(self):
return (not config.ParentalControl.setuppinactive.value and config.ParentalControl.servicepinactive.value) or\
(not config.ParentalControl.setuppinactive.value and config.ParentalControl.config_sections.configuration.value) or\
(not config.ParentalControl.config_sections.configuration.value and config.ParentalControl.setuppinactive.value and not config.ParentalControl.config_sections.main_menu.value)
def createSetup(self, initial=False):
self.reloadLists = None
self.list = []
if config.ParentalControl.servicepin[0].value or config.ParentalControl.servicepinactive.value or config.ParentalControl.setuppinactive.value or not initial:
if config.ParentalControl.servicepin[0].value:
pin_entry_text = _("Change PIN") + _(": 0000 - default (disabled)")
else:
pin_entry_text = _("Set PIN")
self.changePin = getConfigListEntry(pin_entry_text, NoSave(ConfigNothing()))
self.list.append(self.changePin)
self.list.append(getConfigListEntry(_("Protect services"), config.ParentalControl.servicepinactive))
if config.ParentalControl.servicepinactive.value:
self.list.append(getConfigListEntry(_("Remember service PIN"), config.ParentalControl.storeservicepin))
if config.ParentalControl.storeservicepin.value != "never":
self.list.append(getConfigListEntry(_("Hide parentel locked services"), config.ParentalControl.hideBlacklist))
self.list.append(getConfigListEntry(_("Protect on epg age"), config.ParentalControl.age))
self.reloadLists = getConfigListEntry(_("Reload blacklists"), NoSave(ConfigNothing()))
self.list.append(self.reloadLists)
self.list.append(getConfigListEntry(_("Protect Screens"), config.ParentalControl.setuppinactive))
if config.ParentalControl.setuppinactive.value:
self.list.append(getConfigListEntry(_("Protect main menu"), config.ParentalControl.config_sections.main_menu))
self.list.append(getConfigListEntry(_("Protect timer menu"), config.ParentalControl.config_sections.timer_menu))
self.list.append(getConfigListEntry(_("Protect plugin browser"), config.ParentalControl.config_sections.plugin_browser))
self.list.append(getConfigListEntry(_("Protect configuration"), config.ParentalControl.config_sections.configuration))
self.list.append(getConfigListEntry(_("Protect standby menu"), config.ParentalControl.config_sections.standby_menu))
self.list.append(getConfigListEntry(_("Protect software update screen"), config.ParentalControl.config_sections.software_update))
self.list.append(getConfigListEntry(_("Protect manufacturer reset screen"), config.ParentalControl.config_sections.manufacturer_reset))
self.list.append(getConfigListEntry(_("Protect movie list"), config.ParentalControl.config_sections.movie_list))
self.list.append(getConfigListEntry(_("Protect context menus"), config.ParentalControl.config_sections.context_menus))
if config.usage.menu_sort_mode.value == "user":
self.list.append(getConfigListEntry(_("Protect menu sort"), config.ParentalControl.config_sections.menu_sort))
else:
self.changePin = getConfigListEntry(_("Enable parental protection"), NoSave(ConfigNothing()))
self.list.append(self.changePin)
self["config"].list = self.list
self["config"].setList(self.list)
def keyOK(self):
if self["config"].l.getCurrentSelection() == self.changePin:
if config.ParentalControl.servicepin[0].value:
self.session.openWithCallback(self.oldPinEntered, PinInput, pinList=[x.value for x in config.ParentalControl.servicepin], triesEntry=config.ParentalControl.retries.servicepin, title=_("Please enter the old PIN code"), windowTitle=_("Enter pin code"))
else:
self.oldPinEntered(True)
elif self["config"].l.getCurrentSelection() == self.reloadLists:
from Components.ParentalControl import parentalControl
parentalControl.open()
self.session.open(MessageBox, _("Lists reloaded!"), MessageBox.TYPE_INFO, timeout=3)
else:
ConfigListScreen.keyRight(self)
self.createSetup()
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.createSetup()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.createSetup()
def cancelCB(self, value):
self.keySave()
def keyCancel(self):
if self["config"].isChanged():
self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"))
else:
self.close()
def cancelConfirm(self, answer):
if answer:
for x in self["config"].list:
x[1].cancel()
self.close()
def keySave(self):
if self["config"].isChanged():
for x in self["config"].list:
x[1].save()
configfile.save()
from Components.ParentalControl import parentalControl
parentalControl.hideBlacklist()
self.close(self.recursive)
def closeRecursive(self):
self.recursive = True
self.keySave()
def keyNumberGlobal(self, number):
pass
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
def oldPinEntered(self, answer):
if answer:
self.session.openWithCallback(self.newPinEntered, PinInput, title=_("Please enter the new PIN code"), windowTitle=_("Enter pin code"))
elif answer == False:
self.session.open(MessageBox, _("The pin code you entered is wrong."), MessageBox.TYPE_ERROR, timeout=3)
def newPinEntered(self, answer):
if answer is not None:
self.session.openWithCallback(boundFunction(self.confirmNewPinEntered, answer), PinInput, title=_("Please re-enter the new PIN code"), windowTitle=_("Enter pin code"))
def confirmNewPinEntered(self, answer1, answer2):
if answer2 is not None:
if answer1 == answer2:
warning_text = ""
if not answer2:
warning_text = _("You PIN code is 0000. This is the default PIN code and it disable parental control!\n")
self.session.open(MessageBox, warning_text + _("The PIN code has been changed successfully."), MessageBox.TYPE_INFO, timeout=3)
config.ParentalControl.servicepin[0].value = answer1
config.ParentalControl.servicepin[0].save()
self.createSetup()
else:
self.session.open(MessageBox, _("The PIN codes you entered are different."), MessageBox.TYPE_ERROR, timeout=3)
| gpl-2.0 | -2,981,760,938,787,400,000 | 45.069519 | 299 | 0.758561 | false | 3.579144 | true | false | false |
anubia/py_pg_tools | dropper.py | 2 | 6431 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from casting.casting import Casting
from const.const import Messenger as Msg
from const.const import Queries
from date_tools.date_tools import DateTools
from logger.logger import Logger
class Dropper:
# An object with connection parameters to connect to PostgreSQL
connecter = None
logger = None # Logger to show and log some messages
dbnames = [] # List of databases to be removed
def __init__(self, connecter=None, dbnames=[], logger=None):
if logger:
self.logger = logger
else:
self.logger = Logger()
if connecter:
self.connecter = connecter
else:
self.logger.stop_exe(Msg.NO_CONNECTION_PARAMS)
if isinstance(dbnames, list):
self.dbnames = dbnames
else:
self.dbnames = Casting.str_to_list(dbnames)
msg = Msg.DROPPER_VARS.format(server=self.connecter.server,
user=self.connecter.user,
port=self.connecter.port,
dbnames=self.dbnames)
self.logger.debug(Msg.DROPPER_VARS_INTRO)
self.logger.debug(msg)
def drop_pg_db(self, dbname, pg_superuser):
'''
Target:
- remove a database in PostgreSQL.
Parameters:
- dbname: the PostgreSQL database's name which is going to be
removed.
- pg_superuser: a flag which indicates whether the current user is
PostgreSQL superuser or not.
'''
delete = False
try:
self.connecter.cursor.execute(Queries.PG_DB_EXISTS, (dbname, ))
result = self.connecter.cursor.fetchone()
if result:
pg_pid = self.connecter.get_pid_str()
formatted_sql = Queries.BACKEND_PG_DB_EXISTS.format(
pg_pid=pg_pid, target_db=dbname)
self.connecter.cursor.execute(formatted_sql)
result = self.connecter.cursor.fetchone()
# If there are not any connections to the target database...
if not result:
# Users who are not superusers will only be able to drop
# the databases they own
if not pg_superuser:
self.connecter.cursor.execute(Queries.GET_PG_DB_OWNER,
(dbname, ))
db = self.connecter.cursor.fetchone()
if db['owner'] != self.connecter.user:
msg = Msg.DROP_DB_NOT_ALLOWED.format(
user=self.connecter.user, dbname=dbname)
self.logger.highlight('warning', msg, 'yellow')
else:
delete = True
else:
delete = True
if delete:
# Get the database's "datallowconn" value
datallowconn = self.connecter.get_datallowconn(dbname)
# If datallowconn is allowed, change it temporarily
if datallowconn:
# Disallow connections to the database during the
# process
result = self.connecter.disallow_db_conn(dbname)
if not result:
msg = Msg.DISALLOW_CONN_TO_PG_DB_FAIL.format(
dbname=dbname)
self.logger.highlight('warning', msg, 'yellow')
fmt_query_drop_db = Queries.DROP_PG_DB.format(
dbname=dbname)
start_time = DateTools.get_current_datetime()
# Drop the database
self.connecter.cursor.execute(fmt_query_drop_db)
end_time = DateTools.get_current_datetime()
# Get and show the process' duration
diff = DateTools.get_diff_datetimes(start_time,
end_time)
msg = Msg.DROP_DB_DONE.format(dbname=dbname, diff=diff)
self.logger.highlight('info', msg, 'green')
# If datallowconn was allowed, leave it as it was
if datallowconn:
# Allow connections to the database at the end of
# the process
result = self.connecter.allow_db_conn(dbname)
if not result:
msg = Msg.ALLOW_CONN_TO_PG_DB_FAIL.format(
dbname=dbname)
self.logger.highlight('warning', msg, 'yellow')
else:
msg = Msg.ACTIVE_CONNS_ERROR.format(dbname=dbname)
self.logger.highlight('warning', msg, 'yellow')
else:
msg = Msg.DB_DOES_NOT_EXIST.format(dbname=dbname)
self.logger.highlight('warning', msg, 'yellow')
except Exception as e:
            self.logger.debug('Error in "drop_pg_db" function: '
'{}.'.format(str(e)))
self.logger.highlight('warning', Msg.DROP_DB_FAIL.format(
dbname=dbname), 'yellow')
def drop_pg_dbs(self, dbnames):
'''
Target:
- remove a list of databases in PostgreSQL.
'''
self.logger.highlight('info', Msg.BEGINNING_DROPPER, 'white')
# Check if the role of user connected to PostgreSQL is superuser
pg_superuser = self.connecter.is_pg_superuser()
if dbnames:
for dbname in self.dbnames:
msg = Msg.PROCESSING_DB.format(dbname=dbname)
self.logger.highlight('info', msg, 'cyan')
self.drop_pg_db(dbname, pg_superuser)
else:
self.logger.highlight('warning', Msg.DROPPER_HAS_NOTHING_TO_DO,
'yellow', effect='bold')
self.logger.highlight('info', Msg.DROP_DBS_DONE, 'green',
effect='bold')
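# Usage sketch: `connecter` is assumed to be an already-configured PostgreSQL
# connection object from this project (exposing cursor, server, user, port and
# the helper methods used above); the database names are placeholders.
def _example_drop(connecter):
    dropper = Dropper(connecter=connecter, dbnames=['obsolete_db_1', 'obsolete_db_2'])
    dropper.drop_pg_dbs(dropper.dbnames)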
| agpl-3.0 | 8,947,907,664,450,909,000 | 37.73494 | 79 | 0.493624 | false | 4.672965 | false | false | false |
ReactiveX/RxPY | rx/core/observable/defer.py | 1 | 1155 | from typing import Callable, Union
from asyncio import Future
from rx import throw, from_future
from rx.core import Observable
from rx.core.typing import Scheduler
from rx.internal.utils import is_future
def _defer(factory: Callable[[Scheduler], Union[Observable, Future]]
) -> Observable:
"""Returns an observable sequence that invokes the specified factory
function whenever a new observer subscribes.
Example:
>>> res = defer(lambda: of(1, 2, 3))
Args:
observable_factory: Observable factory function to invoke for
each observer that subscribes to the resulting sequence.
Returns:
An observable sequence whose observers trigger an invocation
of the given observable factory function.
"""
def subscribe(observer, scheduler=None):
try:
result = factory(scheduler)
except Exception as ex: # By design. pylint: disable=W0703
return throw(ex).subscribe(observer)
result = from_future(result) if is_future(result) else result
return result.subscribe(observer, scheduler=scheduler)
return Observable(subscribe)
| mit | 7,338,568,146,305,625,000 | 32 | 72 | 0.697835 | false | 4.547244 | false | false | false |
lakehanne/ensenso | ensenso_detect/manikin/ununsed/unused_funcs.py | 1 | 5244 | # May 18, 2017:: 9:59am
# This was the lone regressor extracted from the original classifier in
# the regressor code. We have phased this in to the trainClassifierRegressor
# code in /manikin/main.py
def trainRegressor(args, resnet, bbox_loader):
r"""
Following the interpretable learning from self-driving examples:
https://arxiv.org/pdf/1703.10631.pdf we can extract the last
feature cube x_t from the resnet model as a set of L = W x H
vectors of depth D, and stack a regressor module to obtain
bounding boxes
"""
#hyperparameters
numLayers, seqLength = 2, 5
noutputs, lr = 12, args.rnnLR
inputSize, nHidden = 128, [64, 32]
batchSize, maxIter = args.cbatchSize, args.cmaxIter
#extract feture cube of last layer and reshape it
res_classifier, feature_cube = None, None
if args.classifier: #use pre-trained classifier
res_classifier = ResNet(ResidualBlock, [3, 3, 3])
res_classifier.load_state_dict(torch.load('models225/' + args.classifier))
#freeze optimized layers
for param in res_classifier.parameters():
param.requires_grad = False
else:
res_classifier = resnet
#extract last convolution layer
last_layer, feat_cube = res_classifier.layer3, []
for param in last_layer.parameters():
if param.dim() > 1: # extract only conv cubes
feat_cube.append(param)
# for i in range(len(feat_cube)):
# print(feat_cube[i].size())
# print('b4 softmax: ', len(feat_cube))
'''
get the soft attention mechanism's x_t vector::
see this:
https://arxiv.org/pdf/1511.04119.pdf
'''
xt = []
for x in xrange(len(feat_cube)):
temp = softmax(feat_cube[x])
xt.append(temp)
# print(xt[x].size())
# print('after softmax: ', len(xt))
time.sleep(100)
    # accumulate all the features of the fc layer into a list
    params_list = []
    for p in res_classifier.fc.parameters():
        params_list.append(p) #will contain weights and biases
params_weight, params_bias = params_list[0], params_list[1]
#reshape params_weight
params_weight = params_weight.view(128)
X_tr = int(0.8*len(params_weight))
X_te = int(0.2*len(params_weight))
X = len(params_weight)
#reshape inputs
train_X = torch.unsqueeze(params_weight, 0).expand(seqLength, 1, X)
test_X = torch.unsqueeze(params_weight[X_tr:], 0).expand(seqLength, 1, X_te+1)
# Get regressor model and predict bounding boxes
regressor = StackRegressive(res_cube=res_classifier, inputSize=128, nHidden=[64,32,12], noutputs=12,\
batchSize=args.cbatchSize, cuda=args.cuda, numLayers=2)
#initialize the weights of the network with xavier uniform initialization
for name, weights in regressor.named_parameters():
#use normal initialization for now
init.uniform(weights, 0, 1)
if(args.cuda):
train_X = train_X.cuda()
test_X = test_X.cuda()
# regressor = regressor.cuda()
#define optimizer
optimizer = optim.SGD(regressor.parameters(), lr)
# Forward + Backward + Optimize
targ_X = None
for _, targ_X in bbox_loader:
targ_X = targ_X
if args.cuda:
targ_X = targ_X.cuda()
for epoch in xrange(maxIter):
for i in xrange(targ_X.size(1)*10):
inputs = train_X
targets = Variable(targ_X[:,i:i+seqLength,:])
optimizer.zero_grad()
outputs = regressor(inputs)
#reshape targets for inputs
targets = targets.view(seqLength, -1)
loss = regressor.criterion(outputs, targets)
loss.backward()
optimizer.step()
if epoch % 5 == 0 and epoch >0:
lr *= 1./epoch
optimizer = optim.SGD(regressor.parameters(), lr)
print('Epoch: {}, \tIter: {}, \tLoss: {:.4f}'.format(
epoch, i, loss.data[0]))
if i+seqLength >= int(targ_X.size(1)):
break
# #initialize the weights of the network with xavier uniform initialization
# for name, weights in regressor.named_parameters():
# #use normal initialization for now
# init.uniform(weights, 0, 1)
# #extract last convolution layer
# last_layer, feat_cube = res_classifier.layer3, []
# for param in last_layer.parameters():
# if param.dim() > 1: # extract only conv cubes
# feat_cube.append(param)
# '''
# get the soft attention mechanism's x_t vector::
# see this:
# https://arxiv.org/pdf/1511.04119.pdf
# '''
# lt = [] # this contains the soft max
# for x in xrange(len(feat_cube)):
# temp = softmax(feat_cube[x])
# lt.append(temp)
#
# # find xt = Sum_i^(KxK) l_t_i X_t_i
# xt = []
# for i in xrange(len(feat_cube)):
# temp = torch.mul(lt[i], feat_cube[i])
# xt.append(temp)
# print(xt[i].size())
#
# # Now feed each tensor in xt through LSTM layers
# '''
# feat cube is of shape
# 64L, 32L, 3L, 3L
# 64L, 64L, 3L, 3L
# 64L, 32L, 3L, 3L
# 64L, 64L, 3L, 3L
# 64L, 64L, 3L, 3L
# 64L, 64L, 3L, 3L
# 64L, 64L, 3L, 3L
# '''
| mit | -8,447,022,509,786,860,000 | 32.832258 | 105 | 0.602021 | false | 3.438689 | false | false | false |
Flexget/Flexget | flexget/components/backlog/backlog.py | 3 | 4349 | from datetime import datetime
from loguru import logger
from flexget import plugin
from flexget.components.backlog.db import BacklogEntry, clear_entries, get_entries
from flexget.event import event
from flexget.manager import Session
from flexget.utils.database import with_session
from flexget.utils.serialization import serialize
from flexget.utils.tools import parse_timedelta
logger = logger.bind(name='backlog')
class InputBacklog:
"""
Keeps task history for given amount of time.
Example::
backlog: 4 days
Rarely useful for end users, mainly used by other plugins.
"""
schema = {'type': 'string', 'format': 'interval'}
@plugin.priority(plugin.PRIORITY_LAST)
def on_task_input(self, task, config):
# Get a list of entries to inject
injections = self.get_injections(task)
if config:
# If backlog is manually enabled for this task, learn the entries.
self.learn_backlog(task, config)
# Return the entries from backlog that are not already in the task
return injections
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_metainfo(self, task, config):
# Take a snapshot of any new entries' states before metainfo event in case we have to store them to backlog
for entry in task.entries:
entry['_backlog_snapshot'] = serialize(entry)
def on_task_abort(self, task, config):
"""Remember all entries until next execution when task gets aborted."""
if task.entries:
logger.debug('Remembering all entries to backlog because of task abort.')
self.learn_backlog(task)
@with_session
def add_backlog(self, task, entry, amount='', session=None):
"""Add single entry to task backlog
If :amount: is not specified, entry will only be injected on next execution."""
snapshot = entry.get('_backlog_snapshot')
if not snapshot:
if task.current_phase != 'input':
# Not having a snapshot is normal during input phase, don't display a warning
logger.warning(
'No input snapshot available for `{}`, using current state', entry['title']
)
snapshot = serialize(entry)
expire_time = datetime.now() + parse_timedelta(amount)
backlog_entry = (
session.query(BacklogEntry)
.filter(BacklogEntry.title == entry['title'])
.filter(BacklogEntry.task == task.name)
.first()
)
if backlog_entry:
# If there is already a backlog entry for this, update the expiry time if necessary.
if backlog_entry.expire < expire_time:
logger.debug('Updating expiry time for {}', entry['title'])
backlog_entry.expire = expire_time
else:
logger.debug('Saving {}', entry['title'])
backlog_entry = BacklogEntry()
backlog_entry.title = entry['title']
backlog_entry.entry = snapshot
backlog_entry.task = task.name
backlog_entry.expire = expire_time
session.add(backlog_entry)
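    # Illustrative (hypothetical) call from another plugin that has already
    # obtained this plugin instance through FlexGet's plugin registry:
    #   backlog_plugin.add_backlog(task, entry, amount='3 hours')
    # Without an amount, the entry is only re-injected on the next execution.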
def learn_backlog(self, task, amount=''):
"""Learn current entries into backlog. All task inputs must have been executed."""
with Session() as session:
for entry in task.entries:
self.add_backlog(task, entry, amount, session=session)
@with_session
def get_injections(self, task, session=None):
"""Insert missing entries from backlog."""
entries = []
for backlog_entry in get_entries(task=task.name, session=session):
entry = backlog_entry.entry
# this is already in the task
if task.find_entry(title=entry['title'], url=entry['url']):
continue
logger.debug('Restoring {}', entry['title'])
entries.append(entry)
if entries:
logger.verbose('Added {} entries from backlog', len(entries))
# purge expired
purged = clear_entries(task=task.name, all=False, session=session)
logger.debug('{} entries purged from backlog', purged)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(InputBacklog, 'backlog', builtin=True, api_ver=2)
| mit | -8,171,266,410,993,751,000 | 36.817391 | 115 | 0.627501 | false | 4.301682 | false | false | false |
michaelsmit/openparliament | parliament/imports/parl_cmte.py | 1 | 11228 | import datetime
import logging
import re
import time
import urllib2
from django.db import transaction
from BeautifulSoup import BeautifulSoup
import lxml.html
from parliament.committees.models import (Committee, CommitteeMeeting,
CommitteeActivity, CommitteeActivityInSession,
CommitteeReport, CommitteeInSession)
from parliament.core.models import Session
from parliament.hansards.models import Document
logger = logging.getLogger(__name__)
COMMITTEE_LIST_URL = 'http://www2.parl.gc.ca/CommitteeBusiness/CommitteeList.aspx?Language=E&Parl=%d&Ses=%d&Mode=2'
@transaction.commit_on_success
def import_committee_list(session=None):
if session is None:
session = Session.objects.current()
def make_committee(namestring, parent=None):
#print namestring
match = re.search(r'^(.+) \(([A-Z0-9]{3,5})\)$', namestring)
(name, acronym) = match.groups()
try:
return Committee.objects.get_by_acronym(acronym, session)
except Committee.DoesNotExist:
            committee, created = Committee.objects.get_or_create(name=name.strip(), parent=parent)
if created:
logger.warning(u"Creating committee: %s, %s" % (committee.name_en, committee.slug))
CommitteeInSession.objects.get_or_create(
committee=committee, session=session, acronym=acronym)
return committee
soup = BeautifulSoup(urllib2.urlopen(COMMITTEE_LIST_URL %
(session.parliamentnum, session.sessnum)))
for li in soup.findAll('li', 'CommitteeItem'):
com = make_committee(li.find('a').string)
for sub in li.findAll('li', 'SubCommitteeItem'):
make_committee(sub.find('a').string, parent=com)
return True
def _docid_from_url(u):
return int(re.search(r'DocId=(\d+)&', u).group(1))
def _12hr(hour, ampm):
hour = int(hour)
hour += 12 * bool('p' in ampm.lower())
if hour % 12 == 0:
# noon, midnight
hour -= 12
return hour
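# Sanity examples for _12hr (these follow directly from the arithmetic above):
#   _12hr('12', 'a.m.') -> 0    (midnight)
#   _12hr('12', 'p.m.') -> 12   (noon)
#   _12hr('3', 'p.m.')  -> 15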
def _parse_date(d):
"""datetime objects from e.g. March 11, 2011"""
return datetime.date(
*time.strptime(d, '%B %d, %Y')[:3]
)
def import_committee_documents(session):
for comm in Committee.objects.filter(sessions=session).order_by('-parent'):
# subcommittees last
import_committee_meetings(comm, session)
import_committee_reports(comm, session)
time.sleep(1)
COMMITTEE_MEETINGS_URL = 'http://www2.parl.gc.ca/CommitteeBusiness/CommitteeMeetings.aspx?Cmte=%(acronym)s&Language=E&Parl=%(parliamentnum)d&Ses=%(sessnum)d&Mode=1'
@transaction.commit_on_success
def import_committee_meetings(committee, session):
acronym = committee.get_acronym(session)
url = COMMITTEE_MEETINGS_URL % {'acronym': acronym,
'parliamentnum': session.parliamentnum,
'sessnum': session.sessnum}
resp = urllib2.urlopen(url)
tree = lxml.html.parse(resp)
root = tree.getroot()
for mtg_row in root.cssselect('.MeetingTableRow'):
number = int(re.sub(r'\D', '', mtg_row.cssselect('.MeetingNumber')[0].text))
assert number > 0
try:
meeting = CommitteeMeeting.objects.select_related('evidence').get(
committee=committee,session=session, number=number)
except CommitteeMeeting.DoesNotExist:
meeting = CommitteeMeeting(committee=committee,
session=session, number=number)
meeting.date = _parse_date(mtg_row.cssselect('.MeetingDate')[0].text)
timestring = mtg_row.cssselect('.MeetingTime')[0].text_content()
match = re.search(r'(\d\d?):(\d\d) ([ap]\.m\.)(?: - (\d\d?):(\d\d) ([ap]\.m\.))?\s\(',
timestring, re.UNICODE)
meeting.start_time = datetime.time(_12hr(match.group(1), match.group(3)), int(match.group(2)))
if match.group(4):
meeting.end_time = datetime.time(_12hr(match.group(4), match.group(6)), int(match.group(5)))
notice_link = mtg_row.cssselect('.MeetingPublicationIcon[headers=thNoticeFuture] a')
if notice_link:
meeting.notice = _docid_from_url(notice_link[0].get('href'))
minutes_link = mtg_row.cssselect('.MeetingPublicationIcon[headers=thMinutesPast] a')
if minutes_link:
meeting.minutes = _docid_from_url(minutes_link[0].get('href'))
evidence_link = mtg_row.cssselect('.MeetingPublicationIcon[headers=thEvidencePast] a')
if evidence_link:
evidence_id = _docid_from_url(evidence_link[0].get('href'))
if meeting.evidence_id:
if meeting.evidence.source_id != evidence_id:
raise Exception("Evidence docid mismatch for %s %s: %s %s" %
(committee.acronym, number, evidence_id, meeting.evidence.source_id))
else:
# Evidence hasn't changed; we don't need to worry about updating
continue
else:
if Document.objects.filter(source_id=evidence_id).exists():
raise Exception("Found evidence source_id %s, but it already exists" % evidence_id)
meeting.evidence = Document.objects.create(
source_id=evidence_id,
date=meeting.date,
session=session,
document_type=Document.EVIDENCE)
meeting.webcast = bool(mtg_row.cssselect('.MeetingStatusIcon img[title=Webcast]'))
meeting.in_camera = bool(mtg_row.cssselect('.MeetingStatusIcon img[title*="in camera"]'))
if not meeting.televised:
meeting.televised = bool(mtg_row.cssselect('.MeetingStatusIcon img[title*="televised"]'))
if not meeting.travel:
meeting.travel = bool(mtg_row.cssselect('.MeetingStatusIcon img[title*="travel"]'))
meeting.save()
for study_link in mtg_row.cssselect('.MeetingStudyActivity a'):
name = study_link.text.strip()
study = get_activity_by_url(study_link.get('href'))
meeting.activities.add(study)
return True
COMMITTEE_ACTIVITY_URL = 'http://www.parl.gc.ca/CommitteeBusiness/StudyActivityHome.aspx?Stac=%(activity_id)d&Language=%(language)s&Parl=%(parliamentnum)d&Ses=%(sessnum)d'
def get_activity_by_url(activity_url):
activity_id = int(re.search(r'Stac=(\d+)', activity_url).group(1))
session = Session.objects.get_from_parl_url(activity_url)
try:
return CommitteeActivityInSession.objects.get(source_id=activity_id).activity
except CommitteeActivityInSession.DoesNotExist:
pass
activity = CommitteeActivity()
url = COMMITTEE_ACTIVITY_URL % {
'activity_id': activity_id,
'language': 'E',
'parliamentnum': session.parliamentnum,
'sessnum': session.sessnum
}
root = lxml.html.parse(urllib2.urlopen(url)).getroot()
acronym = re.search(r'\(([A-Z][A-Z0-9]{2,4})\)', root.cssselect('div.HeaderTitle span')[0].text).group(1)
activity.committee = CommitteeInSession.objects.get(acronym=acronym, session=session).committee
activity_type = root.cssselect('span.StacTitlePrefix')[0]
activity.study = 'Study' in activity_type.text
activity.name_en = activity_type.tail.strip()[:500]
# See if this already exists for another session
try:
activity = CommitteeActivity.objects.get(
committee=activity.committee,
study=activity.study,
name_en=activity.name_en
)
except CommitteeActivity.DoesNotExist:
# Get the French name
url = COMMITTEE_ACTIVITY_URL % {
'activity_id': activity_id,
'language': 'F',
'parliamentnum': session.parliamentnum,
'sessnum': session.sessnum
}
root = lxml.html.parse(urllib2.urlopen(url)).getroot()
activity_type = root.cssselect('span.StacTitlePrefix')[0]
activity.name_fr = activity_type.tail.strip()[:500]
activity.save()
if CommitteeActivityInSession.objects.exclude(source_id=activity_id).filter(
session=session, activity=activity).exists():
logger.warning("Apparent duplicate activity ID for %s %s %s: %s" %
(activity, activity.committee, session, activity_id))
return activity
CommitteeActivityInSession.objects.create(
session=session,
activity=activity,
source_id=activity_id
)
return activity
COMMITTEE_REPORT_URL = 'http://www2.parl.gc.ca/CommitteeBusiness/ReportsResponses.aspx?Cmte=%(acronym)s&Language=E&Mode=1&Parl=%(parliamentnum)d&Ses=%(sessnum)d'
@transaction.commit_on_success
def import_committee_reports(committee, session):
# FIXME rework to parse out the single all-reports page?
acronym = committee.get_acronym(session)
url = COMMITTEE_REPORT_URL % {'acronym': acronym,
'parliamentnum': session.parliamentnum,
'sessnum': session.sessnum}
tree = lxml.html.parse(urllib2.urlopen(url))
def _import_report(report_link, parent=None):
report_docid = _docid_from_url(report_link.get('href'))
try:
report = CommitteeReport.objects.get(committee=committee,
session=session, source_id=report_docid, parent=parent)
if report.presented_date:
# We can consider this report fully parsed
return report
except CommitteeReport.DoesNotExist:
if CommitteeReport.objects.filter(source_id=report_docid).exists():
if committee.parent and \
CommitteeReport.objects.filter(source_id=report_docid, committee=committee.parent).exists():
# Reference to parent committee report
return None
else:
raise Exception("Duplicate report ID %s on %s" % (report_docid, url))
report = CommitteeReport(committee=committee,
session=session, source_id=report_docid, parent=parent)
report_name = report_link.text.strip()
match = re.search(r'^Report (\d+) - (.+)', report_name)
if match:
report.number = int(match.group(1))
report.name_en = match.group(2).strip()
else:
report.name_en = report_name
report.government_response = bool(report_link.xpath("../span[contains(., 'Government Response')]"))
match = re.search(r'Adopted by the Committee on ([a-zA-Z0-9, ]+)', report_link.tail)
if match:
report.adopted_date = _parse_date(match.group(1))
match = re.search(r'Presented to the House on ([a-zA-Z0-9, ]+)', report_link.tail)
if match:
report.presented_date = _parse_date(match.group(1))
report.save()
return report
for item in tree.getroot().cssselect('.TocReportItemText'):
report_link = item.xpath('./a')[0]
report = _import_report(report_link)
for response_link in item.cssselect('.TocResponseItemText a'):
_import_report(response_link, parent=report)
return True | agpl-3.0 | -7,497,008,543,799,382,000 | 42.188462 | 171 | 0.62736 | false | 3.562183 | false | false | false |
rcanepa/cs-fundamentals | python/interview_questions/longest_palindromic_substring.py | 1 | 3402 | """Given a string s, find the longest palindromic substring in s. You may
assume that the maximum length of s is 1000.
Example:
Input: "babad"
Output: "bab"
Note: "aba" is also a valid answer.
Example:
Input: "cbbd"
Output: "bb"
"""
def is_palindrome(string, low, high):
i = low
j = high
while j > i:
if string[i] != string[j]:
return False
i += 1
j -= 1
return True
def longest_palindrome(string):
if len(string) < 2:
return string
n = len(string)
longest_palindrome_size = 0
longest_palindrome_start = 0
longest_palindrome_end = 0
# TC: O(N^3), where N = len(string)
for i in range(n): # TC: O(N)
for j in range(i, n): # TC: O(N)
if is_palindrome(string, i, j) and j - i + 1 > longest_palindrome_size: # TC: O(N)
longest_palindrome_size = j - i + 1
longest_palindrome_start = i
longest_palindrome_end = j
return string[longest_palindrome_start:longest_palindrome_end + 1]
def longest_palindrome_dp(string):
n = len(string)
if n < 2:
return string
dp = [[False] * n for _ in range(n)]
# All substring of length 1 are palindromes
# TC: O(N)
for i in range(n):
dp[i][i] = True
# Check for substrings of length 2
# TC: O(N)
for i in range(n - 1):
dp[i][i + 1] = string[i] == string[i + 1]
# Check the rest of the substrings
m = 2
# TC: O(N^2), where N = len(string)
while m < n: # TC: O(N)
for i in range(n - m): # TC: O(N / 2) = O(N)
j = i + m
dp[i][j] = string[i] == string[j] and dp[i + 1][j - 1]
m += 1
longest_palindrome_size = 1
longest_palindrome_start = longest_palindrome_end = 0
# TC: O(N^2), where N = len(string)
for i in range(n): # TC: O(N)
for j in range(i + 1, n): # TC: O(N / 2) = O(N)
if dp[i][j] and j - i + 1 > longest_palindrome_size:
longest_palindrome_size = j - i + 1
longest_palindrome_start = i
longest_palindrome_end = j
return string[longest_palindrome_start:longest_palindrome_end + 1]
def _expand_around_center(string, low, high):
l, r = low, high
# TC: O(N)
while l >= 0 and r < len(string) and string[l] == string[r]:
l -= 1
r += 1
return r - l - 1
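# Note: when the while-loop exits, l and r each sit one position *outside* the
# palindrome, so its length is (r - 1) - (l + 1) + 1 = r - l - 1.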
def longest_palindrome_expand_around_center(string):
start = end = 0
# TC: O(N^2), where N = len(string)
for i in range(len(string) - 1): # TC: O(N) * O(N + N) = O(N^2)
lp1 = _expand_around_center(string, i, i) # TC: O(N)
lp2 = _expand_around_center(string, i, i + 1) # TC: O(N)
max_length = max(lp1, lp2)
if max_length > end - start + 1:
start = i - ((max_length - 1) // 2)
end = i + (max_length // 2)
return string[start:end + 1]
if __name__ == "__main__":
test_cases = [
("", ""),
("a", "a"),
("aa", "aa"),
("abaabc", "baab"),
("babad", "bab"),
("cbbd", "bb"),
("abaabc", "baab"),
("madama", "madam"),
("jklollolkidding", "klollolk")
]
for string, expected_result in test_cases:
result = longest_palindrome_expand_around_center(string)
print(string, result)
assert result == expected_result
| mit | 843,714,206,536,434,300 | 26 | 95 | 0.5194 | false | 2.940363 | false | false | false |
hellonts/iwork | Iwork/models.py | 1 | 5558 | #coding=utf-8
from django.db import models
# Create your models here.
# create protocol_type
class Ssh_protocol(models.Model):
ssh_protocol_type = models.CharField(max_length=10)
def __unicode__(self):
return self.ssh_protocol_type
# create Ip
class Ip(models.Model):
ip_address = models.IPAddressField(max_length=20,unique=True)
port = models.CharField(max_length=10)
user = models.CharField(max_length=30)
passwd = models.CharField(max_length=50)
protocol_type = models.ForeignKey(Ssh_protocol)
hostname = models.CharField(max_length=50,blank=True)
publish_time = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.ip_address
# create grouplist
class Group(models.Model):
group_name = models.CharField(max_length=20,blank=True)
ips = models.ManyToManyField(Ip,blank=True)
publish_time = models.DateTimeField(auto_now_add=True)
iplist = []
def save(self, *args, **kwargs):
        super(Group, self).save(*args, **kwargs)
for i in self.iplist:
p, created = Ip.objects.get_or_create(ip_address=i)
self.ips.add(p)
def __unicode__(self):
return u'%s %s' % (self.group_name, self.publish_time)
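# Hypothetical usage from a Django shell (the names below are made up for
# illustration). Group.save() looks up each address in `iplist` and links the
# matching Ip rows through the `ips` many-to-many field:
#   ssh = Ssh_protocol.objects.create(ssh_protocol_type='ssh')
#   Ip.objects.create(ip_address='192.0.2.10', port='22', user='root',
#                     passwd='secret', protocol_type=ssh)
#   grp = Group(group_name='webservers')
#   grp.iplist = ['192.0.2.10']
#   grp.save()   # grp.ips now contains the Ip row created above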
# create system_resource
class System_resource(models.Model):
system_ver = models.CharField(max_length=50,blank=True)
digit_number = models.CharField(max_length=10,blank=True)
cpu = models.CharField(max_length=50,blank=True)
cpu_number = models.CharField(max_length=50,blank=True)
physics_mem = models.CharField(max_length=50,blank=True)
swap_mem = models.CharField(max_length=50,blank=True)
disk = models.CharField(max_length=50,blank=True)
network_card = models.CharField(max_length=50,blank=True)
ip = models.ForeignKey(Ip)
def __unicode__(self):
return self.system_ver
# create System_command
class System_command(models.Model):
text = models.TextField(max_length=200,blank=True)
input_time = models.DateTimeField(auto_now_add=True)
ip = models.ForeignKey(Ip)
def __unicode__(self):
return self.text
# create System_stat
class System_stat(models.Model):
user_stat = models.CharField(max_length=200,blank=True)
time = models.DateTimeField(auto_now_add=True)
server_stat = models.CharField(max_length=200,blank=True)
system_resource = models.ForeignKey(System_resource)
ip = models.ForeignKey(Ip)
def __unicode__(self):
return self.user_stat
# create System_task
class System_task(models.Model):
file_name = models.CharField(max_length=50,blank=True)
time = models.DateTimeField(auto_now_add=True)
path = models.FilePathField(max_length=50,blank=True)
comm = models.CharField(max_length=50,blank=True)
processing_time = models.DateTimeField(auto_now_add=True)
back_state = models.CharField(max_length=50,blank=True)
ip = models.ForeignKey(Ip)
# send_mail =
def __unicode__(self):
return self.file_name
# create Server
#class Server(models.Model):
# http =
# mysql =
# cache =
# ip =
# create Network
class Network(models.Model):
input = models.CharField(max_length=50,blank=True)
time = models.DateTimeField(auto_now_add=True)
output = models.CharField(max_length=50,blank=True)
ip = models.ForeignKey(Ip)
def __unicode__(self):
return self.input
# create Syslog
class Syslog(models.Model):
system_log = models.TextField(max_length=300,blank=True)
time = models.DateTimeField(auto_now_add=True)
server_log = models.TextField(max_length=300,blank=True)
ip = models.ForeignKey(Ip)
def __unicode__(self):
return self.system_log
# create System_use
class System_use(models.Model):
mem = models.CharField(max_length=50,blank=True)
time = models.DateTimeField(auto_now_add=True)
cpu = models.CharField(max_length=50,blank=True)
swap = models.CharField(max_length=50,blank=True)
disk = models.CharField(max_length=50,blank=True)
system_load = models.CharField(max_length=50,blank=True)
ip = models.ForeignKey(Ip)
def __unicode__(self):
return self.mem
# create System_monitoring
class System_monitoring(models.Model):
online_user = models.CharField(max_length=50,blank=True)
time = models.DateTimeField(auto_now_add=True)
# server = models.ForeignKey(Server)
networK = models.ForeignKey(Network)
syslog = models.ForeignKey(Syslog)
system_use = models.ForeignKey(System_use)
ip = models.ForeignKey(Ip)
def __unicode__(self):
return self.online_user
# create upload_file
class Document(models.Model):
docfile = models.FileField(upload_to='documents/%Y-%m-%d')
# create System_servermanager
class System_servermanager(models.Model):
servername = models.CharField(max_length=20,blank=True)
scriptname = models.CharField(max_length=20,blank=True)
time = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.servername
# create envname_ver
class Envname_ver(models.Model):
envver = models.FloatField(blank=True, null=True)
time = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return unicode(self.envver)
# create Deployment_environment
class Deployment_Environment(models.Model):
envname = models.CharField(max_length=20)
scriptname = models.CharField(max_length=20)
scriptpath = models.CharField(max_length=255)
env_ver = models.ForeignKey(Envname_ver)
def __unicode__(self):
return '%s %s %s ' % (self.envname,self.scriptname,self.env_ver)
| gpl-3.0 | 8,693,521,421,438,800,000 | 32.281437 | 65 | 0.699712 | false | 3.370528 | false | false | false |
CodyKochmann/generators | examples/tail.py | 1 | 2187 | # -*- coding: utf-8 -*-
# @Author: Cody Kochmann
# @Date: 2019-05-01 07:54:28
# @Last Modified by: Cody Kochmann
# @Last Modified time: 2019-05-02 13:24:38
""" This demonstrates how you can use generator pipelines to implement tail in
pure python.
"""
import os
from typing import Iterable
# this example does require you to have inotify installed
try:
import inotify.adapters
except ImportError:
raise ImportError('''
this example requires that you have "inotify" installed so python
can watch for file events. If you're using pip, "pip install inotify"
is all you need!
''')
from generators import Generator as G
def tail(file_path:str) -> Iterable[str]:
assert os.path.isfile(file_path)
notifier = inotify.adapters.Inotify()
notifier.add_watch(file_path)
with open(file_path, 'r') as f:
notifier.add_watch(file_path)
yield from G( # create a Generator fed by the notifier
notifier.event_gen(yield_nones=False)
).filter( # filter for IN_MODIFY events (mask equals 2)
lambda i: i[0].mask == 2
).map( # when the file is modified, get the new size
lambda i: os.path.getsize(i[2])
).uniq( # filter duplicates, just incase nothing was added to the file
).window( # window the (previous_size, current_size)
2
).side_task( # seek the file descriptor and pass the input since f.seek returns None
lambda i: f.seek(i[0])
).map( # read in the newly added data
lambda i: f.read(i[1]-i[0])
).chain( # chain the incoming chunks since they might not be single lines
        ).groupby( # separate groups by lines
lambda i:i=='\n'
).filter( # exclude groups that are just '\n', since they are the delimiters
lambda i:i[0]==False
).map( # join the characters to construct each line as a string
lambda i:''.join(i[1])
#).print('-', use_repr=True # uncomment this line to see the constructed lines
)
if __name__ == '__main__':
from sys import argv
for line in tail(argv[-1]):
print(line.strip())
| mit | 6,455,776,862,744,148,000 | 34.852459 | 93 | 0.620027 | false | 3.70678 | false | false | false |
mmm444/eie-pro-linux | exp/ana.py | 1 | 1287 | import sys
import xml.sax
from collections import defaultdict
class Exact(xml.sax.handler.ContentHandler):
def __init__(self):
self.state = 0
self.lens = []
def startElement(self, name, attrs):
if self.state == 0:
if name == "field" and attrs['name'] == "usb.endpoint_number" and attrs['show'] == "0x81":
self.state = 1
elif name == "field" and attrs['name'] == "usb.endpoint_number" and attrs['show'] == "0x02":
self.state = 2
elif self.state == 1 and name == "field" and attrs['name'] == "usb.iso.data":
print "In: " + attrs['show']
self.state = -1
elif self.state == 2 and name == "field" and attrs['name'] == "usb.win32.iso_data_len":
self.lens.append(int(attrs['show'], 16))
def endElement(self, name):
if name == 'packet':
if self.lens:
d = defaultdict(int)
s = ""
for l in self.lens:
s += str(l/12) + " "
d[l] += 1
print "Out: " + str(d) + s
self.lens = []
self.state = 0
if __name__ == '__main__':
parser = xml.sax.make_parser()
handler = Exact()
parser.setContentHandler(handler)
parser.parse(open(sys.argv[1]))
| gpl-2.0 | 5,238,775,657,370,010,000 | 27.6 | 98 | 0.582751 | false | 3.31701 | false | false | false |
sileht/check_mk | web/htdocs/main.py | 2 | 2473 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2012 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import defaults, config
def page_index():
start_url = html.var("start_url", config.start_url)
html.req.headers_out.add("Cache-Control", "max-age=7200, public");
if "%s" in config.page_heading:
heading = config.page_heading % (defaults.omd_site or _("Multisite"))
else:
heading = config.page_heading
html.write("""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">
<html>
<head>
<title>%s</title>
<link rel="shortcut icon" href="images/favicon.ico" type="image/ico">
</head>
<frameset cols="280,*" frameborder="0" framespacing="0" border="0">
<frame src="side.py" name="side" noresize scrolling="no">
<frame src="%s" name="main" noresize>
</frameset>
</html>
""" % (heading, start_url))
# This function does nothing. The sites have already
# been reconfigured according to the variable _site_switch,
# because that variable is processed by connect_to_livestatus()
def ajax_switch_site():
pass
| gpl-2.0 | 1,284,514,131,423,132,400 | 43.963636 | 101 | 0.536595 | false | 3.429958 | true | false | false |
izzyalonso/tndata_backend | tndata_backend/goals/migrations/0140_auto_20160426_1805.py | 2 | 1387 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('goals', '0139_reset_montly_frequencies'),
]
operations = [
migrations.AddField(
model_name='useraction',
name='updated_on',
field=models.DateTimeField(default=datetime.datetime(2016, 4, 26, 18, 4, 58, 692802, tzinfo=utc), auto_now=True),
preserve_default=False,
),
migrations.AddField(
model_name='userbehavior',
name='updated_on',
field=models.DateTimeField(default=datetime.datetime(2016, 4, 26, 18, 5, 2, 173238, tzinfo=utc), auto_now=True),
preserve_default=False,
),
migrations.AddField(
model_name='usercategory',
name='updated_on',
field=models.DateTimeField(default=datetime.datetime(2016, 4, 26, 18, 5, 6, 229293, tzinfo=utc), auto_now=True),
preserve_default=False,
),
migrations.AddField(
model_name='usergoal',
name='updated_on',
field=models.DateTimeField(default=datetime.datetime(2016, 4, 26, 18, 5, 11, 404946, tzinfo=utc), auto_now=True),
preserve_default=False,
),
]
| mit | -5,244,381,456,912,752,000 | 33.675 | 125 | 0.596251 | false | 3.974212 | false | false | false |
antoinecarme/pyaf | tests/pickling/test_ozone_pickle.py | 1 | 1484 | from __future__ import absolute_import
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
def pickleModel(iModel):
import pickle
output = pickle.dumps(iModel)
lReloadedObject = pickle.loads(output)
output2 = pickle.dumps(lReloadedObject)
assert(iModel.to_json() == lReloadedObject.to_json())
return lReloadedObject;
b1 = tsds.load_ozone()
df = b1.mPastData
#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
# lEngine.mOptions.mDebugPerformance = True;
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots("outputs/my_ozone");
lEngine2 = pickleModel(lEngine)
dfapp_in = df.copy();
dfapp_in.tail()
#H = 12
dfapp_out = lEngine2.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine2.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
| bsd-3-clause | 3,360,944,208,366,305,300 | 22.555556 | 83 | 0.72035 | false | 2.640569 | false | false | false |
cedricbonhomme/shelter-database | shelter/web/views/admin.py | 1 | 2341 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# ***** BEGIN LICENSE BLOCK *****
# This file is part of Shelter Database.
# Copyright (c) 2016 Luxembourg Institute of Science and Technology.
# All rights reserved.
#
#
#
# ***** END LICENSE BLOCK *****
__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 0.2 $"
__date__ = "$Date: 2016/06/07 $"
__revision__ = "$Date: 2016/07/12 $"
__copyright__ = "Copyright 2016 Luxembourg Institute of Science and Technology"
__license__ = ""
#
# Views generated by Flask-Admin for the database administration.
#
from flask_login import current_user
from flask import current_app
from flask_admin import Admin, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.menu import MenuLink
from bootstrap import db
from web.models import User, Shelter, Value, Translation
class TranslationView(ModelView):
column_searchable_list = ("original", "translated")
column_filters = ["language_code"]
column_editable_list = ["translated"]
def is_accessible(self):
return current_user.is_authenticated and current_user.is_admin
class ValueView(ModelView):
column_searchable_list = ("name",)
column_filters = ["attribute_id"]
def is_accessible(self):
return current_user.is_authenticated and current_user.is_admin
class UserView(ModelView):
column_exclude_list = ["pwdhash"]
column_editable_list = ["email", "name"]
def is_accessible(self):
return current_user.is_authenticated and current_user.is_admin
class ShelterView(ModelView):
column_exclude_list = ["properties"]
form_excluded_columns = ["properties"]
def is_accessible(self):
return current_user.is_authenticated and current_user.is_admin
menu_link_back_dashboard = MenuLink(name="Back to dashboard", url="/admin/dashboard")
menu_link_back_home = MenuLink(name="Back to home", url="/")
admin = Admin(
current_app,
name="Management of data",
template_mode="bootstrap3",
index_view=AdminIndexView(name="Home", url="/admin/data_management"),
)
admin.add_view(UserView(User, db.session))
admin.add_view(ShelterView(Shelter, db.session))
admin.add_view(ValueView(Value, db.session))
admin.add_view(TranslationView(Translation, db.session))
admin.add_link(menu_link_back_home)
admin.add_link(menu_link_back_dashboard)
| mit | -4,430,197,794,344,298,500 | 28.2625 | 85 | 0.709099 | false | 3.437592 | false | false | false |
dubzzz/py-cpp-dependencies | dependencies.py | 1 | 5097 | from os import listdir, path
import json
import re
class Logger:
def info(self, msg):
print(msg)
def scan_directory(scan_path, allowed_extensions):
r"""
scan_path path to scan
allowed_extensions extensions to consider
"""
files = list()
for f in listdir(scan_path):
full_path = path.join(scan_path, f)
if path.isdir(full_path):
subfiles = scan_directory(full_path, allowed_extensions)
for sf in subfiles:
files.append(sf)
else:
correct_extension = False
for ext in allowed_extensions:
if f.endswith(ext):
correct_extension = True
break
if correct_extension:
files.append(full_path)
return files
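# Illustrative call (placeholder paths, in the same spirit as __main__ below):
#   scan_directory('/path/to/source/files', ['.cpp', '.hpp'])
#   -> ['/path/to/source/files/foo.cpp', '/path/to/source/files/sub/bar.hpp', ...]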
def build_dependency_tree(scan_path, includes, output):
r"""
scan_path path to scan
includes directories to find includes
output filename of the output
"""
logger = Logger()
# Get files to analyse
logger.info("List of files to analyse:")
allowed_extensions = [".c", ".cpp", ".c++", ".cxx", ".h", ".hpp", ".h++", ".hxx", ".r",]
files = scan_directory(scan_path, allowed_extensions)
del allowed_extensions
logger.info("> %d potential source files" % (len(files),))
# Filter files on blacklist criteria
include_files = list()
dependency_tree = list()
blacklist_criteria = [re.compile(r'sources\/others'),]
for f in files:
blacklisted = False
for criteria in blacklist_criteria:
if criteria.search(f.replace('\\', '/')):
blacklisted = True
break
if not blacklisted:
include_files.append(f)
dependency_tree.append({"file": f[len(scan_path):].replace('\\', '/'), "includes": list(), "used_by": list(),})
del blacklist_criteria
del files
logger.info("> %d non-blacklisted source files" % (len(dependency_tree),))
# Read source files for includes
logger.info("Read and parse all files")
include_regex = re.compile(r'#include\s+([\"<"]{1})([^"^>]+)([\">"]{1})')
for source in include_files:
with open(source, 'r') as f:
source_id = include_files.index(path.join(scan_path, source.replace('/', path.sep)))
for line in f:
# Is the line corresponding to an include?
m = include_regex.search(line)
if m and (m.group(1) == m.group(3) or (m.group(1) == '<' and m.group(3) == '>')):
include_name = m.group(2)
# What is the related file?
for include in includes:
# Build the path corresponding to <include>
include_path = include
for subdir in include_name.split('/'):
include_path = path.join(include_path, subdir)
# Known file?
if include_path in include_files:
include_id = include_files.index(include_path)
dependency_tree[source_id]["includes"].append(include_id)
dependency_tree[include_id]["used_by"].append(source_id)
break
logger.info("> %d include(s)\tfor %s" % (len(dependency_tree[source_id]["includes"]),source,))
with open(output, 'w') as f:
f.write(json.dumps(dependency_tree))
def load_dependency_tree(output):
with open(output, 'r') as f:
return json.loads(f.read())
return list()
def who_is_using(scan_path, output, filename):
if not filename.startswith(scan_path):
raise Exception("Filename does not correspond to the scan path")
dependency_tree = load_dependency_tree(output)
include_files = list()
for dep in dependency_tree:
include_files.append(path.join(scan_path, dep["file"][1:].replace('/', path.sep)))
if not filename in include_files:
raise Exception("Filename has not been scanned")
using_this_file = [filename,]
to_analyse = [include_files.index(filename),]
while len(to_analyse) > 0:
for f_id in dependency_tree[to_analyse[0]]["used_by"]:
if not include_files[f_id] in using_this_file:
using_this_file.append(include_files[f_id])
to_analyse.append(f_id)
dependency_tree[to_analyse[0]]["used_by"] = list()
del to_analyse[0]
return using_this_file
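# Example of what who_is_using() returns (hypothetical files): if bar.cpp is
# the only file including foo.hpp, the result is the file itself plus every
# transitive includer, e.g.
#   ['/path/to/source/files/foo.hpp', '/path/to/source/files/bar.cpp']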
if __name__ =='__main__':
scan_path = "/path/to/source/files"
includes = ["/path/to/source",] # include "files/toto" can mean /path/to/source/files/toto
output = "/path/to/output.json"
test_path = "/path/to/source/files/foo.hpp"
build_dependency_tree(scan_path, includes, output)
print(who_is_using(scan_path, output, test_path))
| mit | 253,529,975,884,525,700 | 36.908397 | 123 | 0.546792 | false | 4.058121 | false | false | false |