repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (string, 991 distinct values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (string, 15 distinct values) |
---|---|---|---|---|---|
ejoful/scrapy_example | zhihu_spider/zhihu_spider/settings.py | 1 | 3176 | # -*- coding: utf-8 -*-
# Scrapy settings for zhihu_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'zhihu_spider'
SPIDER_MODULES = ['zhihu_spider.spiders']
NEWSPIDER_MODULE = 'zhihu_spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'zhihu_spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'zhihu_spider.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'zhihu_spider.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'zhihu_spider.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| gpl-3.0 |
tboyce021/home-assistant | tests/helpers/test_area_registry.py | 1 | 5502 | """Tests for the Area Registry."""
import asyncio
import pytest
from homeassistant.core import callback
from homeassistant.helpers import area_registry
import tests.async_mock
from tests.common import flush_store, mock_area_registry
@pytest.fixture
def registry(hass):
"""Return an empty, loaded, registry."""
return mock_area_registry(hass)
@pytest.fixture
def update_events(hass):
"""Capture update events."""
events = []
@callback
def async_capture(event):
events.append(event.data)
hass.bus.async_listen(area_registry.EVENT_AREA_REGISTRY_UPDATED, async_capture)
return events
async def test_list_areas(registry):
"""Make sure that we can read areas."""
registry.async_create("mock")
areas = registry.async_list_areas()
assert len(areas) == len(registry.areas)
async def test_create_area(hass, registry, update_events):
"""Make sure that we can create an area."""
area = registry.async_create("mock")
assert area.id == "mock"
assert area.name == "mock"
assert len(registry.areas) == 1
await hass.async_block_till_done()
assert len(update_events) == 1
assert update_events[0]["action"] == "create"
assert update_events[0]["area_id"] == area.id
async def test_create_area_with_name_already_in_use(hass, registry, update_events):
"""Make sure that we can't create an area with a name already in use."""
area1 = registry.async_create("mock")
with pytest.raises(ValueError) as e_info:
area2 = registry.async_create("mock")
assert area1 != area2
assert e_info == "Name is already in use"
await hass.async_block_till_done()
assert len(registry.areas) == 1
assert len(update_events) == 1
async def test_create_area_with_id_already_in_use(registry):
"""Make sure that we can't create an area with a name already in use."""
area1 = registry.async_create("mock")
updated_area1 = registry.async_update(area1.id, "New Name")
assert updated_area1.id == area1.id
area2 = registry.async_create("mock")
assert area2.id == "mock_2"
async def test_delete_area(hass, registry, update_events):
"""Make sure that we can delete an area."""
area = registry.async_create("mock")
await registry.async_delete(area.id)
assert not registry.areas
await hass.async_block_till_done()
assert len(update_events) == 2
assert update_events[0]["action"] == "create"
assert update_events[0]["area_id"] == area.id
assert update_events[1]["action"] == "remove"
assert update_events[1]["area_id"] == area.id
async def test_delete_non_existing_area(registry):
"""Make sure that we can't delete an area that doesn't exist."""
registry.async_create("mock")
with pytest.raises(KeyError):
await registry.async_delete("")
assert len(registry.areas) == 1
async def test_update_area(hass, registry, update_events):
"""Make sure that we can read areas."""
area = registry.async_create("mock")
updated_area = registry.async_update(area.id, name="mock1")
assert updated_area != area
assert updated_area.name == "mock1"
assert len(registry.areas) == 1
await hass.async_block_till_done()
assert len(update_events) == 2
assert update_events[0]["action"] == "create"
assert update_events[0]["area_id"] == area.id
assert update_events[1]["action"] == "update"
assert update_events[1]["area_id"] == area.id
async def test_update_area_with_same_name(registry):
"""Make sure that we can reapply the same name to the area."""
area = registry.async_create("mock")
updated_area = registry.async_update(area.id, name="mock")
assert updated_area == area
assert len(registry.areas) == 1
async def test_update_area_with_name_already_in_use(registry):
"""Make sure that we can't update an area with a name already in use."""
area1 = registry.async_create("mock1")
area2 = registry.async_create("mock2")
with pytest.raises(ValueError) as e_info:
registry.async_update(area1.id, name="mock2")
assert e_info == "Name is already in use"
assert area1.name == "mock1"
assert area2.name == "mock2"
assert len(registry.areas) == 2
async def test_load_area(hass, registry):
"""Make sure that we can load/save data correctly."""
registry.async_create("mock1")
registry.async_create("mock2")
assert len(registry.areas) == 2
registry2 = area_registry.AreaRegistry(hass)
await flush_store(registry._store)
await registry2.async_load()
assert list(registry.areas) == list(registry2.areas)
async def test_loading_area_from_storage(hass, hass_storage):
"""Test loading stored areas on start."""
hass_storage[area_registry.STORAGE_KEY] = {
"version": area_registry.STORAGE_VERSION,
"data": {"areas": [{"id": "12345A", "name": "mock"}]},
}
registry = await area_registry.async_get_registry(hass)
assert len(registry.areas) == 1
async def test_loading_race_condition(hass):
"""Test only one storage load called when concurrent loading occurred ."""
with tests.async_mock.patch(
"homeassistant.helpers.area_registry.AreaRegistry.async_load"
) as mock_load:
results = await asyncio.gather(
area_registry.async_get_registry(hass),
area_registry.async_get_registry(hass),
)
mock_load.assert_called_once_with()
assert results[0] == results[1]
| apache-2.0 |
CapOM/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/protorpc/protorpc/util_test.py | 19 | 14232 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for protorpc.util."""
import six
__author__ = '[email protected] (Rafe Kaplan)'
import datetime
import random
import sys
import types
import unittest
from protorpc import test_util
from protorpc import util
class ModuleInterfaceTest(test_util.ModuleInterfaceTest,
test_util.TestCase):
MODULE = util
class PadStringTest(test_util.TestCase):
def testPadEmptyString(self):
self.assertEquals(' ' * 512, util.pad_string(''))
def testPadString(self):
self.assertEquals('hello' + (507 * ' '), util.pad_string('hello'))
def testPadLongString(self):
self.assertEquals('x' * 1000, util.pad_string('x' * 1000))
class UtilTest(test_util.TestCase):
def testDecoratedFunction_LengthZero(self):
@util.positional(0)
def fn(kwonly=1):
return [kwonly]
self.assertEquals([1], fn())
self.assertEquals([2], fn(kwonly=2))
self.assertRaisesWithRegexpMatch(TypeError,
r'fn\(\) takes at most 0 positional '
r'arguments \(1 given\)',
fn, 1)
def testDecoratedFunction_LengthOne(self):
@util.positional(1)
def fn(pos, kwonly=1):
return [pos, kwonly]
self.assertEquals([1, 1], fn(1))
self.assertEquals([2, 2], fn(2, kwonly=2))
self.assertRaisesWithRegexpMatch(TypeError,
r'fn\(\) takes at most 1 positional '
r'argument \(2 given\)',
fn, 2, 3)
def testDecoratedFunction_LengthTwoWithDefault(self):
@util.positional(2)
def fn(pos1, pos2=1, kwonly=1):
return [pos1, pos2, kwonly]
self.assertEquals([1, 1, 1], fn(1))
self.assertEquals([2, 2, 1], fn(2, 2))
self.assertEquals([2, 3, 4], fn(2, 3, kwonly=4))
self.assertRaisesWithRegexpMatch(TypeError,
r'fn\(\) takes at most 2 positional '
r'arguments \(3 given\)',
fn, 2, 3, 4)
def testDecoratedMethod(self):
class MyClass(object):
@util.positional(2)
def meth(self, pos1, kwonly=1):
return [pos1, kwonly]
self.assertEquals([1, 1], MyClass().meth(1))
self.assertEquals([2, 2], MyClass().meth(2, kwonly=2))
self.assertRaisesWithRegexpMatch(TypeError,
r'meth\(\) takes at most 2 positional '
r'arguments \(3 given\)',
MyClass().meth, 2, 3)
def testDefaultDecoration(self):
@util.positional
def fn(a, b, c=None):
return a, b, c
self.assertEquals((1, 2, 3), fn(1, 2, c=3))
self.assertEquals((3, 4, None), fn(3, b=4))
self.assertRaisesWithRegexpMatch(TypeError,
r'fn\(\) takes at most 2 positional '
r'arguments \(3 given\)',
fn, 2, 3, 4)
def testDefaultDecorationNoKwdsFails(self):
def fn(a):
return a
self.assertRaisesRegexp(
ValueError,
'Functions with no keyword arguments must specify max_positional_args',
util.positional, fn)
class AcceptItemTest(test_util.TestCase):
def CheckAttributes(self, item, main_type, sub_type, q=1, values={}, index=1):
self.assertEquals(index, item.index)
self.assertEquals(main_type, item.main_type)
self.assertEquals(sub_type, item.sub_type)
self.assertEquals(q, item.q)
self.assertEquals(values, item.values)
def testParse(self):
self.CheckAttributes(util.AcceptItem('*/*', 1), None, None)
self.CheckAttributes(util.AcceptItem('text/*', 1), 'text', None)
self.CheckAttributes(util.AcceptItem('text/plain', 1), 'text', 'plain')
self.CheckAttributes(
util.AcceptItem('text/plain; q=0.3', 1), 'text', 'plain', 0.3,
values={'q': '0.3'})
self.CheckAttributes(
util.AcceptItem('text/plain; level=2', 1), 'text', 'plain',
values={'level': '2'})
self.CheckAttributes(
util.AcceptItem('text/plain', 10), 'text', 'plain', index=10)
def testCaseInsensitive(self):
self.CheckAttributes(util.AcceptItem('Text/Plain', 1), 'text', 'plain')
def testBadValue(self):
self.assertRaises(util.AcceptError,
util.AcceptItem, 'bad value', 1)
self.assertRaises(util.AcceptError,
util.AcceptItem, 'bad value/', 1)
self.assertRaises(util.AcceptError,
util.AcceptItem, '/bad value', 1)
def testSortKey(self):
item = util.AcceptItem('main/sub; q=0.2; level=3', 11)
self.assertEquals((False, False, -0.2, False, 11), item.sort_key)
item = util.AcceptItem('main/*', 12)
self.assertEquals((False, True, -1, True, 12), item.sort_key)
item = util.AcceptItem('*/*', 1)
self.assertEquals((True, True, -1, True, 1), item.sort_key)
def testSort(self):
i1 = util.AcceptItem('text/*', 1)
i2 = util.AcceptItem('text/html', 2)
i3 = util.AcceptItem('text/html; q=0.9', 3)
i4 = util.AcceptItem('text/html; q=0.3', 4)
i5 = util.AcceptItem('text/xml', 5)
i6 = util.AcceptItem('text/html; level=1', 6)
i7 = util.AcceptItem('*/*', 7)
items = [i1, i2, i3, i4, i5, i6, i7]
random.shuffle(items)
self.assertEquals([i6, i2, i5, i3, i4, i1, i7], sorted(items))
def testMatchAll(self):
item = util.AcceptItem('*/*', 1)
self.assertTrue(item.match('text/html'))
self.assertTrue(item.match('text/plain; level=1'))
self.assertTrue(item.match('image/png'))
self.assertTrue(item.match('image/png; q=0.3'))
def testMatchMainType(self):
item = util.AcceptItem('text/*', 1)
self.assertTrue(item.match('text/html'))
self.assertTrue(item.match('text/plain; level=1'))
self.assertFalse(item.match('image/png'))
self.assertFalse(item.match('image/png; q=0.3'))
def testMatchFullType(self):
item = util.AcceptItem('text/plain', 1)
self.assertFalse(item.match('text/html'))
self.assertTrue(item.match('text/plain; level=1'))
self.assertFalse(item.match('image/png'))
self.assertFalse(item.match('image/png; q=0.3'))
def testMatchCaseInsensitive(self):
item = util.AcceptItem('text/plain', 1)
self.assertTrue(item.match('tExt/pLain'))
def testStr(self):
self.assertHeaderSame('*/*', str(util.AcceptItem('*/*', 1)))
self.assertHeaderSame('text/*', str(util.AcceptItem('text/*', 1)))
self.assertHeaderSame('text/plain',
str(util.AcceptItem('text/plain', 1)))
self.assertHeaderSame('text/plain; q=0.2',
str(util.AcceptItem('text/plain; q=0.2', 1)))
self.assertHeaderSame(
'text/plain; q=0.2; level=1',
str(util.AcceptItem('text/plain; level=1; q=0.2', 1)))
def testRepr(self):
self.assertEquals("AcceptItem('*/*', 1)", repr(util.AcceptItem('*/*', 1)))
self.assertEquals("AcceptItem('text/plain', 11)",
repr(util.AcceptItem('text/plain', 11)))
def testValues(self):
item = util.AcceptItem('text/plain; a=1; b=2; c=3;', 1)
values = item.values
self.assertEquals(dict(a="1", b="2", c="3"), values)
values['a'] = "7"
self.assertNotEquals(values, item.values)
class ParseAcceptHeaderTest(test_util.TestCase):
def testIndex(self):
accept_header = """text/*, text/html, text/html; q=0.9,
text/xml,
text/html; level=1, */*"""
accepts = util.parse_accept_header(accept_header)
self.assertEquals(6, len(accepts))
self.assertEquals([4, 1, 3, 2, 0, 5], [a.index for a in accepts])
class ChooseContentTypeTest(test_util.TestCase):
def testIgnoreUnrequested(self):
self.assertEquals('application/json',
util.choose_content_type(
'text/plain, application/json, */*',
['application/X-Google-protobuf',
'application/json'
]))
def testUseCorrectPreferenceIndex(self):
self.assertEquals('application/json',
util.choose_content_type(
'*/*, text/plain, application/json',
['application/X-Google-protobuf',
'application/json'
]))
def testPreferFirstInList(self):
self.assertEquals('application/X-Google-protobuf',
util.choose_content_type(
'*/*',
['application/X-Google-protobuf',
'application/json'
]))
def testCaseInsensitive(self):
self.assertEquals('application/X-Google-protobuf',
util.choose_content_type(
'application/x-google-protobuf',
['application/X-Google-protobuf',
'application/json'
]))
class GetPackageForModuleTest(test_util.TestCase):
def setUp(self):
self.original_modules = dict(sys.modules)
def tearDown(self):
sys.modules.clear()
sys.modules.update(self.original_modules)
def CreateModule(self, name, file_name=None):
if file_name is None:
file_name = '%s.py' % name
module = types.ModuleType(name)
sys.modules[name] = module
return module
def assertPackageEquals(self, expected, actual):
self.assertEquals(expected, actual)
if actual is not None:
self.assertTrue(isinstance(actual, six.text_type))
def testByString(self):
module = self.CreateModule('service_module')
module.package = 'my_package'
self.assertPackageEquals('my_package',
util.get_package_for_module('service_module'))
def testModuleNameNotInSys(self):
self.assertPackageEquals(None,
util.get_package_for_module('service_module'))
def testHasPackage(self):
module = self.CreateModule('service_module')
module.package = 'my_package'
self.assertPackageEquals('my_package', util.get_package_for_module(module))
def testHasModuleName(self):
module = self.CreateModule('service_module')
self.assertPackageEquals('service_module',
util.get_package_for_module(module))
def testIsMain(self):
module = self.CreateModule('__main__')
module.__file__ = '/bing/blam/bloom/blarm/my_file.py'
self.assertPackageEquals('my_file', util.get_package_for_module(module))
def testIsMainCompiled(self):
module = self.CreateModule('__main__')
module.__file__ = '/bing/blam/bloom/blarm/my_file.pyc'
self.assertPackageEquals('my_file', util.get_package_for_module(module))
def testNoExtension(self):
module = self.CreateModule('__main__')
module.__file__ = '/bing/blam/bloom/blarm/my_file'
self.assertPackageEquals('my_file', util.get_package_for_module(module))
def testNoPackageAtAll(self):
module = self.CreateModule('__main__')
self.assertPackageEquals('__main__', util.get_package_for_module(module))
class DateTimeTests(test_util.TestCase):
def testDecodeDateTime(self):
"""Test that a RFC 3339 datetime string is decoded properly."""
for datetime_string, datetime_vals in (
('2012-09-30T15:31:50.262', (2012, 9, 30, 15, 31, 50, 262000)),
('2012-09-30T15:31:50', (2012, 9, 30, 15, 31, 50, 0))):
decoded = util.decode_datetime(datetime_string)
expected = datetime.datetime(*datetime_vals)
self.assertEquals(expected, decoded)
def testDateTimeTimeZones(self):
"""Test that a datetime string with a timezone is decoded correctly."""
for datetime_string, datetime_vals in (
('2012-09-30T15:31:50.262-06:00',
(2012, 9, 30, 15, 31, 50, 262000, util.TimeZoneOffset(-360))),
('2012-09-30T15:31:50.262+01:30',
(2012, 9, 30, 15, 31, 50, 262000, util.TimeZoneOffset(90))),
('2012-09-30T15:31:50+00:05',
(2012, 9, 30, 15, 31, 50, 0, util.TimeZoneOffset(5))),
('2012-09-30T15:31:50+00:00',
(2012, 9, 30, 15, 31, 50, 0, util.TimeZoneOffset(0))),
('2012-09-30t15:31:50-00:00',
(2012, 9, 30, 15, 31, 50, 0, util.TimeZoneOffset(0))),
('2012-09-30t15:31:50z',
(2012, 9, 30, 15, 31, 50, 0, util.TimeZoneOffset(0))),
('2012-09-30T15:31:50-23:00',
(2012, 9, 30, 15, 31, 50, 0, util.TimeZoneOffset(-1380)))):
decoded = util.decode_datetime(datetime_string)
expected = datetime.datetime(*datetime_vals)
self.assertEquals(expected, decoded)
def testDecodeDateTimeInvalid(self):
"""Test that decoding malformed datetime strings raises execptions."""
for datetime_string in ('invalid',
'2012-09-30T15:31:50.',
'-08:00 2012-09-30T15:31:50.262',
'2012-09-30T15:31',
'2012-09-30T15:31Z',
'2012-09-30T15:31:50ZZ',
'2012-09-30T15:31:50.262 blah blah -08:00',
'1000-99-99T25:99:99.999-99:99'):
self.assertRaises(ValueError, util.decode_datetime, datetime_string)
def testTimeZoneOffsetDelta(self):
"""Test that delta works with TimeZoneOffset."""
time_zone = util.TimeZoneOffset(datetime.timedelta(minutes=3))
epoch = time_zone.utcoffset(datetime.datetime.utcfromtimestamp(0))
self.assertEqual(180, util.total_seconds(epoch))
def main():
unittest.main()
if __name__ == '__main__':
main()
| bsd-3-clause |
devendermishrajio/nova_test_latest | nova/tests/functional/v3/test_networks.py | 29 | 3919 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.network import api as network_api
from nova.tests.functional.v3 import api_sample_base
from nova.tests.unit.api.openstack.compute.contrib import test_networks
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class NetworksJsonTests(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-networks"
# TODO(gmann): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def _get_flags(self):
f = super(NetworksJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.os_networks.Os_networks')
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.extended_networks.Extended_networks')
return f
def setUp(self):
super(NetworksJsonTests, self).setUp()
fake_network_api = test_networks.FakeNetworkAPI()
self.stubs.Set(network_api.API, "get_all",
fake_network_api.get_all)
self.stubs.Set(network_api.API, "get",
fake_network_api.get)
self.stubs.Set(network_api.API, "associate",
fake_network_api.associate)
self.stubs.Set(network_api.API, "delete",
fake_network_api.delete)
self.stubs.Set(network_api.API, "create",
fake_network_api.create)
self.stubs.Set(network_api.API, "add_network_to_project",
fake_network_api.add_network_to_project)
def test_network_list(self):
response = self._do_get('os-networks')
subs = self._get_regexes()
self._verify_response('networks-list-resp', subs, response, 200)
def test_network_disassociate(self):
uuid = test_networks.FAKE_NETWORKS[0]['uuid']
response = self._do_post('os-networks/%s/action' % uuid,
'networks-disassociate-req', {})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
def test_network_show(self):
uuid = test_networks.FAKE_NETWORKS[0]['uuid']
response = self._do_get('os-networks/%s' % uuid)
subs = self._get_regexes()
self._verify_response('network-show-resp', subs, response, 200)
def test_network_create(self):
response = self._do_post("os-networks",
'network-create-req', {})
subs = self._get_regexes()
self._verify_response('network-create-resp', subs, response, 200)
def test_network_add(self):
response = self._do_post("os-networks/add",
'network-add-req', {})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
def test_network_delete(self):
response = self._do_delete('os-networks/always_delete')
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
| apache-2.0 |
msiedlarek/qtwebkit | Tools/Scripts/webkitpy/port/gtk_unittest.py | 117 | 5598 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import sys
import os
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.port.gtk import GtkPort
from webkitpy.port.pulseaudio_sanitizer_mock import PulseAudioSanitizerMock
from webkitpy.port import port_testcase
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions
class GtkPortTest(port_testcase.PortTestCase):
port_name = 'gtk'
port_maker = GtkPort
# Additionally mocks out the PulseAudioSanitizer methods.
def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, **kwargs):
port = super(GtkPortTest, self).make_port(host, port_name, options, os_name, os_version, **kwargs)
port._pulseaudio_sanitizer = PulseAudioSanitizerMock()
return port
def test_default_baseline_search_path(self):
port = self.make_port()
self.assertEqual(port.default_baseline_search_path(), ['/mock-checkout/LayoutTests/platform/gtk-wk1',
'/mock-checkout/LayoutTests/platform/gtk'])
port = self.make_port(options=MockOptions(webkit_test_runner=True))
self.assertEqual(port.default_baseline_search_path(), ['/mock-checkout/LayoutTests/platform/gtk-wk2',
'/mock-checkout/LayoutTests/platform/wk2', '/mock-checkout/LayoutTests/platform/gtk'])
def test_port_specific_expectations_files(self):
port = self.make_port()
self.assertEqual(port.expectations_files(), ['/mock-checkout/LayoutTests/TestExpectations',
'/mock-checkout/LayoutTests/platform/gtk/TestExpectations',
'/mock-checkout/LayoutTests/platform/gtk-wk1/TestExpectations'])
port = self.make_port(options=MockOptions(webkit_test_runner=True))
self.assertEqual(port.expectations_files(), ['/mock-checkout/LayoutTests/TestExpectations',
'/mock-checkout/LayoutTests/platform/gtk/TestExpectations',
'/mock-checkout/LayoutTests/platform/wk2/TestExpectations',
'/mock-checkout/LayoutTests/platform/gtk-wk2/TestExpectations'])
def test_show_results_html_file(self):
port = self.make_port()
port._executive = MockExecutive(should_log=True)
expected_logs = "MOCK run_command: ['Tools/Scripts/run-launcher', '--release', '--gtk', 'file://test.html'], cwd=/mock-checkout\n"
OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_logs=expected_logs)
def test_default_timeout_ms(self):
self.assertEqual(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 6000)
self.assertEqual(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 12000)
def test_get_crash_log(self):
core_directory = os.environ.get('WEBKIT_CORE_DUMPS_DIRECTORY', '/path/to/coredumps')
core_pattern = os.path.join(core_directory, "core-pid_%p-_-process_%e")
mock_empty_crash_log = """\
Crash log for DumpRenderTree (pid 28529):
Coredump core-pid_28529-_-process_DumpRenderTree not found. To enable crash logs:
- run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern
- enable core dumps: ulimit -c unlimited
- set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(core_directory)s
STDERR: <empty>""" % locals()
def _mock_gdb_output(coredump_path):
return (mock_empty_crash_log, [])
port = self.make_port()
port._get_gdb_output = _mock_gdb_output
stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=None)
self.assertEqual(stderr, "")
self.assertMultiLineEqual(log, mock_empty_crash_log)
stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=0.0)
self.assertEqual(stderr, "")
self.assertMultiLineEqual(log, mock_empty_crash_log)
| lgpl-3.0 |
keedio/hue | desktop/core/ext-py/Django-1.6.10/django/db/backends/postgresql_psycopg2/base.py | 47 | 6866 | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import sys
from django.db.backends import *
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
from django.db.backends.postgresql_psycopg2.client import DatabaseClient
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.db.backends.postgresql_psycopg2.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeText, SafeBytes
from django.utils.timezone import utc
try:
import psycopg2 as Database
import psycopg2.extensions
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
def utc_tzinfo_factory(offset):
if offset != 0:
raise AssertionError("database connection isn't set to UTC")
return utc
class DatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
can_return_id_from_insert = True
requires_rollback_on_dirty_transaction = True
has_real_datatype = True
can_defer_constraint_checks = True
has_select_for_update = True
has_select_for_update_nowait = True
has_bulk_insert = True
uses_savepoints = True
supports_tablespaces = True
supports_transactions = True
can_distinct_on_fields = True
can_rollback_ddl = True
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
opts = self.settings_dict["OPTIONS"]
RC = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
self.isolation_level = opts.get('isolation_level', RC)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'],
}
conn_params.update(settings_dict['OPTIONS'])
if 'autocommit' in conn_params:
del conn_params['autocommit']
if 'isolation_level' in conn_params:
del conn_params['isolation_level']
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
def get_new_connection(self, conn_params):
return Database.connect(**conn_params)
def init_connection_state(self):
settings_dict = self.settings_dict
self.connection.set_client_encoding('UTF8')
tz = 'UTC' if settings.USE_TZ else settings_dict.get('TIME_ZONE')
if tz:
try:
get_parameter_status = self.connection.get_parameter_status
except AttributeError:
# psycopg2 < 2.0.12 doesn't have get_parameter_status
conn_tz = None
else:
conn_tz = get_parameter_status('TimeZone')
if conn_tz != tz:
self.connection.cursor().execute(
self.ops.set_time_zone_sql(), [tz])
# Commit after setting the time zone (see #17062)
self.connection.commit()
self.connection.set_isolation_level(self.isolation_level)
def create_cursor(self):
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return cursor
def _set_isolation_level(self, isolation_level):
assert isolation_level in range(1, 5) # Use set_autocommit for level = 0
if self.psycopg2_version >= (2, 4, 2):
self.connection.set_session(isolation_level=isolation_level)
else:
self.connection.set_isolation_level(isolation_level)
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
if self.psycopg2_version >= (2, 4, 2):
self.connection.autocommit = autocommit
else:
if autocommit:
level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
else:
level = self.isolation_level
self.connection.set_isolation_level(level)
def check_constraints(self, table_names=None):
"""
To check constraints, we set constraints to immediate. Then, when we're done, we must ensure they
are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1")
except Database.Error:
return False
else:
return True
@cached_property
def psycopg2_version(self):
version = psycopg2.__version__.split(' ', 1)[0]
return tuple(int(v) for v in version.split('.'))
@cached_property
def pg_version(self):
with self.temporary_connection():
return get_version(self.connection)
| apache-2.0 |
tobegit3hub/glance_docker | glance/db/sqlalchemy/migrate_repo/versions/026_add_location_storage_information.py | 19 | 1514 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from glance.db.sqlalchemy.migrate_repo import schema
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData()
meta.bind = migrate_engine
image_locations_table = sqlalchemy.Table('image_locations',
meta,
autoload=True)
meta_data = sqlalchemy.Column('meta_data',
schema.PickleType(),
default={})
meta_data.create(image_locations_table)
def downgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData()
meta.bind = migrate_engine
image_locations_table = sqlalchemy.Table('image_locations',
meta,
autoload=True)
image_locations_table.columns['meta_data'].drop()
| apache-2.0 |
luoyetx/Apriori | apriori/apriori.py | 1 | 9932 | # -*- coding: utf-8 -*-
from collections import defaultdict
from itertools import combinations
from sys import stdout
class cached_property(object):
"""A cached property only computed once
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
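# Editor's note: a brief illustrative sketch of how the descriptor above is
# used (not part of the original file). Because __get__ stores the computed
# value in obj.__dict__ under the wrapped function's name, and the class only
# defines __get__ (a non-data descriptor), later attribute lookups hit the
# instance dict and the function body runs only once per instance:
#
#   class Example(object):
#       @cached_property
#       def answer(self):
#           print('computing')  # printed only on the first access
#           return 42
#
#   e = Example()
#   e.answer  # prints 'computing' and returns 42
#   e.answer  # returns the cached 42 from e.__dict__, no recomputation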
class Base(object):
"""A base workflow for Apriori algorithm
"""
def _before_generate_frequent_itemset(self):
"""Invoked before generate_frequent_itemset()
"""
pass
def _after_generate_frequent_itemset(self):
"""Invoked before generate_frequent_itemset()
"""
pass
def generate_frequent_itemset(self):
"""Generate and return frequent itemset
"""
raise NotImplementedError("generate_frequent_itemset(self) need to be implemented.")
def _before_generate_rule(self):
"""Invoked before generate_frequent_itemset()
"""
pass
def _after_generate_rule(self):
"""Invoked before generate_frequent_itemset()
"""
pass
def generate_rule(self):
"""Generate and return rule
"""
raise NotImplementedError("generate_rule(self) need to be implemented.")
def run(self):
"""Run Apriori algorithm and return rules
"""
# generate frequent itemset
self._before_generate_frequent_itemset()
self.generate_frequent_itemset()
self._after_generate_frequent_itemset()
# generate rule
self._before_generate_rule()
self.generate_rule()
self._after_generate_rule()
class Apriori(Base):
"""A simple implementation of Apriori algorithm
Example:
dataset = [
['bread', 'milk'],
['bread', 'diaper', 'beer', 'egg'],
['milk', 'diaper', 'beer', 'cola'],
['bread', 'milk', 'diaper', 'beer'],
['bread', 'milk', 'diaper', 'cola'],
]
minsup = minconf = 0.6
apriori = Apriori(dataset, minsup, minconf)
apriori.run()
apriori.print_rule()
Results:
Rules
milk --> bread (confidence = 0.75)
bread --> milk (confidence = 0.75)
diaper --> bread (confidence = 0.75)
bread --> diaper (confidence = 0.75)
beer --> diaper (confidence = 1.0)
diaper --> beer (confidence = 0.75)
diaper --> milk (confidence = 0.75)
milk --> diaper (confidence = 0.75)
"""
def __init__(self, transaction_list, minsup, minconf, selected_items=None):
"""Initialization
:param transaction_list: a list that contains transactions
:param minsup: minimum support
:param minconf: minimum confidence
:param selected_items: selected items in frequent itemset, default `None`
"""
self.transaction_list = transaction_list
self.transaction_list_full_length = len(transaction_list)
self.minsup = minsup
self.minconf = minconf
if selected_items is not None and selected_items != []:
self.selected_items = frozenset(selected_items)
else:
self.selected_items = None
self.frequent_itemset = dict()
# support for every frequent itemset
self.frequent_itemset_support = defaultdict(float)
# convert transaction_list
self.transaction_list = list([frozenset(transaction) \
for transaction in transaction_list])
self.rule = []
def set_selected_items(self, selected_items):
"""Set selected items
"""
self.selected_items = frozenset(selected_items)
@cached_property
def items(self):
"""Return all items in the self.transaction_list
"""
items = set()
for transaction in self.transaction_list:
for item in transaction:
items.add(item)
return items
def filter_with_minsup(self, itemsets):
"""Return subset of itemsets which satisfies minsup
and record their support
"""
local_counter = defaultdict(int)
for itemset in itemsets:
for transaction in self.transaction_list:
if itemset.issubset(transaction):
local_counter[itemset] += 1
# filter with counter
result = set()
for itemset, count in local_counter.items():
support = float(count) / self.transaction_list_full_length
if support >= self.minsup:
result.add(itemset)
self.frequent_itemset_support[itemset] = support
return result
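# Editor's note: a worked example of the support computation above, using the
# five-transaction dataset from the class docstring (illustrative only).
# {'bread', 'milk'} appears in 3 of the 5 transactions, so its support is
# 3 / 5 = 0.6; with minsup = 0.6 the itemset is kept and 0.6 is recorded in
# self.frequent_itemset_support. An itemset seen in only 2 transactions
# (support 0.4) would be dropped.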
def _after_generate_frequent_itemset(self):
"""Filter frequent itemset with selected items
"""
if self.selected_items is None:
return
local_remove = []
for key, val in self.frequent_itemset.items():
for itemset in val:
if not self.selected_items.issubset(itemset):
local_remove.append((key, itemset))
for (key, itemset) in local_remove:
self.frequent_itemset[key].remove(itemset)
def generate_frequent_itemset(self):
"""Generate and return frequent itemset
"""
def _apriori_gen(itemset, length):
"""Return candidate itemset with given itemset and length
"""
# simply use F(k-1) x F(k-1) (itemset + itemset)
return set([x.union(y) for x in itemset for y in itemset \
if len(x.union(y)) == length])
k = 1
current_itemset = set()
# generate 1-frequnt_itemset
for item in self.items: current_itemset.add(frozenset([item]))
self.frequent_itemset[k] = self.filter_with_minsup(current_itemset)
# generate k-frequent_itemset
while True:
k += 1
current_itemset = _apriori_gen(current_itemset, k)
current_itemset = self.filter_with_minsup(current_itemset)
if current_itemset != set([]):
self.frequent_itemset[k] = current_itemset
else:
break
return self.frequent_itemset
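# Editor's note: a small worked example of the F(k-1) x F(k-1) candidate step
# above (illustrative only). If the frequent 1-itemsets are {'bread'},
# {'milk'} and {'diaper'}, _apriori_gen(..., 2) unions every pair of sets and
# keeps only unions of length 2, giving the candidates {'bread', 'milk'},
# {'bread', 'diaper'} and {'milk', 'diaper'}; filter_with_minsup() then drops
# any candidate whose support falls below self.minsup before it is stored as
# the k=2 entry of self.frequent_itemset.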
def _generate_rule(self, itemset, frequent_itemset_k):
"""Generate rule with F(k) in DFS style
"""
# make sure the itemset has at least two element to generate the rule
if len(itemset) < 2:
return
for element in combinations(list(itemset), 1):
rule_head = itemset - frozenset(element)
confidence = self.frequent_itemset_support[frequent_itemset_k] / \
self.frequent_itemset_support[rule_head]
if confidence >= self.minconf:
rule = ((rule_head, itemset - rule_head), confidence)
# if rule not in self.rule, add and recall _generate_rule() in DFS
if rule not in self.rule:
self.rule.append(rule)
self._generate_rule(rule_head, frequent_itemset_k)
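# Editor's note: a worked example of the confidence test above, based on the
# dataset in the class docstring (illustrative only). For the frequent
# itemset {'bread', 'milk'} (support 0.6), removing one element gives the
# rule head {'milk'} (support 0.8), so the rule milk --> bread has
# confidence 0.6 / 0.8 = 0.75; with minconf = 0.6 it is kept, which matches
# the "milk --> bread (confidence = 0.75)" line in the docstring's results.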
def generate_rule(self):
"""Generate and return rule
"""
# generate frequent itemset if not generated
if len(self.frequent_itemset) == 0:
self.generate_frequent_itemset()
# generate in DFS style
for key, val in self.frequent_itemset.items():
if key == 1:
continue
for itemset in val:
self._generate_rule(itemset, itemset)
return self.rule
def print_frequent_itemset(self):
"""Print out frequent itemset
"""
stdout.write('======================================================\n')
stdout.write('Frequent itemset:\n')
for key, val in self.frequent_itemset.items():
#stdout.write('frequent itemset size of {0}:\n'.format(key))
for itemset in val:
stdout.write('(')
stdout.write(', '.join(itemset))
stdout.write(')')
stdout.write(' support = {0}\n'.format(round(self.frequent_itemset_support[itemset], 3)))
stdout.write('======================================================\n')
def print_rule(self):
"""Print out rules
"""
stdout.write('======================================================\n')
stdout.write('Rules:\n')
for rule in self.rule:
head = rule[0][0]
tail = rule[0][1]
confidence = rule[1]
stdout.write('(')
stdout.write(', '.join(head))
stdout.write(')')
stdout.write(' ==> ')
stdout.write('(')
stdout.write(', '.join(tail))
stdout.write(')')
stdout.write(' confidence = {0}\n'.format(round(confidence, 3)))
stdout.write('======================================================\n')
class ImprovedApriori(Apriori):
"""Use Hash to filter frequent itemsets
"""
def filter_with_minsup(self, itemsets):
"""Return subset of itemset which satisfies minsup
and record their support
"""
for itemset in itemsets:
k = len(itemset)
break
local_counter = defaultdict(int)
for transaction in self.transaction_list:
for itemset in combinations(list(transaction), k):
if frozenset(itemset) in itemsets:
local_counter[frozenset(itemset)] += 1
# filter with counter
result = set()
for itemset, count in local_counter.items():
support = float(count) / self.transaction_list_full_length
if support >= self.minsup:
result.add(itemset)
self.frequent_itemset_support[itemset] = support
return result
| mit |
gptech/ansible | lib/ansible/modules/network/iosxr/iosxr_system.py | 50 | 8452 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: iosxr_system
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the system attributes on Cisco IOS XR devices
description:
- This module provides declarative management of node system attributes
on Cisco IOS XR devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
extends_documentation_fragment: iosxr
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
lookup_source:
description:
- The C(lookup_source) argument provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) must be a valid interface configured
on the device.
lookup_enabled:
description:
- Provides administrative control
for enabling or disabling DNS lookups. When this argument is
set to True, lookups are performed and when it is set to False,
lookups are not performed.
type: bool
name_servers:
description:
- The C(name_servers) argument accepts a list of DNS name servers by
way of either FQDN or IP address to use to perform name resolution
lookups. This argument accepts either a list of DNS servers. See
examples.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain-name
iosxr_system:
hostname: iosxr01
domain_name: test.example.com
domain_search:
- ansible.com
- redhat.com
- cisco.com
- name: remove configuration
iosxr_system:
state: absent
- name: configure DNS lookup sources
iosxr_system:
lookup_source: MgmtEth0/0/CPU0/0
lookup_enabled: yes
- name: configure name servers
iosxr_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname iosxr01
- ip domain-name test.example.com
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.iosxr import get_config, load_config
from ansible.module_utils.iosxr import iosxr_argument_spec, check_args
def diff_list(want, have):
adds = set(want).difference(have)
removes = set(have).difference(want)
return (adds, removes)
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
if state == 'absent':
if have['hostname'] != 'ios':
commands.append('no hostname')
if have['domain_name']:
commands.append('no domain name')
if have['lookup_source']:
commands.append('no domain lookup source-interface %s' % have['lookup_source'])
if not have['lookup_enabled']:
commands.append('no domain lookup disable')
for item in have['name_servers']:
commands.append('no domain name-server %s' % item)
for item in have['domain_search']:
commands.append('no domain list %s' % item)
elif state == 'present':
if needs_update('hostname'):
commands.append('hostname %s' % want['hostname'])
if needs_update('domain_name'):
commands.append('domain name %s' % want['domain_name'])
if needs_update('lookup_source'):
commands.append('domain lookup source-interface %s' % want['lookup_source'])
if needs_update('lookup_enabled'):
cmd = 'domain lookup disable'
if want['lookup_enabled']:
cmd = 'no %s' % cmd
commands.append(cmd)
if want['name_servers'] is not None:
adds, removes = diff_list(want['name_servers'], have['name_servers'])
for item in adds:
commands.append('domain name-server %s' % item)
for item in removes:
commands.append('no domain name-server %s' % item)
if want['domain_search'] is not None:
adds, removes = diff_list(want['domain_search'], have['domain_search'])
for item in adds:
commands.append('domain list %s' % item)
for item in removes:
commands.append('no domain list %s' % item)
return commands
def parse_hostname(config):
match = re.search('^hostname (\S+)', config, re.M)
return match.group(1)
def parse_domain_name(config):
match = re.search('^domain name (\S+)', config, re.M)
if match:
return match.group(1)
def parse_lookup_source(config):
match = re.search('^domain lookup source-interface (\S+)', config, re.M)
if match:
return match.group(1)
def map_config_to_obj(module):
config = get_config(module)
return {
'hostname': parse_hostname(config),
'domain_name': parse_domain_name(config),
'domain_search': re.findall('^domain list (\S+)', config, re.M),
'lookup_source': parse_lookup_source(config),
'lookup_enabled': 'domain lookup disable' not in config,
'name_servers': re.findall('^domain name-server (\S+)', config, re.M)
}
def map_params_to_obj(module):
return {
'hostname': module.params['hostname'],
'domain_name': module.params['domain_name'],
'domain_search': module.params['domain_search'],
'lookup_source': module.params['lookup_source'],
'lookup_enabled': module.params['lookup_enabled'],
'name_servers': module.params['name_servers']
}
def main():
""" Main entry point for Ansible module execution
"""
argument_spec = dict(
hostname=dict(),
domain_name=dict(),
domain_search=dict(type='list'),
name_servers=dict(type='list'),
lookup_source=dict(),
lookup_enabled=dict(type='bool'),
state=dict(choices=['present', 'absent'], default='present')
)
argument_spec.update(iosxr_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands, result['warnings'], commit=True)
result['changed'] = True
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 |
alanhamlett/flask | examples/flaskr/flaskr.py | 157 | 2893 | # -*- coding: utf-8 -*-
"""
Flaskr
~~~~~~
A microblog example application written as Flask tutorial with
Flask and sqlite3.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
# create our little application :)
app = Flask(__name__)
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'flaskr.db'),
DEBUG=True,
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
"""Connects to the specific database."""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def init_db():
"""Initializes the database."""
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.cli.command('initdb')
def initdb_command():
"""Creates the database tables."""
init_db()
print('Initialized the database.')
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
@app.route('/')
def show_entries():
db = get_db()
cur = db.execute('select title, text from entries order by id desc')
entries = cur.fetchall()
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
db = get_db()
db.execute('insert into entries (title, text) values (?, ?)',
[request.form['title'], request.form['text']])
db.commit()
flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
| bsd-3-clause |
wilvk/ansible | test/units/modules/network/nxos/test_nxos_vxlan_vtep.py | 57 | 2521 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_vxlan_vtep
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVxlanVtepVniModule(TestNxosModule):
module = nxos_vxlan_vtep
def setUp(self):
super(TestNxosVxlanVtepVniModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
super(TestNxosVxlanVtepVniModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_vxlan_vtep', 'config.cfg')
self.load_config.return_value = None
def test_nxos_vxlan_vtep(self):
set_module_args(dict(interface='nve1', description='simple description'))
self.execute_module(changed=True, commands=['interface nve1', 'description simple description'])
def test_nxos_vxlan_vtep_present_no_change(self):
set_module_args(dict(interface='nve1'))
self.execute_module(changed=False, commands=[])
def test_nxos_vxlan_vtep_absent(self):
set_module_args(dict(interface='nve1', state='absent'))
self.execute_module(changed=True, commands=['no interface nve1'])
def test_nxos_vxlan_vtep_absent_no_change(self):
set_module_args(dict(interface='nve2', state='absent'))
self.execute_module(changed=False, commands=[])
| gpl-3.0 |
lazytech-org/RIOT | tests/libfixmath/do-test.py | 19 | 2250 | #!/usr/bin/env python3
import math
import operator
import sys
def rem(a, b):
ret = a % b
if ret < 0 and a > 0 and b < 0 or \
ret > 0 and a < 0 and b > 0:
ret -= b
return ret
FUNS = {
'add': operator.add,
'sub': operator.sub,
'mul': operator.mul,
'div': operator.truediv,
'mod': rem,
'sadd': operator.add,
'ssub': operator.sub,
'smul': operator.mul,
'sdiv': operator.truediv,
'min': min,
'max': max,
'abs': abs,
'sqrt': math.sqrt,
'sq': lambda x: x * x,
'sin': math.sin,
'cos': math.cos,
'tan': math.tan,
'asin': math.asin,
'acos': math.acos,
'atan': math.atan,
'exp': math.exp,
'log': math.log,
'log2': math.log2,
'slog2': math.log2,
}
ABS_ERROR_LIMIT = 0.011
def main():
total = 0
errors = 0
print('Calculation: abs result != exp result, abs error > limit')
started = False
for line in sys.stdin:
line = line.strip()
if not started:
if line == 'Unary.':
print(line)
started = True
continue
elif line == 'Binary.':
print(line)
continue
elif line == 'Done.':
print(line)
break
total += 1
try:
res_locals = {}
res_locals['input'], res_locals['expected'] = map(str.strip, line.split('='))
exec('result = {}'.format(res_locals['input']), FUNS, res_locals)
abs_error = abs(res_locals['result'] - float(res_locals['expected']))
res_locals['result'] = '{:.4f}'.format(res_locals['result'])
if abs_error > ABS_ERROR_LIMIT:
print('{}: {} != {}, {:.4f} > {}'.format(res_locals['input'], res_locals['result'], res_locals['expected'],
abs_error, ABS_ERROR_LIMIT))
errors += 1
except:
errors += 1
print('ERROR {}'.format(line))
print('{} calculations passed.'.format(total - errors))
if errors:
print('{} calculations had errors.'.format(errors))
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
| lgpl-2.1 |
frouty/odoo_oph | openerp/workflow/wkf_service.py | 61 | 6596 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import instance
import openerp.netsvc as netsvc
class workflow_service(netsvc.Service):
"""
Sometimes you might want to fire a signal or re-evaluate the current state
of a workflow using the service's API. You can access the workflow services
using:
>>> import netsvc
>>> wf_service = netsvc.LocalService("workflow")
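    >>> # and then, for example, fire a signal on a record; the model name,
    >>> # record id and signal below are purely illustrative:
    >>> wf_service.trg_validate(uid, 'sale.order', order_id, 'order_confirm', cr)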
"""
def __init__(self, name='workflow'):
netsvc.Service.__init__(self, name)
self.wkf_on_create_cache={}
def clear_cache(self, cr, uid):
self.wkf_on_create_cache[cr.dbname]={}
def trg_write(self, uid, res_type, res_id, cr):
"""
        Reevaluates the specified workflow instance. Thus, if any condition for
        a transition has been changed in the backend, running ``trg_write``
        will move the workflow over that transition.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
ident = (uid,res_type,res_id)
cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (res_id or None,res_type or None, 'active'))
for (id,) in cr.fetchall():
instance.update(cr, id, ident)
def trg_trigger(self, uid, res_type, res_id, cr):
"""
Activate a trigger.
If a workflow instance is waiting for a trigger from another model, then this
trigger can be activated if its conditions are met.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
cr.execute('select instance_id from wkf_triggers where res_id=%s and model=%s', (res_id,res_type))
res = cr.fetchall()
for (instance_id,) in res:
cr.execute('select %s,res_type,res_id from wkf_instance where id=%s', (uid, instance_id,))
ident = cr.fetchone()
instance.update(cr, instance_id, ident)
def trg_delete(self, uid, res_type, res_id, cr):
"""
Delete a workflow instance
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
ident = (uid,res_type,res_id)
instance.delete(cr, ident)
def trg_create(self, uid, res_type, res_id, cr):
"""
Create a new workflow instance
:param res_type: the model name
        :param res_id: the model instance id to own the created workflow instance
:param cr: a database cursor
"""
ident = (uid,res_type,res_id)
self.wkf_on_create_cache.setdefault(cr.dbname, {})
if res_type in self.wkf_on_create_cache[cr.dbname]:
wkf_ids = self.wkf_on_create_cache[cr.dbname][res_type]
else:
cr.execute('select id from wkf where osv=%s and on_create=True', (res_type,))
wkf_ids = cr.fetchall()
self.wkf_on_create_cache[cr.dbname][res_type] = wkf_ids
for (wkf_id,) in wkf_ids:
instance.create(cr, ident, wkf_id)
def trg_validate(self, uid, res_type, res_id, signal, cr):
"""
Fire a signal on a given workflow instance
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:signal: the signal name to be fired
:param cr: a database cursor
"""
result = False
ident = (uid,res_type,res_id)
        # ids of all active workflow instances for a corresponding resource (id, model_name)
cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (res_id, res_type, 'active'))
for (id,) in cr.fetchall():
res2 = instance.validate(cr, id, ident, signal)
result = result or res2
return result
def trg_redirect(self, uid, res_type, res_id, new_rid, cr):
"""
Re-bind a workflow instance to another instance of the same model.
Make all workitems which are waiting for a (subflow) workflow instance
for the old resource point to the (first active) workflow instance for
the new resource.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
        :param new_rid: the model instance id to own the workflow instance
:param cr: a database cursor
"""
# get ids of wkf instances for the old resource (res_id)
#CHECKME: shouldn't we get only active instances?
cr.execute('select id, wkf_id from wkf_instance where res_id=%s and res_type=%s', (res_id, res_type))
for old_inst_id, wkf_id in cr.fetchall():
# first active instance for new resource (new_rid), using same wkf
cr.execute(
'SELECT id '\
'FROM wkf_instance '\
'WHERE res_id=%s AND res_type=%s AND wkf_id=%s AND state=%s',
(new_rid, res_type, wkf_id, 'active'))
new_id = cr.fetchone()
if new_id:
# select all workitems which "wait" for the old instance
cr.execute('select id from wkf_workitem where subflow_id=%s', (old_inst_id,))
for (item_id,) in cr.fetchall():
# redirect all those workitems to the wkf instance of the new resource
cr.execute('update wkf_workitem set subflow_id=%s where id=%s', (new_id[0], item_id))
workflow_service()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kartben/iotivity | tools/scons/BoostBuild.py | 1 | 5868 | # -*- coding: utf-8 -*-
# *********************************************************************
#
# Copyright 2014 Intel Mobile Communications GmbH All Rights Reserved.
#
# *********************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *********************************************************************
# This builder executes the boost builder ('b2') for the toolchain
# defined currently in the SCons environment. This builder was created
# to produce cross-compiled versions of boost. In particular, it was
# created to build boost binaries for Android's various architectures.
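#
# A typical hook-up from an SConscript might look like the sketch below
# (the tool path, target name and boost source location are hypothetical):
#   env.Tool('BoostBuild', toolpath=['tools/scons'])
#   env.BoostBuild('boost.built', 'extlibs/boost/boost_1_58_0/b2',
#                  PREFIX=env['PREFIX'], MODULES=['thread', 'system'])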
import os, subprocess
import SCons.Builder, SCons.Node, SCons.Errors
# Creates the building message
#
# @param s original message
# @param target target name
# @param source source name
# @param env environment object
def __message( s, target, source, env ) :
print "building boost from [%s] for ..." % (source[0])
# Create the builder action which constructs a user-config.jam based
# on the current toolchain and executes the boost build system ('b2')
#
# @param target target file on the local drive
# @param source path to the boost source tree ('b2' is run from its directory)
# @param env environment object
def __action( target, source, env ) :
cmd = None
# Windows...
if env["PLATFORM"] in ["win32"] :
if env.WhereIs("cmd") :
# TODO: Add Windows Support
cmd = None
# read the tools on *nix systems and sets the default parameters
elif env["PLATFORM"] in ["darwin", "linux", "posix"] :
if env.WhereIs("sh") :
cmd = ['./b2']
if not cmd :
raise SCons.Errors.StopError("Boost build system not supported on this platform [%s]" % (env["PLATFORM"]))
# We need to be in the target's directory
cwd = os.path.dirname(os.path.realpath(source[0].path))
# Gather all of the path, bin and flags
version = env.get('VERSION','')
target_os = env['TARGET_OS']
target_arch = env['TARGET_ARCH']
tool_path = os.path.dirname(env['CXX'])
cxx_bin = os.path.basename(env['CXX'])
ar_bin = os.path.basename(env['AR'])
ranlib_bin = os.path.basename(env['RANLIB'])
ccflags = list(env['CFLAGS'])
cxxflags = list(env['CXXFLAGS'])
try:
cxxflags.remove('-fno-rtti')
except ValueError:
pass
try:
cxxflags.remove('-fno-exceptions')
except ValueError:
pass
# Write a user-config for this variant
user_config_name = cwd+os.sep+'tools'+os.sep+'build'+os.sep+'src'+os.sep+'user-config.jam'
user_config_file = open(user_config_name, 'w')
user_config_file.write('import os ;\n')
user_config_file.write('using gcc :')
user_config_file.write(' '+version+' :')
#user_config_file.write(' :')
#user_config_file.write(' '+os.path.basename(toolchain['CXX']['BIN'])+' :\n')
user_config_file.write(' '+cxx_bin+' :\n')
user_config_file.write(' <archiver>'+ar_bin+'\n')
user_config_file.write(' <ranlib>'+ranlib_bin+'\n')
for value in env['CPPDEFINES'] :
if len(value) > 1 :
user_config_file.write(' <compileflags>-D'+value[0]+'='+value[1]+'\n')
else :
user_config_file.write(' <compileflags>-D'+value[0]+'\n')
for value in env['CPPPATH'] :
user_config_file.write(' <compileflags>-I'+value+'\n')
for flag in ccflags :
user_config_file.write(' <compileflags>'+flag+'\n')
for flag in cxxflags :
user_config_file.write(' <cxxflags>'+flag+'\n')
user_config_file.write(' ;\n')
user_config_file.close();
# Ensure that the toolchain is in the PATH
penv = os.environ.copy()
penv["PATH"] = tool_path+":" + penv["PATH"]
build_path = 'build' + os.sep + target_os + os.sep + target_arch
cmd.append('-q')
cmd.append('target-os=linux')
cmd.append('link=static')
cmd.append('threading=multi')
cmd.append('--layout=system')
cmd.append('--build-type=minimal')
cmd.append('--prefix='+env['PREFIX'])
cmd.append('--build-dir='+build_path)
for module in env.get('MODULES',[]) :
cmd.append('--with-'+module)
cmd.append('headers')
cmd.append('install')
# build it now (we need the shell, because some programs need it)
devnull = open(os.devnull, "wb")
handle = subprocess.Popen( cmd, env=penv, cwd=cwd ) #, stdout=devnull )
    if handle.wait() != 0 :
raise SCons.Errors.BuildError( "Building boost [%s] on the source [%s]" % (cmd, source[0]) )
# Define the emitter of the builder
#
# @param target target file on the local drive
# @param source
# @param env environment object
def __emitter( target, source, env ) :
return target, source
# Generate function which adds the builder to the environment
#
# @param env environment object
def generate( env ) :
env["BUILDERS"]["BoostBuild"] = SCons.Builder.Builder( action = __action, emitter = __emitter, target_factory = SCons.Node.FS.Entry, source_factory = SCons.Node.FS.File, single_source = True, PRINT_CMD_LINE_FUNC = __message )
# Exists function of the builder
# @param env environment object
# @return true
def exists( env ) :
return 1
| apache-2.0 |
davetcoleman/sdk-examples | baxter/baxter_interface/src/baxter_interface/robustcontroller.py | 3 | 5500 | # Copyright (c) 2013, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import errno
import roslib
roslib.load_manifest('baxter_interface')
import rospy
from baxter_msgs.msg import (
RobustControllerStatus,
)
class RobustController(object):
STATE_IDLE = 0
STATE_STARTING = 1
STATE_RUNNING = 2
STATE_STOPPING = 3
def __init__(self, namespace, enable_msg, disable_msg, timeout = 60):
"""
Wrapper around controlling a RobustController
@param namespace - namespace containing the enable and status topics
@param enable_msg - message to send to enable the RC
@param disable_msg - message to send to disable the RC
@param timeout - seconds to wait for the RC to finish [60]
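        Example sketch (namespace and message types below are hypothetical):
            rc = RobustController('/robustcontroller/tare',
                                  EnableTareMsg(), DisableTareMsg(), timeout=30)
            rc.run()   # blocks until completion; raises IOError on failure/timeout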
"""
self._command_pub = rospy.Publisher(
namespace + '/enable',
type(enable_msg))
self._status_sub = rospy.Subscriber(
namespace + '/status',
RobustControllerStatus,
self._callback)
self._enable_msg = enable_msg
self._disable_msg = disable_msg
self._timeout = timeout
self._state = self.STATE_IDLE
self._return = 0
rospy.on_shutdown(self._on_shutdown)
def _callback(self, msg):
if self._state == self.STATE_RUNNING:
if msg.complete == RobustControllerStatus.COMPLETE_W_SUCCESS:
self._state = self.STATE_STOPPING
self._return = 0
elif msg.complete == RobustControllerStatus.COMPLETE_W_FAILURE:
self._state = self.STATE_STOPPING
self._return = errno.EIO
elif not msg.isEnabled:
self._state = self.STATE_IDLE
self._return = errno.ENOMSG
elif self._state == self.STATE_STOPPING and not msg.isEnabled:
# Would be nice to use msg.state here, but it does not
# consistently reflect reality.
self._state = self.STATE_IDLE
elif self._state == self.STATE_STARTING and msg.isEnabled:
self._state = self.STATE_RUNNING
def _run_loop(self):
        # RobustControllers need to receive messages more often than once
        # per second (i.e. at intervals of less than 1s) in order to
        # continue their current operation.
rate = rospy.Rate(2)
start = rospy.Time.now()
while not rospy.is_shutdown():
if self._state == self.STATE_RUNNING and (rospy.Time.now() - start).to_sec() > self._timeout:
self._state = self.STATE_STOPPING
self._command_pub.publish(self._disable_msg)
self._return = errno.ETIMEDOUT
elif self._state in (self.STATE_STARTING, self.STATE_RUNNING):
self._command_pub.publish(self._enable_msg)
elif self._state == self.STATE_STOPPING:
self._command_pub.publish(self._disable_msg)
elif self._state == self.STATE_IDLE:
break
rate.sleep()
def _on_shutdown(self):
rate = rospy.Rate(2)
while not self._state == self.STATE_IDLE:
self._command_pub.publish(self._disable_msg)
rate.sleep()
self._return = errno.ECONNABORTED
def run(self):
"""
Enable the RobustController and run until completion or error.
"""
self._state = self.STATE_STARTING
self._command_pub.publish(self._enable_msg)
self._run_loop()
if self._return != 0:
if self._return == errno.EIO:
raise IOError(self._return, "Robust controller failed")
elif self._return == errno.ENOMSG:
raise IOError(self._return, "Robust controller failed to enable")
elif self._return == errno.ETIMEDOUT:
raise IOError(self._return, "Robust controller timed out")
elif self._return == errno.ECONNABORTED:
raise IOError(self._return, "Robust controller interruped by user")
else:
raise IOError(self._return)
| bsd-3-clause |
megaumi/django | tests/m2m_recursive/tests.py | 424 | 5410 | from __future__ import unicode_literals
from operator import attrgetter
from django.test import TestCase
from .models import Person
class RecursiveM2MTests(TestCase):
def setUp(self):
self.a, self.b, self.c, self.d = [
Person.objects.create(name=name)
for name in ["Anne", "Bill", "Chuck", "David"]
]
# Anne is friends with Bill and Chuck
self.a.friends.add(self.b, self.c)
# David is friends with Anne and Chuck - add in reverse direction
self.d.friends.add(self.a, self.c)
def test_recursive_m2m_all(self):
""" Test that m2m relations are reported correctly """
# Who is friends with Anne?
self.assertQuerysetEqual(
self.a.friends.all(), [
"Bill",
"Chuck",
"David"
],
attrgetter("name"),
ordered=False
)
# Who is friends with Bill?
self.assertQuerysetEqual(
self.b.friends.all(), [
"Anne",
],
attrgetter("name")
)
# Who is friends with Chuck?
self.assertQuerysetEqual(
self.c.friends.all(), [
"Anne",
"David"
],
attrgetter("name"),
ordered=False
)
# Who is friends with David?
self.assertQuerysetEqual(
self.d.friends.all(), [
"Anne",
"Chuck",
],
attrgetter("name"),
ordered=False
)
def test_recursive_m2m_reverse_add(self):
""" Test reverse m2m relation is consistent """
# Bill is already friends with Anne - add Anne again, but in the
# reverse direction
self.b.friends.add(self.a)
# Who is friends with Anne?
self.assertQuerysetEqual(
self.a.friends.all(), [
"Bill",
"Chuck",
"David",
],
attrgetter("name"),
ordered=False
)
# Who is friends with Bill?
self.assertQuerysetEqual(
self.b.friends.all(), [
"Anne",
],
attrgetter("name")
)
def test_recursive_m2m_remove(self):
""" Test that we can remove items from an m2m relationship """
# Remove Anne from Bill's friends
self.b.friends.remove(self.a)
# Who is friends with Anne?
self.assertQuerysetEqual(
self.a.friends.all(), [
"Chuck",
"David",
],
attrgetter("name"),
ordered=False
)
# Who is friends with Bill?
self.assertQuerysetEqual(
self.b.friends.all(), []
)
def test_recursive_m2m_clear(self):
""" Tests the clear method works as expected on m2m fields """
# Clear Anne's group of friends
self.a.friends.clear()
# Who is friends with Anne?
self.assertQuerysetEqual(
self.a.friends.all(), []
)
# Reverse relationships should also be gone
# Who is friends with Chuck?
self.assertQuerysetEqual(
self.c.friends.all(), [
"David",
],
attrgetter("name")
)
# Who is friends with David?
self.assertQuerysetEqual(
self.d.friends.all(), [
"Chuck",
],
attrgetter("name")
)
def test_recursive_m2m_add_via_related_name(self):
""" Tests that we can add m2m relations via the related_name attribute """
# David is idolized by Anne and Chuck - add in reverse direction
self.d.stalkers.add(self.a)
# Who are Anne's idols?
self.assertQuerysetEqual(
self.a.idols.all(), [
"David",
],
attrgetter("name"),
ordered=False
)
# Who is stalking Anne?
self.assertQuerysetEqual(
self.a.stalkers.all(), [],
attrgetter("name")
)
def test_recursive_m2m_add_in_both_directions(self):
""" Check that adding the same relation twice results in a single relation """
# Ann idolizes David
self.a.idols.add(self.d)
# David is idolized by Anne
self.d.stalkers.add(self.a)
# Who are Anne's idols?
self.assertQuerysetEqual(
self.a.idols.all(), [
"David",
],
attrgetter("name"),
ordered=False
)
# As the assertQuerysetEqual uses a set for comparison,
# check we've only got David listed once
self.assertEqual(self.a.idols.all().count(), 1)
def test_recursive_m2m_related_to_self(self):
""" Check the expected behavior when an instance is related to itself """
# Ann idolizes herself
self.a.idols.add(self.a)
# Who are Anne's idols?
self.assertQuerysetEqual(
self.a.idols.all(), [
"Anne",
],
attrgetter("name"),
ordered=False
)
# Who is stalking Anne?
self.assertQuerysetEqual(
self.a.stalkers.all(), [
"Anne",
],
attrgetter("name")
)
| bsd-3-clause |
jpanikulam/experiments | gpgpu/generators/sdf_shape_defs.py | 1 | 1463 | # %codegen(cl_gen)
import generate_opencl_structs
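# Each field entry below follows the schema consumed by generate_opencl_structs,
# summarised here for reference (the types are the ones used in this file):
#   {'type': 'vector' | 'float' | 'int' | 'bool',
#    'length': <number of elements>,
#    'name': <field name in the generated struct>}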
def main():
plane_defd = [
{
'type': 'vector',
'length': 3,
'name': 'normal',
},
{
'type': 'float',
'length': 1,
'name': 'd',
}
]
sphere_defd = [
{
'type': 'vector',
'length': 3,
'name': 'origin',
},
{
'type': 'float',
'length': 1,
'name': 'r',
}
]
box_defd = [
{
'type': 'vector',
'length': 3,
'name': 'origin',
},
{
'type': 'vector',
'length': 3,
'name': 'extents',
},
]
cfg_defd = [
{
'type': 'int',
'length': 1,
'name': 'debug_mode',
},
{
'type': 'bool',
'length': 1,
'name': 'test_feature',
},
{
'type': 'int',
'length': 1,
'name': 'terminal_iteration',
}
]
definitions = [
("Plane", plane_defd),
("Sphere", sphere_defd),
("Box", box_defd),
("RenderConfig", cfg_defd),
]
destination = "/home/jacob/repos/experiments/gpgpu/demos/signed_distance_shapes"
generate_opencl_structs.write_files(definitions, destination)
if __name__ == '__main__':
main()
| mit |
napkindrawing/ansible | lib/ansible/modules/cloud/vmware/vmware_vm_facts.py | 33 | 3282 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_vm_facts
short_description: Return basic facts pertaining to a vSphere virtual machine guest
description:
- Return basic facts pertaining to a vSphere virtual machine guest
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Gather all registered virtual machines
local_action:
module: vmware_vm_facts
hostname: esxi_or_vcenter_ip_or_hostname
username: username
password: password
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getallvms.py
def get_all_virtual_machines(content):
virtual_machines = get_all_objs(content, [vim.VirtualMachine])
_virtual_machines = {}
for vm in virtual_machines:
_ip_address = ""
summary = vm.summary
if summary.guest is not None:
_ip_address = summary.guest.ipAddress
if _ip_address is None:
_ip_address = ""
virtual_machine = {
summary.config.name: {
"guest_fullname": summary.config.guestFullName,
"power_state": summary.runtime.powerState,
"ip_address": _ip_address,
"uuid": summary.config.uuid
}
}
_virtual_machines.update(virtual_machine)
return _virtual_machines
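# The mapping returned above takes the following shape (values are illustrative):
#   {'my-vm': {'guest_fullname': 'Ubuntu Linux (64-bit)',
#              'power_state': 'poweredOn',
#              'ip_address': '192.0.2.10',
#              'uuid': '4237aa7c-...'}}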
def main():
argument_spec = vmware_argument_spec()
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
try:
content = connect_to_api(module)
_virtual_machines = get_all_virtual_machines(content)
module.exit_json(changed=False, virtual_machines=_virtual_machines)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
rkokkelk/CouchPotatoServer | libs/guessit/__main__.py | 94 | 6835 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from __future__ import print_function
from guessit import u
from guessit import slogging, guess_file_info
from optparse import OptionParser
import logging
import sys
import os
import locale
def detect_filename(filename, filetype, info=['filename'], advanced = False):
filename = u(filename)
print('For:', filename)
print('GuessIt found:', guess_file_info(filename, filetype, info).nice_string(advanced))
def run_demo(episodes=True, movies=True, advanced=False):
# NOTE: tests should not be added here but rather in the tests/ folder
# this is just intended as a quick example
if episodes:
testeps = [ 'Series/Californication/Season 2/Californication.2x05.Vaginatown.HDTV.XviD-0TV.[tvu.org.ru].avi',
'Series/dexter/Dexter.5x02.Hello,.Bandit.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi',
'Series/Treme/Treme.1x03.Right.Place,.Wrong.Time.HDTV.XviD-NoTV.[tvu.org.ru].avi',
'Series/Duckman/Duckman - 101 (01) - 20021107 - I, Duckman.avi',
'Series/Duckman/Duckman - S1E13 Joking The Chicken (unedited).avi',
'Series/Simpsons/The_simpsons_s13e18_-_i_am_furious_yellow.mpg',
'Series/Simpsons/Saison 12 Français/Simpsons,.The.12x08.A.Bas.Le.Sergent.Skinner.FR.[tvu.org.ru].avi',
'Series/Dr._Slump_-_002_DVB-Rip_Catalan_by_kelf.avi',
'Series/Kaamelott/Kaamelott - Livre V - Second Volet - HD 704x396 Xvid 2 pass - Son 5.1 - TntRip by Slurm.avi'
]
for f in testeps:
print('-'*80)
detect_filename(f, filetype='episode', advanced=advanced)
if movies:
testmovies = [ 'Movies/Fear and Loathing in Las Vegas (1998)/Fear.and.Loathing.in.Las.Vegas.720p.HDDVD.DTS.x264-ESiR.mkv',
'Movies/El Dia de la Bestia (1995)/El.dia.de.la.bestia.DVDrip.Spanish.DivX.by.Artik[SEDG].avi',
'Movies/Blade Runner (1982)/Blade.Runner.(1982).(Director\'s.Cut).CD1.DVDRip.XviD.AC3-WAF.avi',
'Movies/Dark City (1998)/Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD.mkv',
'Movies/Sin City (BluRay) (2005)/Sin.City.2005.BDRip.720p.x264.AC3-SEPTiC.mkv',
'Movies/Borat (2006)/Borat.(2006).R5.PROPER.REPACK.DVDRip.XviD-PUKKA.avi', # FIXME: PROPER and R5 get overwritten
'[XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv', # FIXME: title gets overwritten
'Battle Royale (2000)/Battle.Royale.(Batoru.Rowaiaru).(2000).(Special.Edition).CD1of2.DVDRiP.XviD-[ZeaL].avi',
'Movies/Brazil (1985)/Brazil_Criterion_Edition_(1985).CD2.English.srt',
'Movies/Persepolis (2007)/[XCT] Persepolis [H264+Aac-128(Fr-Eng)+ST(Fr-Eng)+Ind].mkv',
'Movies/Toy Story (1995)/Toy Story [HDTV 720p English-Spanish].mkv',
'Movies/Pirates of the Caribbean: The Curse of the Black Pearl (2003)/Pirates.Of.The.Carribean.DC.2003.iNT.DVDRip.XviD.AC3-NDRT.CD1.avi',
'Movies/Office Space (1999)/Office.Space.[Dual-DVDRip].[Spanish-English].[XviD-AC3-AC3].[by.Oswald].avi',
'Movies/The NeverEnding Story (1984)/The.NeverEnding.Story.1.1984.DVDRip.AC3.Xvid-Monteque.avi',
'Movies/Juno (2007)/Juno KLAXXON.avi',
'Movies/Chat noir, chat blanc (1998)/Chat noir, Chat blanc - Emir Kusturica (VO - VF - sub FR - Chapters).mkv',
'Movies/Wild Zero (2000)/Wild.Zero.DVDivX-EPiC.srt',
'Movies/El Bosque Animado (1987)/El.Bosque.Animado.[Jose.Luis.Cuerda.1987].[Xvid-Dvdrip-720x432].avi',
'testsmewt_bugs/movies/Baraka_Edition_Collector.avi'
]
for f in testmovies:
print('-'*80)
detect_filename(f, filetype = 'movie', advanced = advanced)
def main():
slogging.setupLogging()
# see http://bugs.python.org/issue2128
if sys.version_info.major < 3 and os.name == 'nt':
for i, a in enumerate(sys.argv):
sys.argv[i] = a.decode(locale.getpreferredencoding())
parser = OptionParser(usage = 'usage: %prog [options] file1 [file2...]')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
help = 'display debug output')
parser.add_option('-i', '--info', dest = 'info', default = 'filename',
help = 'the desired information type: filename, hash_mpc or a hash from python\'s '
'hashlib module, such as hash_md5, hash_sha1, ...; or a list of any of '
'them, comma-separated')
parser.add_option('-t', '--type', dest = 'filetype', default = 'autodetect',
help = 'the suggested file type: movie, episode or autodetect')
parser.add_option('-a', '--advanced', dest = 'advanced', action='store_true', default = False,
help = 'display advanced information for filename guesses, as json output')
parser.add_option('-d', '--demo', action='store_true', dest='demo', default=False,
help = 'run a few builtin tests instead of analyzing a file')
options, args = parser.parse_args()
if options.verbose:
logging.getLogger('guessit').setLevel(logging.DEBUG)
if options.demo:
run_demo(episodes=True, movies=True, advanced=options.advanced)
else:
if args:
for filename in args:
detect_filename(filename,
filetype = options.filetype,
info = options.info.split(','),
advanced = options.advanced)
else:
parser.print_help()
if __name__ == '__main__':
main()
| gpl-3.0 |
coreos/chromite | scripts/cros_generate_sysroot.py | 2 | 3711 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a sysroot tarball for building a specific package.
Meant for use after setup_board and build_packages have been run.
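Example invocation (board and package names are illustrative only):
  cros_generate_sysroot --board=amd64-generic --package=chromeos-base/libchrome \
      --out-dir=/tmp/sysroots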
"""
import os
from chromite.buildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import commandline
from chromite.lib import osutils
from chromite.lib import sudo
DEFAULT_NAME = 'sysroot_%(package)s.tar.xz'
PACKAGE_SEPARATOR = '/'
SYSROOT = 'sysroot'
def ParseCommandLine(argv):
"""Parse args, and run environment-independent checks."""
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('--board', required=True,
help=('The board to generate the sysroot for.'))
parser.add_argument('--package', required=True,
help=('The package to generate the sysroot for.'))
parser.add_argument('--out-dir', type=osutils.ExpandPath, required=True,
help='Directory to place the generated tarball.')
parser.add_argument('--out-file',
help=('The name to give to the tarball. Defaults to %r.'
% DEFAULT_NAME))
options = parser.parse_args(argv)
if not options.out_file:
options.out_file = DEFAULT_NAME % {
'package': options.package.replace(PACKAGE_SEPARATOR, '_')
}
return options
class GenerateSysroot(object):
"""Wrapper for generation functionality."""
PARALLEL_EMERGE = os.path.join(constants.CHROMITE_BIN_DIR, 'parallel_emerge')
def __init__(self, sysroot, options):
"""Initialize
Arguments:
sysroot: Path to sysroot.
options: Parsed options.
"""
self.sysroot = sysroot
self.options = options
def _InstallToolchain(self):
cros_build_lib.RunCommand(
[os.path.join(constants.CROSUTILS_DIR, 'install_toolchain'),
'--noconfigure', '--board_root', self.sysroot, '--board',
self.options.board])
def _InstallKernelHeaders(self):
cros_build_lib.SudoRunCommand(
[self.PARALLEL_EMERGE, '--board=%s' % self.options.board,
'--root-deps=rdeps', '--getbinpkg', '--usepkg',
'--root=%s' % self.sysroot, 'sys-kernel/linux-headers'])
def _InstallBuildDependencies(self):
cros_build_lib.SudoRunCommand(
[self.PARALLEL_EMERGE, '--board=%s' % self.options.board,
'--root=%s' % self.sysroot, '--usepkg', '--onlydeps',
'--usepkg-exclude=%s' % self.options.package, self.options.package])
def _CreateTarball(self):
target = os.path.join(self.options.out_dir, self.options.out_file)
cros_build_lib.CreateTarball(target, self.sysroot, sudo=True)
def Perform(self):
"""Generate the sysroot."""
self._InstallToolchain()
self._InstallKernelHeaders()
self._InstallBuildDependencies()
self._CreateTarball()
def FinishParsing(options):
"""Run environment dependent checks on parsed args."""
target = os.path.join(options.out_dir, options.out_file)
if os.path.exists(target):
cros_build_lib.Die('Output file %r already exists.' % target)
if not os.path.isdir(options.out_dir):
cros_build_lib.Die(
'Non-existent directory %r specified for --out-dir' % options.out_dir)
def main(argv):
options = ParseCommandLine(argv)
FinishParsing(options)
cros_build_lib.AssertInsideChroot()
with sudo.SudoKeepAlive(ttyless_sudo=False):
with osutils.TempDirContextManager(sudo_rm=True) as tempdir:
sysroot = os.path.join(tempdir, SYSROOT)
os.mkdir(sysroot)
GenerateSysroot(sysroot, options).Perform()
| bsd-3-clause |
psi4/psi4 | psi4/share/psi4/scripts/apply_license.py | 7 | 3497 | # Checks all psi4 relevant files for proper boilerplate GNU license.
# This is sold as is with no warranty -- probably should double check everything
# after running. I am not responsible if you break Psi4.
#
# Do not forget to do share/plugins by hand!
import os
# File type we know how to handle
ftypes = ['cc', 'h', 'py']
c_header ="""/*
* @BEGIN LICENSE
*
* Psi4: an open-source quantum chemistry software package
*
* Copyright (c) 2007-2021 The Psi4 Developers.
*
* The copyrights for code used from other parties are included in
* the corresponding files.
*
* This file is part of Psi4.
*
* Psi4 is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, version 3.
*
* Psi4 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License along
* with Psi4; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* @END LICENSE
*/"""
py_header = c_header.replace(' */', '#')
py_header = py_header.replace('/*', '#')
py_header = py_header.replace(' *', '#')
c_header = c_header.splitlines()
py_header = py_header.splitlines()
def check_header(infile):
f = open(infile, 'r+')
data = f.read().splitlines()
# Find the header location
max_lines = 30
try:
symbol = None
if filename.split('.')[-1] in ['py']:
start = data.index("# @BEGIN LICENSE") - 1
end = data.index("# @END LICENSE") + 1
if data[start] != '#' or data[end] != '#':
f.close()
print('Did not find "wings" of license block in file %s' % infile)
return
else:
start = data.index(" * @BEGIN LICENSE") - 1
end = data.index(" * @END LICENSE") + 1
if data[start] != '/*' or data[end] != ' */':
f.close()
print('Did not find "wings" of license block in file %s' % infile)
return
except:
print('Could not find license block in file %s' % infile)
f.close()
return
# Make sure the block actually looks like a license
license = data[start:end+1]
top = any("PSI4:" in x.upper() for x in license[:5])
bot = any("51 Franklin Street" in x for x in license[5:])
if not (top and bot):
print('Did not understand infile %s' % infile)
f.close()
return
# Replace license
if filename.split('.')[-1] in ['cc', 'h']:
data[start:end + 1] = c_header
elif filename.split('.')[-1] in ['py']:
data[start:end + 1] = py_header
else:
print('Did not understand infile end: %s' % infile)
f.close()
return
# Write it out
f.seek(0)
f.write("\n".join(data))
f.truncate()
f.close()
avoid_strings = ['qcdb', 'libJKFactory']
walk = list(os.walk('../../src/'))
walk += list(os.walk('../python'))
for root, dirnames, filenames in walk:
if any(x in root for x in avoid_strings):
continue
for filename in filenames:
if filename.split('.')[-1] not in ftypes:
continue
check_header(root + '/' + filename)
| lgpl-3.0 |
Tivix/wagtail | wagtail/utils/setup.py | 5 | 1510 | from __future__ import absolute_import, print_function, unicode_literals
import os
import subprocess
from setuptools import Command
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command.sdist import sdist as base_sdist
class assets_mixin(object):
def compile_assets(self):
try:
subprocess.check_call(['npm', 'run', 'build'])
except (OSError, subprocess.CalledProcessError) as e:
print('Error compiling assets: ' + str(e))
raise SystemExit(1)
class assets(Command, assets_mixin):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.compile_assets()
class sdist(base_sdist, assets_mixin):
def run(self):
self.compile_assets()
base_sdist.run(self)
class check_bdist_egg(bdist_egg):
# If this file does not exist, warn the user to compile the assets
sentinel_dir = 'wagtail/wagtailadmin/static/'
def run(self):
bdist_egg.run(self)
if not os.path.isdir(self.sentinel_dir):
print("\n".join([
"************************************************************",
"The front end assets for Wagtail are missing.",
"To generate the assets, please refer to the documentation in",
"docs/contributing/css_guidelines.rst",
"************************************************************",
]))
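# These command classes are intended to be registered with setuptools via
# cmdclass; a minimal sketch (keyword values shown here are illustrative):
#   setup(..., cmdclass={'sdist': sdist, 'bdist_egg': check_bdist_egg, 'assets': assets})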
| bsd-3-clause |
xxshutong/openerp-7.0 | openerp/addons/mrp/company.py | 56 | 1393 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class company(osv.osv):
_inherit = 'res.company'
_columns = {
'manufacturing_lead': fields.float('Manufacturing Lead Time', required=True,
help="Security days for each manufacturing operation."),
}
_defaults = {
'manufacturing_lead': lambda *a: 1.0,
}
company()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CareerVillage/slack-moderation | tools/smt/smt/conf/settings.py | 1 | 1207 | # -*- coding: utf-8 -*-
import os
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = None
PROJECT_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
def rel(*x):
return os.path.abspath(os.path.join(PROJECT_ROOT, *x))
SETUP_DIR = rel('../../../setup')
KEY_DIR = rel('../../../keys')
# Staging
STA_PUPPET_GIT_BRANCH = 'sta'
STA_PUPPET_GIT_REPO = '[email protected]:CareerVillage/slack-moderation.git'
STA_PUPPET_BASE_DOMAIN = 'staging.slack-moderation'
STA_PUPPET_AWS_ACCESS_KEY_ID = None
STA_PUPPET_AWS_SECRET_ACCESS_KEY = None
STA_PUPPET_SENTRY_DSN = None
STA_PUPPET_NEWRELIC_LICENSE = None
STA_PUPPET_SECRET_KEY = None
# Production
PRO_PUPPET_GIT_BRANCH = 'master'
PRO_PUPPET_GIT_REPO = '[email protected]:CareerVillage/slack-moderation.git'
PRO_PUPPET_BASE_DOMAIN = 'slack-moderation'
PRO_PUPPET_AWS_ACCESS_KEY_ID = None
PRO_PUPPET_AWS_SECRET_ACCESS_KEY = None
PRO_PUPPET_SENTRY_DSN = None
PRO_PUPPET_NEWRELIC_LICENSE = None
PRO_PUPPET_SECRET_KEY = None
try:
from secrets import *
except ImportError:
print 'Error importing secrets module on smt.conf.settings'
try:
from user import *
except ImportError:
print 'Error importing user module on smt.conf.settings'
| mit |
RecursiveGreen/pymod | formats/XM.py | 1 | 16803 | import struct
from pymod.constants import *
from pymod.module import *
from pymod.util import *
class XMNote(Note):
"""The definition of an note and it's effects in Fast Tracker II"""
def __init__(self, note=0, instrument=0, voleffect=0, volparam=0, effect=0, param=0):
super(XMNote, self).__init__(note, instrument, voleffect, volparam, effect, param)
def __unicode__(self):
keys = ['C-', 'C#', 'D-', 'D#', 'E-', 'F-', 'F#', 'G-', 'G#', 'A-', 'A#', 'B-']
commands = '123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if self.note == 0: ret1 = '...'
elif self.note > 0 and self.note <=120:
split = divmod(self.note-13, 12)
ret1 = '%s%s' % (keys[split[1]], str(split[0]))
elif self.note == 254: ret1 = '^^^'
elif self.note == 255: ret1 = '==='
if self.instrument: ret2 = hex(self.instrument)[2:].zfill(2).upper()
else: ret2 = '..'
if self.voleffect == VOLFX_NONE: ret3 = '..'
elif self.voleffect == VOLFX_VOLUME: ret3 = hex(self.volparam)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_VOLSLIDEDOWN: ret3 = hex(self.volparam + 0x60)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_VOLSLIDEUP: ret3 = hex(self.volparam + 0x70)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_FINEVOLDOWN: ret3 = hex(self.volparam + 0x80)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_FINEVOLUP: ret3 = hex(self.volparam + 0x90)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_VIBRATOSPEED: ret3 = hex(self.volparam + 0xA0)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_VIBRATODEPTH: ret3 = hex(self.volparam + 0xB0)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_PANNING: ret3 = hex(((self.volparam - 2) >> 2) + 0xC0)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_PANSLIDELEFT: ret3 = hex(self.volparam + 0xD0)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_PANSLIDERIGHT: ret3 = hex(self.volparam + 0xE0)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_TONEPORTAMENTO: ret3 = hex(self.volparam + 0xF0)[2:].zfill(2).upper()
if self.effect: letter = commands[self.effect-1]
else: letter = '.'
ret4 = '%s%s' % (letter, hex(self.param)[2:].zfill(2).upper())
return '%s %s %s %s' % (ret1, ret2, ret3, ret4)
def __repr__(self):
return self.__unicode__()
class XMPattern(Pattern):
"""The definition of the XM pattern"""
def __init__(self, file=None, rows=64, channels=32):
super(XMPattern, self).__init__(rows, channels)
self.headerlen = 9
self.packtype = 0
self.packsize = rows * channels
if file:
self.load(file)
else:
self.data = self.empty(self.rows, self.channels)
def empty(self, rows, channels):
pattern = []
for row in range(rows):
pattern.append([])
for channel in range(channels):
pattern[row].append(XMNote())
return pattern
def load(self, file):
self.headerlen = struct.unpack("<L", file.read(4))[0]
self.packtype = struct.unpack("<B", file.read(1))[0]
self.rows = struct.unpack("<H", file.read(2))[0]
self.packsize = struct.unpack("<H", file.read(2))[0]
self.data = self.empty(self.rows, self.channels)
maskvar = 0
end = file.tell() + self.packsize
for row in range(self.rows):
for channel in range(self.channels):
if file.tell() < end:
maskvar = struct.unpack("<B", file.read(1))[0]
note = 0
inst = 0
voldata = 0
command = 0
param = 0
if maskvar & 128:
if maskvar & 1: note = struct.unpack("<B", file.read(1))[0]
if maskvar & 2: inst = struct.unpack("<B", file.read(1))[0]
if maskvar & 4: voldata = struct.unpack("<B", file.read(1))[0]
if maskvar & 8: command = struct.unpack("<B", file.read(1))[0]
if maskvar & 16: param = struct.unpack("<B", file.read(1))[0]
else:
note = maskvar
inst = struct.unpack("<B", file.read(1))[0]
voldata = struct.unpack("<B", file.read(1))[0]
command = struct.unpack("<B", file.read(1))[0]
param = struct.unpack("<B", file.read(1))[0]
# Cleanup. . .
if note > NOTE_NONE and note < 97:
self.data[row][channel].note = note + 12
elif note == 97:
self.data[row][channel].note = NOTE_OFF
else:
self.data[row][channel].note = NOTE_NONE
if inst == 255:
self.data[row][channel].instrument = 0
else:
self.data[row][channel].instrument = inst
if voldata >= 16 and voldata <= 80:
self.data[row][channel].voleffect = VOLFX_VOLUME
self.data[row][channel].volparam = voldata - 16
elif voldata >= 96:
volcmd = voldata & 0xF0
voldata = voldata & 0x0F
self.data[row][channel].volparam = voldata
if volcmd == 0x60: self.data[row][channel].voleffect = VOLFX_VOLSLIDEDOWN
if volcmd == 0x70: self.data[row][channel].voleffect = VOLFX_VOLSLIDEUP
if volcmd == 0x80: self.data[row][channel].voleffect = VOLFX_FINEVOLDOWN
if volcmd == 0x90: self.data[row][channel].voleffect = VOLFX_FINEVOLUP
if volcmd == 0xA0: self.data[row][channel].voleffect = VOLFX_VIBRATOSPEED
if volcmd == 0xB0: self.data[row][channel].voleffect = VOLFX_VIBRATODEPTH
if volcmd == 0xC0:
self.data[row][channel].voleffect = VOLFX_PANNING
self.data[row][channel].volparam = (voldata << 2) + 2
if volcmd == 0xD0: self.data[row][channel].voleffect = VOLFX_PANSLIDELEFT
if volcmd == 0xE0: self.data[row][channel].voleffect = VOLFX_PANSLIDERIGHT
if volcmd == 0xF0: self.data[row][channel].voleffect = VOLFX_TONEPORTAMENTO
self.data[row][channel].effect = command
self.data[row][channel].param = param
class XMEnvelope(Envelope):
"""The definition of an envelope for an XM instrument. There are a total
of two envelopes: Volume and Panning."""
def __init__(self, type=0):
super(XMEnvelope, self).__init__(type)
class XMSample(Sample):
"""Definition of an Fast Tracker II sample"""
def __init__(self, file=None):
super(XMSample, self).__init__()
self.xmsamploadflags = SF_LE | SF_M | SF_PCMD
if file: self.load(file, 0)
def load(self, file, loadtype=0):
if loadtype == 0:
# Loads the XM sample headers
xmsamplength = struct.unpack("<L", file.read(4))[0]
xmsamploopbegin = struct.unpack("<L", file.read(4))[0]
xmsamploopend = struct.unpack("<L", file.read(4))[0] + xmsamploopbegin
xmsampvolume = struct.unpack("<B", file.read(1))[0]
xmsampfinetune = struct.unpack("<b", file.read(1))[0]
xmsampflags = struct.unpack("<B", file.read(1))[0]
xmsamppanning = struct.unpack("<B", file.read(1))[0]
xmsamprelnote = struct.unpack("<b", file.read(1))[0]
xmsampRESERVED = struct.unpack("<B", file.read(1))[0]
xmsampname = struct.unpack("<22s", file.read(22))[0]
# Parse it into generic Sample
self.name = xmsampname
self.filename = xmsampname
self.volume = MIN(xmsampvolume, 64) * 4
self.length = xmsamplength
self.loopbegin = xmsamploopbegin
self.loopend = xmsamploopend
self.flags = CHN_PANNING
if self.loopbegin >= self.loopend:
xmsampflags = xmsampflags & ~3
if xmsampflags & 3:
if xmsampflags & 3 == 2: self.flags = self.flags | CHN_PINGPONGLOOP
if xmsampflags & 3 == 1: self.flags = self.flags | CHN_LOOP
if xmsampflags & 0x10:
self.flags = self.flags | CHN_16BIT
self.length = self.length >> 1
self.loopbegin = self.loopbegin >> 1
self.loopend = self.loopend >> 1
self.panning = xmsamppanning
self.c5speed = transpose_to_frequency(xmsamprelnote, xmsampfinetune)
elif loadtype == 1:
# . . .otherwise, load sample data
self.xmsamploadflags = self.xmsamploadflags | (SF_8, SF_16)[bool(self.flags & CHN_16BIT)]
super(XMSample, self).load(file, file.tell(), self.xmsamploadflags)
class XMInstrument(Instrument):
"""Definition of an Fast Tracker II instrument"""
def __init__(self, file=None):
super(XMInstrument, self).__init__()
self.xminstnumsamples = 0
self.samples = []
if file: self.load(file)
def load(self, file):
# Load the XM instrument data
xminstheadsize = struct.unpack("<L", file.read(4))[0]
xminstname = struct.unpack("<22s", file.read(22))[0]
xminsttype = struct.unpack("<B", file.read(1))[0] # random junk, supposedly. . .
self.xminstnumsamples = struct.unpack("<H", file.read(2))[0]
self.name = xminstname
xminstsmpheadsize = struct.unpack("<L", file.read(4))[0]
if self.xminstnumsamples > 0:
xminstnotekeytable = []
for i in range(96):
xminstnotekeytable.append(struct.unpack("<B", file.read(1))[0])
xminstvolenv= []
for i in range(12):
xminstvolenv.append(list(struct.unpack("<HH", file.read(4))))
xminstpanenv= []
for i in range(12):
xminstpanenv.append(list(struct.unpack("<HH", file.read(4))))
xminstvolpoints = struct.unpack("<B", file.read(1))[0]
xminstpanpoints = struct.unpack("<B", file.read(1))[0]
xminstvolsustpnt = struct.unpack("<B", file.read(1))[0]
xminstvollpstpnt = struct.unpack("<B", file.read(1))[0]
xminstvollpedpnt = struct.unpack("<B", file.read(1))[0]
xminstpansustpnt = struct.unpack("<B", file.read(1))[0]
xminstpanlpstpnt = struct.unpack("<B", file.read(1))[0]
xminstpanlpedpnt = struct.unpack("<B", file.read(1))[0]
xminstvolenvtype = struct.unpack("<B", file.read(1))[0]
xminstpanenvtype = struct.unpack("<B", file.read(1))[0]
xminstvibtype = struct.unpack("<B", file.read(1))[0]
xminstvibsweep = struct.unpack("<B", file.read(1))[0]
xminstvibdepth = struct.unpack("<B", file.read(1))[0]
xminstvibrate = struct.unpack("<B", file.read(1))[0]
xminstvolfadeout = struct.unpack("<H", file.read(2))[0]
xminstRESERVED1 = list(struct.unpack("<11H", file.read(22)))
# Parse it into the generic Instrument
for i in range(96):
self.notemap[i] = i
self.samplemap[i] = xminstnotekeytable[i]
self.volumeenv = XMEnvelope()
self.volumeenv.ticks = []
self.volumeenv.values = []
self.panningenv = XMEnvelope()
self.panningenv.ticks = []
self.panningenv.values = []
for i in range(12):
self.volumeenv.ticks.append(xminstvolenv[i][0])
self.volumeenv.values.append(xminstvolenv[i][1])
self.panningenv.ticks.append(xminstpanenv[i][0])
self.panningenv.values.append(xminstpanenv[i][1])
self.volumeenv.nodes = xminstvolpoints
self.panningenv.nodes = xminstpanpoints
self.volumeenv.sustloopbegin = xminstvolsustpnt
self.volumeenv.sustloopend = xminstvolsustpnt
self.volumeenv.loopbegin = xminstvollpstpnt
self.volumeenv.loopend = xminstvollpedpnt
self.panningenv.sustloopbegin = xminstpansustpnt
self.panningenv.sustloopend = xminstpansustpnt
self.panningenv.loopbegin = xminstpanlpstpnt
self.panningenv.loopend = xminstpanlpedpnt
            # Translate the XM envelope type bits into generic instrument flags.
            if xminstvolenvtype & 1: self.flags |= ENV_VOLUME
            if xminstvolenvtype & 2: self.flags |= ENV_VOLSUSTAIN
            if xminstvolenvtype & 4: self.flags |= ENV_VOLLOOP
            if xminstpanenvtype & 1: self.flags |= ENV_PANNING
            if xminstpanenvtype & 2: self.flags |= ENV_PANSUSTAIN
            if xminstpanenvtype & 4: self.flags |= ENV_PANLOOP
self.fadeout = xminstvolfadeout
if self.xminstnumsamples:
# Load headers. . .
for num in range(self.xminstnumsamples):
self.samples.append(XMSample(file))
self.samples[num].vibtype = xminstvibtype
self.samples[num].vibrate = xminstvibsweep
self.samples[num].vibdepth = xminstvibdepth
self.samples[num].vibspeed = xminstvibrate
# . . .followed by sample data
for num in range(self.xminstnumsamples):
self.samples[num].load(file, 1)
class XM(Module):
"""A class that holds an XM module file"""
def __init__(self, filename=None):
super(XM, self).__init__()
if not filename:
self.id = 'Extended Module: ' # 17 char length (stupid space)
self.b1Atch = 0x1A # byte 1A temp char. . . ;)
self.tracker = 'FastTracker v2.00'
self.cwtv = 0x0104
self.headerlength = 0
self.restartpos = 0
self.channelnum = 32
else:
f = open(filename, 'rb')
self.filename = filename
self.id = struct.unpack("<17s", f.read(17))[0] # 'Extended module: '
self.name = struct.unpack("<20s", f.read(20))[0] # Song title (padded with NULL)
self.b1Atch = struct.unpack("<B", f.read(1))[0] # 0x1A
self.tracker = struct.unpack("<20s", f.read(20))[0]
self.cwtv = struct.unpack("<H", f.read(2))[0] # Created with tracker version (XM y.xx = 0yxxh)
self.headerlength = struct.unpack("<L", f.read(4))[0]
self.ordernum = struct.unpack("<H", f.read(2))[0] # Number of orders in song
self.restartpos = struct.unpack("<H", f.read(2))[0] # Restart position
self.channelnum = struct.unpack("<H", f.read(2))[0] # Number of channels in song
self.patternnum = struct.unpack("<H", f.read(2))[0] # Number of patterns in song
self.instrumentnum = struct.unpack("<H", f.read(2))[0] # Number of instruments in song
self.flags = struct.unpack("<H", f.read(2))[0]
self.tempo = struct.unpack("<H", f.read(2))[0]
self.speed = struct.unpack("<H", f.read(2))[0]
self.orders = list(struct.unpack("<256B", f.read(256)))
self.patterns = []
if self.patternnum:
for num in range(self.patternnum):
self.patterns.append(XMPattern(f, channels=self.channelnum))
self.instruments = []
if self.instrumentnum:
for num in range(self.instrumentnum):
self.instruments.append(XMInstrument(f))
f.close()
def detect(filename):
f = open(filename, 'rb')
id = struct.unpack("<17s", f.read(17))[0]
f.close()
if id == 'Extended Module: ':
return 2
else:
return 0
detect = staticmethod(detect)
def gettracker(self):
return self.tracker.replace('\x00', ' ').strip()
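# Example usage sketch (the file path is hypothetical):
#   if XM.detect('songs/tune.xm'):
#       xm = XM('songs/tune.xm')
#       # xm.name, xm.channelnum, xm.patterns and xm.instruments are now populated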
| gpl-3.0 |
chatcannon/scipy | scipy/linalg/_cython_signature_generator.py | 52 | 8369 | """
A script that uses f2py to generate the signature files used to make
the Cython BLAS and LAPACK wrappers from the fortran source code for
LAPACK and the reference BLAS.
To generate the BLAS wrapper signatures call:
python _cython_signature_generator.py blas <blas_directory> <out_file>
To generate the LAPACK wrapper signatures call:
python _cython_signature_generator.py lapack <lapack_src_directory> <out_file>
"""
import glob
from numpy.f2py import crackfortran
sig_types = {'integer': 'int',
'complex': 'c',
'double precision': 'd',
'real': 's',
'complex*16': 'z',
'double complex': 'z',
'character': 'char',
'logical': 'bint'}
def get_type(info, arg):
argtype = sig_types[info['vars'][arg]['typespec']]
if argtype == 'c' and info['vars'][arg].get('kindselector') is not None:
argtype = 'z'
return argtype
def make_signature(filename):
info = crackfortran.crackfortran(filename)[0]
name = info['name']
if info['block'] == 'subroutine':
return_type = 'void'
else:
return_type = get_type(info, name)
arglist = [' *'.join([get_type(info, arg), arg]) for arg in info['args']]
args = ', '.join(arglist)
# Eliminate strange variable naming that replaces rank with rank_bn.
args = args.replace('rank_bn', 'rank')
return '{0} {1}({2})\n'.format(return_type, name, args)
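# For a typical BLAS subroutine this yields a line such as (illustrative):
#   void daxpy(int *n, d *da, d *dx, int *incx, d *dy, int *incy)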
def get_sig_name(line):
return line.split('(')[0].split(' ')[-1]
def sigs_from_dir(directory, outfile, manual_wrappers=None, exclusions=None):
if directory[-1] in ['/', '\\']:
directory = directory[:-1]
files = glob.glob(directory + '/*.f*')
if exclusions is None:
exclusions = []
if manual_wrappers is not None:
exclusions += [get_sig_name(l) for l in manual_wrappers.split('\n')]
signatures = []
for filename in files:
name = filename.split('\\')[-1][:-2]
if name in exclusions:
continue
signatures.append(make_signature(filename))
if manual_wrappers is not None:
signatures += [l + '\n' for l in manual_wrappers.split('\n')]
signatures.sort(key=get_sig_name)
comment = ["# This file was generated by _cython_wrapper_generators.py.\n",
"# Do not edit this file directly.\n\n"]
with open(outfile, 'w') as f:
f.writelines(comment)
f.writelines(signatures)
# The signature that is used for zcgesv in lapack 3.1.0 and 3.1.1 changed
# in version 3.2.0. The version included in the clapack on OSX has the
# more recent signature though.
# slamch and dlamch are not in the lapack src directory, but, since they
# already have Python wrappers, we'll wrap them as well.
# The other manual signatures are used because the signature generating
# functions don't work when function pointer arguments are used.
lapack_manual_wrappers = '''void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info)
void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info)
void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
d dlamch(char *cmach)
void ilaver(int *vers_major, int *vers_minor, int *vers_patch)
void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info)
void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info)
void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
s slamch(char *cmach)
void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info)'''
if __name__ == '__main__':
from sys import argv
libname, src_dir, outfile = argv[1:]
    # Exclude scabs1 and xerbla since they aren't currently included
# in the scipy-specific ABI wrappers.
if libname.lower() == 'blas':
sigs_from_dir(src_dir, outfile, exclusions=['scabs1', 'xerbla'])
elif libname.lower() == 'lapack':
# Exclude all routines that do not have consistent interfaces from
# LAPACK 3.1.0 through 3.6.0.
# Also exclude routines with string arguments to avoid
# compatibility woes with different standards for string arguments.
# Exclude sisnan and slaneg since they aren't currently included in
    # the ABI compatibility wrappers.
exclusions = ['sisnan', 'csrot', 'zdrot', 'ilaenv', 'iparmq', 'lsamen',
'xerbla', 'zcgesv', 'dlaisnan', 'slaisnan', 'dlazq3',
'dlazq4', 'slazq3', 'slazq4', 'dlasq3', 'dlasq4',
'slasq3', 'slasq4', 'dlasq5', 'slasq5', 'slaneg',
# Routines deprecated in LAPACK 3.6.0
'cgegs', 'cgegv', 'cgelsx', 'cgeqpf', 'cggsvd', 'cggsvp',
'clahrd', 'clatzm', 'ctzrqf', 'dgegs', 'dgegv', 'dgelsx',
'dgeqpf', 'dggsvd', 'dggsvp', 'dlahrd', 'dlatzm', 'dtzrqf',
'sgegs', 'sgegv', 'sgelsx', 'sgeqpf', 'sggsvd', 'sggsvp',
'slahrd', 'slatzm', 'stzrqf', 'zgegs', 'zgegv', 'zgelsx',
'zgeqpf', 'zggsvd', 'zggsvp', 'zlahrd', 'zlatzm', 'ztzrqf']
sigs_from_dir(src_dir, outfile, manual_wrappers=lapack_manual_wrappers,
exclusions=exclusions)
| bsd-3-clause |
samdoran/ansible | lib/ansible/modules/network/nxos/nxos_evpn_vni.py | 25 | 10474 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: nxos_evpn_vni
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages Cisco EVPN VXLAN Network Identifier (VNI).
description:
- Manages Cisco Ethernet Virtual Private Network (EVPN) VXLAN Network
Identifier (VNI) configurations of a Nexus device.
author: Gabriele Gerbino (@GGabriele)
notes:
  - default, where supported, restores the parameter's default value.
- RD override is not permitted. You should set it to the default values
first and then reconfigure it.
- C(route_target_both), C(route_target_import) and
    C(route_target_export) valid values are a list of extended communities
    (i.e. ['1.2.3.4:5', '33:55']) or the keywords 'auto' or 'default'.
- The C(route_target_both) property is discouraged due to the inconsistent
behavior of the property across Nexus platforms and image versions.
For this reason it is recommended to use explicit C(route_target_export)
and C(route_target_import) properties instead of C(route_target_both).
- RD valid values are a string in one of the route-distinguisher formats,
the keyword 'auto', or the keyword 'default'.
options:
vni:
description:
- The EVPN VXLAN Network Identifier.
required: true
default: null
route_distinguisher:
description:
- The VPN Route Distinguisher (RD). The RD is combined with
the IPv4 or IPv6 prefix learned by the PE router to create a
globally unique address.
required: true
default: null
route_target_both:
description:
- Enables/Disables route-target settings for both import and
export target communities using a single property.
required: false
default: null
route_target_import:
description:
- Sets the route-target 'import' extended communities.
required: false
default: null
route_target_export:
description:
      - Sets the route-target 'export' extended communities.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: vni configuration
nxos_evpn_vni:
vni: 6000
route_distinguisher: "60:10"
route_target_import:
- "5000:10"
- "4100:100"
route_target_export: auto
route_target_both: default
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["evpn", "vni 6000 l2", "route-target import 5001:10"]
'''
import re
import time
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
PARAM_TO_COMMAND_KEYMAP = {
'vni': 'vni',
'route_distinguisher': 'rd',
'route_target_both': 'route-target both',
'route_target_import': 'route-target import',
'route_target_export': 'route-target export'
}
def get_value(arg, config, module):
command = PARAM_TO_COMMAND_KEYMAP.get(arg)
command_re = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
value = ''
if command in config:
value = command_re.search(config).group('value')
return value
def get_route_target_value(arg, config, module):
splitted_config = config.splitlines()
value_list = []
command = PARAM_TO_COMMAND_KEYMAP.get(arg)
command_re = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
for line in splitted_config:
value = ''
if command in line.strip():
value = command_re.search(line).group('value')
value_list.append(value)
return value_list
def get_existing(module, args):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])]
config = netcfg.get_section(parents)
if config:
for arg in args:
if arg != 'vni':
if arg == 'route_distinguisher':
existing[arg] = get_value(arg, config, module)
else:
existing[arg] = get_route_target_value(arg, config, module)
existing_fix = dict((k, v) for k, v in existing.items() if v)
if existing_fix:
existing['vni'] = module.params['vni']
else:
existing = existing_fix
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key in table:
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = table.get(key)
return new_dict
def fix_proposed(proposed_commands):
new_proposed = {}
for key, value in proposed_commands.items():
if key == 'route-target both':
new_proposed['route-target export'] = value
new_proposed['route-target import'] = value
else:
new_proposed[key] = value
return new_proposed
def state_present(module, existing, proposed):
commands = list()
parents = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
if proposed_commands.get('route-target both'):
proposed_commands = fix_proposed(proposed_commands)
for key, value in proposed_commands.items():
if key.startswith('route-target'):
if value == ['default']:
existing_value = existing_commands.get(key)
if existing_value:
for target in existing_value:
commands.append('no {0} {1}'.format(key, target))
elif not isinstance(value, list):
value = [value]
for target in value:
if existing:
if target not in existing.get(key.replace('-', '_').replace(' ', '_')):
commands.append('{0} {1}'.format(key, target))
else:
commands.append('{0} {1}'.format(key, target))
elif value == 'default':
existing_value = existing_commands.get(key)
if existing_value:
commands.append('no {0} {1}'.format(key, existing_value))
else:
command = '{0} {1}'.format(key, value)
commands.append(command)
if commands:
parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])]
return commands, parents
def state_absent(module, existing, proposed):
commands = ['no vni {0} l2'.format(module.params['vni'])]
parents = ['evpn']
return commands, parents
def main():
argument_spec = dict(
vni=dict(required=True, type='str'),
route_distinguisher=dict(required=False, type='str'),
route_target_both=dict(required=False, type='list'),
route_target_import=dict(required=False, type='list'),
route_target_export=dict(required=False, type='list'),
state=dict(choices=['present', 'absent'], default='present', required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = dict(changed=False, warnings=warnings)
state = module.params['state']
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing = get_existing(module, args)
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
commands = []
parents = []
proposed = {}
for key, value in proposed_args.items():
if key != 'vni':
if value == 'true':
value = True
elif value == 'false':
value = False
if existing.get(key) != value:
proposed[key] = value
if state == 'present':
commands, parents = state_present(module, existing, proposed)
elif state == 'absent' and existing:
commands, parents = state_absent(module, existing, proposed)
if commands:
if (existing.get('route_distinguisher') and
proposed.get('route_distinguisher')):
if (existing['route_distinguisher'] != proposed['route_distinguisher'] and
proposed['route_distinguisher'] != 'default'):
warnings.append('EVPN RD {0} was automatically removed. '
'It is highly recommended to use a task '
'(with default as value) to explicitly '
'unconfigure it.'.format(existing['route_distinguisher']))
remove_commands = ['no rd {0}'.format(existing['route_distinguisher'])]
candidate = CustomNetworkConfig(indent=3)
candidate.add(remove_commands, parents=parents)
load_config(module, candidate)
results['changed'] = True
results['commands'] = candidate.items_text()
time.sleep(30)
else:
candidate = CustomNetworkConfig(indent=3)
candidate.add(commands, parents=parents)
load_config(module, candidate)
results['changed'] = True
results['commands'] = candidate.items_text()
else:
results['commands'] = []
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
NeuralEnsemble/neuroConstruct | lib/jython/Lib/test/test_shelve.py | 138 | 4596 | import os
import unittest
import shelve
import glob
from test import test_support
test_support.import_module('anydbm', deprecated=True)
class TestCase(unittest.TestCase):
fn = "shelftemp" + os.extsep + "db"
def test_close(self):
d1 = {}
s = shelve.Shelf(d1, protocol=2, writeback=False)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
self.assertEqual(len(s), 1)
s.close()
self.assertRaises(ValueError, len, s)
try:
s['key1']
except ValueError:
pass
else:
self.fail('Closed shelf should not find a key')
def test_ascii_file_shelf(self):
try:
s = shelve.open(self.fn, protocol=0)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
finally:
for f in glob.glob(self.fn+"*"):
os.unlink(f)
def test_binary_file_shelf(self):
try:
s = shelve.open(self.fn, protocol=1)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
finally:
for f in glob.glob(self.fn+"*"):
os.unlink(f)
def test_proto2_file_shelf(self):
try:
s = shelve.open(self.fn, protocol=2)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
finally:
for f in glob.glob(self.fn+"*"):
os.unlink(f)
def test_in_memory_shelf(self):
d1 = {}
s = shelve.Shelf(d1, protocol=0)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
d2 = {}
s = shelve.Shelf(d2, protocol=1)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
self.assertEqual(len(d1), 1)
self.assertNotEqual(d1, d2)
def test_mutable_entry(self):
d1 = {}
s = shelve.Shelf(d1, protocol=2, writeback=False)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
s['key1'].append(5)
self.assertEqual(s['key1'], [1,2,3,4])
s.close()
d2 = {}
s = shelve.Shelf(d2, protocol=2, writeback=True)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
s['key1'].append(5)
self.assertEqual(s['key1'], [1,2,3,4,5])
s.close()
self.assertEqual(len(d1), 1)
self.assertEqual(len(d2), 1)
def test_writeback_also_writes_immediately(self):
# Issue 5754
d = {}
s = shelve.Shelf(d, writeback=True)
s['key'] = [1]
p1 = d['key'] # Will give a KeyError if backing store not updated
s['key'].append(2)
s.close()
p2 = d['key']
self.assertNotEqual(p1, p2) # Write creates new object in store
from test import mapping_tests
class TestShelveBase(mapping_tests.BasicTestMappingProtocol):
fn = "shelftemp.db"
counter = 0
def __init__(self, *args, **kw):
self._db = []
mapping_tests.BasicTestMappingProtocol.__init__(self, *args, **kw)
type2test = shelve.Shelf
def _reference(self):
return {"key1":"value1", "key2":2, "key3":(1,2,3)}
def _empty_mapping(self):
if self._in_mem:
x= shelve.Shelf({}, **self._args)
else:
self.counter+=1
x= shelve.open(self.fn+str(self.counter), **self._args)
self._db.append(x)
return x
def tearDown(self):
for db in self._db:
db.close()
self._db = []
if not self._in_mem:
for f in glob.glob(self.fn+"*"):
test_support.unlink(f)
class TestAsciiFileShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = False
class TestBinaryFileShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = False
class TestProto2FileShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = False
class TestAsciiMemShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = True
class TestBinaryMemShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = True
class TestProto2MemShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = True
def test_main():
test_support.run_unittest(
TestAsciiFileShelve,
TestBinaryFileShelve,
TestProto2FileShelve,
TestAsciiMemShelve,
TestBinaryMemShelve,
TestProto2MemShelve,
TestCase
)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
mick-d/nipype | nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py | 1 | 2404 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..convert import AnalyzeHeader
def test_AnalyzeHeader_inputs():
input_map = dict(args=dict(argstr='%s',
),
centre=dict(argstr='-centre %s',
units='mm',
),
data_dims=dict(argstr='-datadims %s',
units='voxels',
),
datatype=dict(argstr='-datatype %s',
mandatory=True,
),
description=dict(argstr='-description %s',
),
environ=dict(nohash=True,
usedefault=True,
),
greylevels=dict(argstr='-gl %s',
units='NA',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='< %s',
mandatory=True,
position=1,
),
initfromheader=dict(argstr='-initfromheader %s',
position=3,
),
intelbyteorder=dict(argstr='-intelbyteorder',
),
networkbyteorder=dict(argstr='-networkbyteorder',
),
nimages=dict(argstr='-nimages %d',
units='NA',
),
offset=dict(argstr='-offset %d',
units='NA',
),
out_file=dict(argstr='> %s',
genfile=True,
position=-1,
),
picoseed=dict(argstr='-picoseed %s',
units='mm',
),
printbigendian=dict(argstr='-printbigendian %s',
position=3,
),
printimagedims=dict(argstr='-printimagedims %s',
position=3,
),
printintelbyteorder=dict(argstr='-printintelbyteorder %s',
position=3,
),
printprogargs=dict(argstr='-printprogargs %s',
position=3,
),
readheader=dict(argstr='-readheader %s',
position=3,
),
scaleinter=dict(argstr='-scaleinter %d',
units='NA',
),
scaleslope=dict(argstr='-scaleslope %d',
units='NA',
),
scheme_file=dict(argstr='%s',
position=2,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
voxel_dims=dict(argstr='-voxeldims %s',
units='mm',
),
)
inputs = AnalyzeHeader.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_AnalyzeHeader_outputs():
output_map = dict(header=dict(),
)
outputs = AnalyzeHeader.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| bsd-3-clause |
hjanime/VisTrails | vistrails/db/versions/v0_9_0/domain/abstraction.py | 3 | 2728 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import copy
from auto_gen import DBAbstraction as _DBAbstraction
from auto_gen import DBAbstractionRef, DBModule
from id_scope import IdScope
class DBAbstraction(_DBAbstraction):
def __init__(self, *args, **kwargs):
_DBAbstraction.__init__(self, *args, **kwargs)
self.idScope = IdScope(remap={DBAbstractionRef.vtType: DBModule.vtType})
self.idScope.setBeginId('action', 1)
def __copy__(self):
return DBAbstraction.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = _DBAbstraction.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = DBAbstraction
# need to go through and reset the index to the copied objects
cp.idScope = copy.copy(self.idScope)
return cp
| bsd-3-clause |
SanPen/GridCal | src/research/opf/dc_opf_3.py | 1 | 8418 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program implements the DC power flow as a linear program.
This version uses the sparse structures, and the problem compilation is
blazing fast compared to the full matrix version.
"""
from pulp import *
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from GridCal.Engine import *
class DcOpf3:
def __init__(self, multi_circuit: MultiCircuit):
"""
OPF simple dispatch problem
:param multi_circuit: GridCal Circuit instance (remember this must be a connected island)
"""
self.multi_circuit = multi_circuit
# circuit compilation
self.numerical_circuit = self.multi_circuit.compile_snapshot()
self.islands = self.numerical_circuit.compute()
self.Sbase = multi_circuit.Sbase
self.B = csc_matrix(self.numerical_circuit.get_B())
self.nbus = self.B.shape[0]
# node sets
self.pqpv = self.islands[0].pqpv
self.pv = self.islands[0].pv
self.vd = self.islands[0].ref
self.pq = self.islands[0].pq
# declare the voltage angles
self.theta = [None] * self.nbus
for i in range(self.nbus):
self.theta[i] = LpVariable("Theta" + str(i), -0.5, 0.5)
# declare the generation
self.PG = list()
def solve(self):
"""
Solve OPF using the sparse formulation
:return:
"""
'''
CSR format explanation:
The standard CSR representation where the column indices for row i are stored in
-> indices[indptr[i]:indptr[i+1]]
and their corresponding values are stored in
-> data[indptr[i]:indptr[i+1]]
If the shape parameter is not supplied, the matrix dimensions are inferred from the index arrays.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
'''
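        # Tiny worked example (values assumed for illustration, not taken from
        # the grid): the matrix [[2, -1], [-1, 2]] stored in compressed sparse
        # form has
        #   indptr  = [0, 2, 4]
        #   indices = [0, 1, 0, 1]
        #   data    = [2, -1, -1, 2]
        # so row/column i is walked with data[indptr[i]:indptr[i+1]] and
        # indices[indptr[i]:indptr[i+1]], exactly as the constraint loops
        # below do with self.B.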
# print('Compiling LP')
prob = LpProblem("DC optimal power flow", LpMinimize)
################################################################################################################
# Add the objective function
################################################################################################################
fobj = 0
# add the voltage angles multiplied by zero (trick)
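        # (The zero coefficient is presumably just a way to register every
        # angle variable with the problem even though angles carry no cost.)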
for j in self.pqpv:
fobj += self.theta[j] * 0.0
# Add the generators cost
for bus in self.multi_circuit.buses:
# check that there are at least one generator at the slack node
if len(bus.controlled_generators) == 0 and bus.type == BusMode.Slack:
raise Warning('There is no generator at the Slack node ' + bus.name + '!!!')
# Add the bus LP vars
for gen in bus.controlled_generators:
# create the generation variable
gen.initialize_lp_vars()
# add the variable to the objective function
fobj += gen.LPVar_P * gen.Cost
self.PG.append(gen.LPVar_P) # add the var reference just to print later...
# Add the objective function to the problem
prob += fobj
################################################################################################################
# Add the matrix multiplication as constraints
# See: https://math.stackexchange.com/questions/1727572/solving-a-feasible-system-of-linear-equations-
# using-linear-programming
################################################################################################################
for i in self.pqpv:
s = 0
d = 0
# add the calculated node power
for ii in range(self.B.indptr[i], self.B.indptr[i+1]):
j = self.B.indices[ii]
if j not in self.vd:
s += self.B.data[ii] * self.theta[j]
# add the generation LP vars
for gen in self.multi_circuit.buses[i].controlled_generators:
d += gen.LPVar_P
# add the nodal demand
for load in self.multi_circuit.buses[i].loads:
d -= load.P / self.Sbase
prob.add(s == d, 'ct_node_mismatch_' + str(i))
################################################################################################################
# set the slack nodes voltage angle
################################################################################################################
for i in self.vd:
prob.add(self.theta[i] == 0, 'ct_slack_theta')
################################################################################################################
# set the slack generator power
################################################################################################################
for i in self.vd:
val = 0
g = 0
# compute the slack node power
for ii in range(self.B.indptr[i], self.B.indptr[i+1]):
j = self.B.indices[ii]
val += self.B.data[ii] * self.theta[j]
# Sum the slack generators
for gen in self.multi_circuit.buses[i].controlled_generators:
g += gen.LPVar_P
# the sum of the slack node generators must be equal to the slack node power
prob.add(g == val, 'ct_slack_power_' + str(i))
################################################################################################################
# Set the branch limits
################################################################################################################
buses_dict = {bus: i for i, bus in enumerate(self.multi_circuit.buses)}
for k, branch in enumerate(self.multi_circuit.branches):
i = buses_dict[branch.bus_from]
j = buses_dict[branch.bus_to]
# branch flow
Fij = self.B[i, j] * (self.theta[i] - self.theta[j])
Fji = self.B[i, j] * (self.theta[j] - self.theta[i])
# constraints
prob.add(Fij <= branch.rate / self.Sbase, 'ct_br_flow_ij_' + str(k))
prob.add(Fji <= branch.rate / self.Sbase, 'ct_br_flow_ji_' + str(k))
################################################################################################################
# Solve
################################################################################################################
print('Solving LP')
prob.solve() # solve with CBC
# prob.solve(CPLEX())
# The status of the solution is printed to the screen
print("Status:", LpStatus[prob.status])
# The optimised objective function value is printed to the screen
print("Cost =", value(prob.objective), '€')
def print(self):
"""
Print results
:return:
"""
print('\nVoltages in p.u.')
for i, th in enumerate(self.theta):
print('Bus', i, '->', 1, '<', th.value(), 'rad')
print('\nGeneration power (in MW)')
for i, g in enumerate(self.PG):
val = g.value() * self.Sbase if g.value() is not None else 'None'
print(g.name, '->', val)
# Set the branch limits
print('\nBranch flows (in MW)')
buses_dict = {bus: i for i, bus in enumerate(self.multi_circuit.buses)}
for k, branch in enumerate(self.multi_circuit.branches):
i = buses_dict[branch.bus_from]
j = buses_dict[branch.bus_to]
if self.theta[i].value() is not None and self.theta[j].value() is not None:
F = self.B[i, j] * (self.theta[i].value() - self.theta[j].value()) * self.Sbase
else:
F = 'None'
print('Branch ' + str(i) + '-' + str(j) + '(', branch.rate, 'MW) ->', F)
if __name__ == '__main__':
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/Lynn 5 Bus pv.gridcal'
grid = FileOpen(fname).open()
# grid = FileOpen('IEEE30.xlsx').open()
# grid = FileOpen('Illinois200Bus.xlsx').open()
# declare and solve problem
problem = DcOpf3(grid)
problem.solve()
problem.print()
| gpl-3.0 |
NL66278/OCB | addons/l10n_ch/account_wizard.py | 424 | 2192 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Financial contributors: Hasa SA, Open Net SA,
# Prisme Solutions Informatique SA, Quod SA
#
# Translation contributors: brain-tec AG, Agile Business Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import TransientModel
class WizardMultiChartsAccounts(TransientModel):
_inherit ='wizard.multi.charts.accounts'
def onchange_chart_template_id(self, cursor, uid, ids, chart_template_id=False, context=None):
if context is None: context = {}
res = super(WizardMultiChartsAccounts, self).onchange_chart_template_id(cursor, uid, ids,
chart_template_id=chart_template_id,
context=context)
# 0 is evaluated as False in python so we have to do this
# because original wizard test code_digits value on a float widget
if chart_template_id:
sterchi_template = self.pool.get('ir.model.data').get_object(cursor, uid, 'l10n_ch', 'l10nch_chart_template')
if sterchi_template.id == chart_template_id:
res['value']['code_digits'] = 0
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tungvx/deploy | appengine_django/management/commands/testserver.py | 45 | 2504 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Overrides the default Django testserver command.
Instead of starting the default Django development server this command fires
  up a copy of the full-fledged appengine dev_appserver.
The appserver is always initialised with a blank datastore with the specified
fixtures loaded into it.
"""
help = 'Runs the development server with data from the given fixtures.'
def run_from_argv(self, argv):
fixtures = argv[2:]
# Ensure an on-disk test datastore is used.
from django.db import connection
connection.use_test_datastore = True
connection.test_datastore_inmemory = False
# Flush any existing test datastore.
connection.flush()
# Load the fixtures.
from django.core.management import call_command
call_command('loaddata', 'initial_data')
if fixtures:
call_command('loaddata', *fixtures)
# Build new arguments for dev_appserver.
datastore_path, history_path = get_test_datastore_paths(False)
new_args = argv[0:1]
new_args.extend(['--datastore_path', datastore_path])
new_args.extend(['--history_path', history_path])
new_args.extend([os.getcwdu()])
# Add email settings
from django.conf import settings
new_args.extend(['--smtp_host', settings.EMAIL_HOST,
'--smtp_port', str(settings.EMAIL_PORT),
'--smtp_user', settings.EMAIL_HOST_USER,
'--smtp_password', settings.EMAIL_HOST_PASSWORD])
# Allow skipped files so we don't die
new_args.extend(['--allow_skipped_files'])
# Start the test dev_appserver.
from google.appengine.tools import dev_appserver_main
dev_appserver_main.main(new_args)
| apache-2.0 |
jaspreetw/tempest | tempest/tests/base.py | 42 | 1610 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslotest import base
from oslotest import moxstubout
class TestCase(base.BaseTestCase):
def setUp(self):
super(TestCase, self).setUp()
mox_fixture = self.useFixture(moxstubout.MoxStubout())
self.mox = mox_fixture.mox
self.stubs = mox_fixture.stubs
def patch(self, target, **kwargs):
"""
        Starts a `mock.patch` for the supplied target and returns the mock
        object it creates.
        The caller does not need to call stop() on the patcher, as this
        method automatically adds a cleanup to the test class to stop it.
:param target: String module.class or module.object expression to patch
:param **kwargs: Passed as-is to `mock.patch`. See mock documentation
for details.
"""
p = mock.patch(target, **kwargs)
m = p.start()
self.addCleanup(p.stop)
return m
| apache-2.0 |
RAtechntukan/CouchPotatoServer | libs/pyutil/platformutil.py | 106 | 3607 | # Thanks to Daenyth for help porting this to Arch Linux.
import os, platform, re, subprocess
_distributor_id_cmdline_re = re.compile("(?:Distributor ID:)\s*(.*)", re.I)
_release_cmdline_re = re.compile("(?:Release:)\s*(.*)", re.I)
_distributor_id_file_re = re.compile("(?:DISTRIB_ID\s*=)\s*(.*)", re.I)
_release_file_re = re.compile("(?:DISTRIB_RELEASE\s*=)\s*(.*)", re.I)
global _distname,_version
_distname = None
_version = None
def get_linux_distro():
""" Tries to determine the name of the Linux OS distribution name.
First, try to parse a file named "/etc/lsb-release". If it exists, and
contains the "DISTRIB_ID=" line and the "DISTRIB_RELEASE=" line, then return
the strings parsed from that file.
If that doesn't work, then invoke platform.dist().
If that doesn't work, then try to execute "lsb_release", as standardized in
2001:
http://refspecs.freestandards.org/LSB_1.0.0/gLSB/lsbrelease.html
The current version of the standard is here:
http://refspecs.freestandards.org/LSB_3.2.0/LSB-Core-generic/LSB-Core-generic/lsbrelease.html
that lsb_release emitted, as strings.
Returns a tuple (distname,version). Distname is what LSB calls a
"distributor id", e.g. "Ubuntu". Version is what LSB calls a "release",
e.g. "8.04".
A version of this has been submitted to python as a patch for the standard
library module "platform":
http://bugs.python.org/issue3937
"""
global _distname,_version
if _distname and _version:
return (_distname, _version)
try:
etclsbrel = open("/etc/lsb-release", "rU")
for line in etclsbrel:
m = _distributor_id_file_re.search(line)
if m:
_distname = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
m = _release_file_re.search(line)
if m:
_version = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
except EnvironmentError:
pass
(_distname, _version) = platform.dist()[:2]
if _distname and _version:
return (_distname, _version)
try:
p = subprocess.Popen(["lsb_release", "--all"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
rc = p.wait()
if rc == 0:
for line in p.stdout.readlines():
m = _distributor_id_cmdline_re.search(line)
if m:
_distname = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
m = _release_cmdline_re.search(p.stdout.read())
if m:
_version = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
except EnvironmentError:
pass
if os.path.exists("/etc/arch-release"):
return ("Arch_Linux", "")
return (_distname,_version)
def get_platform():
# Our version of platform.platform(), telling us both less and more than the
# Python Standard Library's version does.
# We omit details such as the Linux kernel version number, but we add a
# more detailed and correct rendition of the Linux distribution and
# distribution-version.
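    # On Linux the result therefore looks roughly like (example values only)
    # "Linux-Ubuntu_8.04-x86_64-64bit_ELF"; other systems simply get
    # platform.platform().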
if "linux" in platform.system().lower():
return platform.system()+"-"+"_".join(get_linux_distro())+"-"+platform.machine()+"-"+"_".join([x for x in platform.architecture() if x])
else:
return platform.platform()
| gpl-3.0 |
vmarkovtsev/django | tests/template_tests/tests.py | 183 | 4760 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from django.contrib.auth.models import Group
from django.core import urlresolvers
from django.template import Context, Engine, TemplateSyntaxError
from django.template.base import UNKNOWN_SOURCE
from django.test import SimpleTestCase, override_settings
class TemplateTests(SimpleTestCase):
def test_string_origin(self):
template = Engine().from_string('string template')
self.assertEqual(template.origin.name, UNKNOWN_SOURCE)
self.assertEqual(template.origin.loader_name, None)
self.assertEqual(template.source, 'string template')
@override_settings(SETTINGS_MODULE=None)
def test_url_reverse_no_settings_module(self):
"""
#9005 -- url tag shouldn't require settings.SETTINGS_MODULE to
be set.
"""
t = Engine(debug=True).from_string('{% url will_not_match %}')
c = Context()
with self.assertRaises(urlresolvers.NoReverseMatch):
t.render(c)
def test_url_reverse_view_name(self):
"""
        #19827 -- url tag should keep original stack trace when reraising
exception.
"""
t = Engine().from_string('{% url will_not_match %}')
c = Context()
try:
t.render(c)
except urlresolvers.NoReverseMatch:
tb = sys.exc_info()[2]
depth = 0
while tb.tb_next is not None:
tb = tb.tb_next
depth += 1
self.assertGreater(depth, 5,
"The traceback context was lost when reraising the traceback. See #19827")
def test_no_wrapped_exception(self):
"""
# 16770 -- The template system doesn't wrap exceptions, but annotates
them.
"""
engine = Engine(debug=True)
c = Context({"coconuts": lambda: 42 / 0})
t = engine.from_string("{{ coconuts }}")
with self.assertRaises(ZeroDivisionError) as e:
t.render(c)
debug = e.exception.template_debug
self.assertEqual(debug['start'], 0)
self.assertEqual(debug['end'], 14)
def test_invalid_block_suggestion(self):
"""
#7876 -- Error messages should include the unexpected block name.
"""
engine = Engine()
with self.assertRaises(TemplateSyntaxError) as e:
engine.from_string("{% if 1 %}lala{% endblock %}{% endif %}")
self.assertEqual(
e.exception.args[0],
"Invalid block tag: 'endblock', expected 'elif', 'else' or 'endif'",
)
def test_compile_filter_expression_error(self):
"""
19819 -- Make sure the correct token is highlighted for
FilterExpression errors.
"""
engine = Engine(debug=True)
msg = "Could not parse the remainder: '@bar' from 'foo@bar'"
with self.assertRaisesMessage(TemplateSyntaxError, msg) as e:
engine.from_string("{% if 1 %}{{ foo@bar }}{% endif %}")
debug = e.exception.template_debug
self.assertEqual((debug['start'], debug['end']), (10, 23))
self.assertEqual((debug['during']), '{{ foo@bar }}')
def test_compile_tag_error(self):
"""
Errors raised while compiling nodes should include the token
information.
"""
engine = Engine(
debug=True,
libraries={'bad_tag': 'template_tests.templatetags.bad_tag'},
)
with self.assertRaises(RuntimeError) as e:
engine.from_string("{% load bad_tag %}{% badtag %}")
self.assertEqual(e.exception.template_debug['during'], '{% badtag %}')
def test_super_errors(self):
"""
        #18169 -- NoReverseMatch should not be silenced in block.super.
"""
engine = Engine(app_dirs=True)
t = engine.get_template('included_content.html')
with self.assertRaises(urlresolvers.NoReverseMatch):
t.render(Context())
def test_debug_tag_non_ascii(self):
"""
#23060 -- Test non-ASCII model representation in debug output.
"""
group = Group(name="清風")
c1 = Context({"objs": [group]})
t1 = Engine().from_string('{% debug %}')
self.assertIn("清風", t1.render(c1))
def test_extends_generic_template(self):
"""
#24338 -- Allow extending django.template.backends.django.Template
objects.
"""
engine = Engine()
parent = engine.from_string('{% block content %}parent{% endblock %}')
child = engine.from_string(
'{% extends parent %}{% block content %}child{% endblock %}')
self.assertEqual(child.render(Context({'parent': parent})), 'child')
| bsd-3-clause |
inetCatapult/flask-sqlalchemy | test_sqlalchemy.py | 7 | 23928 | from __future__ import with_statement
import atexit
import unittest
from datetime import datetime
import flask
import flask_sqlalchemy as sqlalchemy
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import sessionmaker
def make_todo_model(db):
class Todo(db.Model):
__tablename__ = 'todos'
id = db.Column('todo_id', db.Integer, primary_key=True)
title = db.Column(db.String(60))
text = db.Column(db.String)
done = db.Column(db.Boolean)
pub_date = db.Column(db.DateTime)
def __init__(self, title, text):
self.title = title
self.text = text
self.done = False
self.pub_date = datetime.utcnow()
return Todo
class BasicAppTestCase(unittest.TestCase):
def setUp(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
db = sqlalchemy.SQLAlchemy(app)
self.Todo = make_todo_model(db)
@app.route('/')
def index():
return '\n'.join(x.title for x in self.Todo.query.all())
@app.route('/add', methods=['POST'])
def add():
form = flask.request.form
todo = self.Todo(form['title'], form['text'])
db.session.add(todo)
db.session.commit()
return 'added'
db.create_all()
self.app = app
self.db = db
def tearDown(self):
self.db.drop_all()
def test_basic_insert(self):
c = self.app.test_client()
c.post('/add', data=dict(title='First Item', text='The text'))
c.post('/add', data=dict(title='2nd Item', text='The text'))
rv = c.get('/')
self.assertEqual(rv.data, b'First Item\n2nd Item')
def test_query_recording(self):
with self.app.test_request_context():
todo = self.Todo('Test 1', 'test')
self.db.session.add(todo)
self.db.session.commit()
queries = sqlalchemy.get_debug_queries()
self.assertEqual(len(queries), 1)
query = queries[0]
self.assertTrue('insert into' in query.statement.lower())
self.assertEqual(query.parameters[0], 'Test 1')
self.assertEqual(query.parameters[1], 'test')
self.assertTrue('test_sqlalchemy.py' in query.context)
self.assertTrue('test_query_recording' in query.context)
def test_helper_api(self):
self.assertEqual(self.db.metadata, self.db.Model.metadata)
class CustomMetaDataTestCase(unittest.TestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
self.app.config['TESTING'] = True
def test_custom_metadata_positive(self):
convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
metadata = MetaData(naming_convention=convention)
db = sqlalchemy.SQLAlchemy(self.app, metadata=metadata)
self.db = db
class One(db.Model):
id = db.Column(db.Integer, primary_key=True)
myindex = db.Column(db.Integer, index=True)
class Two(db.Model):
id = db.Column(db.Integer, primary_key=True)
one_id = db.Column(db.Integer, db.ForeignKey(One.id))
myunique = db.Column(db.Integer, unique=True)
self.assertEqual(list(One.__table__.constraints)[0].name, 'pk_one')
self.assertEqual(list(One.__table__.indexes)[0].name, 'ix_one_myindex')
self.assertTrue('fk_two_one_id_one' in [c.name for c in Two.__table__.constraints])
self.assertTrue('uq_two_myunique' in [c.name for c in Two.__table__.constraints])
self.assertTrue('pk_two' in [c.name for c in Two.__table__.constraints])
def test_custom_metadata_negative(self):
db = sqlalchemy.SQLAlchemy(self.app, metadata=None)
self.db = db
class One(db.Model):
id = db.Column(db.Integer, primary_key=True)
myindex = db.Column(db.Integer, index=True)
class Two(db.Model):
id = db.Column(db.Integer, primary_key=True)
one_id = db.Column(db.Integer, db.ForeignKey(One.id))
myunique = db.Column(db.Integer, unique=True)
self.assertNotEqual(list(One.__table__.constraints)[0].name, 'pk_one')
self.assertFalse('fk_two_one_id_one' in [c.name for c in Two.__table__.constraints])
self.assertFalse('uq_two_myunique' in [c.name for c in Two.__table__.constraints])
self.assertFalse('pk_two' in [c.name for c in Two.__table__.constraints])
class TestQueryProperty(unittest.TestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
self.app.config['TESTING'] = True
def test_no_app_bound(self):
db = sqlalchemy.SQLAlchemy()
db.init_app(self.app)
Todo = make_todo_model(db)
# If no app is bound to the SQLAlchemy instance, a
# request context is required to access Model.query.
self.assertRaises(RuntimeError, getattr, Todo, 'query')
with self.app.test_request_context():
db.create_all()
todo = Todo('Test', 'test')
db.session.add(todo)
db.session.commit()
self.assertEqual(len(Todo.query.all()), 1)
def test_app_bound(self):
db = sqlalchemy.SQLAlchemy(self.app)
Todo = make_todo_model(db)
db.create_all()
# If an app was passed to the SQLAlchemy constructor,
# the query property is always available.
todo = Todo('Test', 'test')
db.session.add(todo)
db.session.commit()
self.assertEqual(len(Todo.query.all()), 1)
class SignallingTestCase(unittest.TestCase):
def setUp(self):
self.app = app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
self.db = sqlalchemy.SQLAlchemy(app)
self.Todo = make_todo_model(self.db)
self.db.create_all()
def tearDown(self):
self.db.drop_all()
def test_before_committed(self):
class Namespace(object):
is_received = False
def before_committed(sender, changes):
Namespace.is_received = True
with sqlalchemy.before_models_committed.connected_to(before_committed, sender=self.app):
todo = self.Todo('Awesome', 'the text')
self.db.session.add(todo)
self.db.session.commit()
self.assertTrue(Namespace.is_received)
def test_model_signals(self):
recorded = []
def committed(sender, changes):
self.assertTrue(isinstance(changes, list))
recorded.extend(changes)
with sqlalchemy.models_committed.connected_to(committed,
sender=self.app):
todo = self.Todo('Awesome', 'the text')
self.db.session.add(todo)
self.assertEqual(len(recorded), 0)
self.db.session.commit()
self.assertEqual(len(recorded), 1)
self.assertEqual(recorded[0][0], todo)
self.assertEqual(recorded[0][1], 'insert')
del recorded[:]
todo.text = 'aha'
self.db.session.commit()
self.assertEqual(len(recorded), 1)
self.assertEqual(recorded[0][0], todo)
self.assertEqual(recorded[0][1], 'update')
del recorded[:]
self.db.session.delete(todo)
self.db.session.commit()
self.assertEqual(len(recorded), 1)
self.assertEqual(recorded[0][0], todo)
self.assertEqual(recorded[0][1], 'delete')
class TablenameTestCase(unittest.TestCase):
def test_name(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = sqlalchemy.SQLAlchemy(app)
class FOOBar(db.Model):
id = db.Column(db.Integer, primary_key=True)
class BazBar(db.Model):
id = db.Column(db.Integer, primary_key=True)
class Ham(db.Model):
__tablename__ = 'spam'
id = db.Column(db.Integer, primary_key=True)
self.assertEqual(FOOBar.__tablename__, 'foo_bar')
self.assertEqual(BazBar.__tablename__, 'baz_bar')
self.assertEqual(Ham.__tablename__, 'spam')
def test_single_name(self):
"""Single table inheritance should not set a new name."""
app = flask.Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = sqlalchemy.SQLAlchemy(app)
class Duck(db.Model):
id = db.Column(db.Integer, primary_key=True)
class Mallard(Duck):
pass
self.assertEqual(Mallard.__tablename__, 'duck')
def test_joined_name(self):
"""Model has a separate primary key; it should set a new name."""
app = flask.Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = sqlalchemy.SQLAlchemy(app)
class Duck(db.Model):
id = db.Column(db.Integer, primary_key=True)
class Donald(Duck):
id = db.Column(db.Integer, db.ForeignKey(Duck.id), primary_key=True)
self.assertEqual(Donald.__tablename__, 'donald')
def test_mixin_name(self):
"""Primary key provided by mixin should still allow model to set tablename."""
app = flask.Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = sqlalchemy.SQLAlchemy(app)
class Base(object):
id = db.Column(db.Integer, primary_key=True)
class Duck(Base, db.Model):
pass
self.assertFalse(hasattr(Base, '__tablename__'))
self.assertEqual(Duck.__tablename__, 'duck')
def test_abstract_name(self):
"""Abstract model should not set a name. Subclass should set a name."""
app = flask.Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = sqlalchemy.SQLAlchemy(app)
class Base(db.Model):
__abstract__ = True
id = db.Column(db.Integer, primary_key=True)
class Duck(Base):
pass
self.assertFalse(hasattr(Base, '__tablename__'))
self.assertEqual(Duck.__tablename__, 'duck')
def test_complex_inheritance(self):
"""Joined table inheritance, but the new primary key is provided by a mixin, not directly on the class."""
app = flask.Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = sqlalchemy.SQLAlchemy(app)
class Duck(db.Model):
id = db.Column(db.Integer, primary_key=True)
class IdMixin(object):
@declared_attr
def id(cls):
return db.Column(db.Integer, db.ForeignKey(Duck.id), primary_key=True)
class RubberDuck(IdMixin, Duck):
pass
self.assertEqual(RubberDuck.__tablename__, 'rubber_duck')
class PaginationTestCase(unittest.TestCase):
def test_basic_pagination(self):
p = sqlalchemy.Pagination(None, 1, 20, 500, [])
self.assertEqual(p.page, 1)
self.assertFalse(p.has_prev)
self.assertTrue(p.has_next)
self.assertEqual(p.total, 500)
self.assertEqual(p.pages, 25)
self.assertEqual(p.next_num, 2)
self.assertEqual(list(p.iter_pages()),
[1, 2, 3, 4, 5, None, 24, 25])
p.page = 10
self.assertEqual(list(p.iter_pages()),
[1, 2, None, 8, 9, 10, 11, 12, 13, 14, None, 24, 25])
def test_pagination_pages_when_0_items_per_page(self):
p = sqlalchemy.Pagination(None, 1, 0, 500, [])
self.assertEqual(p.pages, 0)
def test_query_paginate(self):
app = flask.Flask(__name__)
db = sqlalchemy.SQLAlchemy(app)
Todo = make_todo_model(db)
db.create_all()
with app.app_context():
db.session.add_all([Todo('', '') for _ in range(100)])
db.session.commit()
@app.route('/')
def index():
p = Todo.query.paginate()
return '{0} items retrieved'.format(len(p.items))
c = app.test_client()
# request default
r = c.get('/')
self.assertEqual(r.status_code, 200)
# request args
r = c.get('/?per_page=10')
self.assertEqual(r.data.decode('utf8'), '10 items retrieved')
with app.app_context():
# query default
p = Todo.query.paginate()
self.assertEqual(p.total, 100)
class BindsTestCase(unittest.TestCase):
def test_basic_binds(self):
import tempfile
_, db1 = tempfile.mkstemp()
_, db2 = tempfile.mkstemp()
def _remove_files():
import os
try:
os.remove(db1)
os.remove(db2)
except IOError:
pass
atexit.register(_remove_files)
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['SQLALCHEMY_BINDS'] = {
'foo': 'sqlite:///' + db1,
'bar': 'sqlite:///' + db2
}
db = sqlalchemy.SQLAlchemy(app)
class Foo(db.Model):
__bind_key__ = 'foo'
__table_args__ = {"info": {"bind_key": "foo"}}
id = db.Column(db.Integer, primary_key=True)
class Bar(db.Model):
__bind_key__ = 'bar'
id = db.Column(db.Integer, primary_key=True)
class Baz(db.Model):
id = db.Column(db.Integer, primary_key=True)
db.create_all()
# simple way to check if the engines are looked up properly
self.assertEqual(db.get_engine(app, None), db.engine)
for key in 'foo', 'bar':
engine = db.get_engine(app, key)
connector = app.extensions['sqlalchemy'].connectors[key]
self.assertEqual(engine, connector.get_engine())
self.assertEqual(str(engine.url),
app.config['SQLALCHEMY_BINDS'][key])
# do the models have the correct engines?
self.assertEqual(db.metadata.tables['foo'].info['bind_key'], 'foo')
self.assertEqual(db.metadata.tables['bar'].info['bind_key'], 'bar')
self.assertEqual(db.metadata.tables['baz'].info.get('bind_key'), None)
# see the tables created in an engine
metadata = db.MetaData()
metadata.reflect(bind=db.get_engine(app, 'foo'))
self.assertEqual(len(metadata.tables), 1)
self.assertTrue('foo' in metadata.tables)
metadata = db.MetaData()
metadata.reflect(bind=db.get_engine(app, 'bar'))
self.assertEqual(len(metadata.tables), 1)
self.assertTrue('bar' in metadata.tables)
metadata = db.MetaData()
metadata.reflect(bind=db.get_engine(app))
self.assertEqual(len(metadata.tables), 1)
self.assertTrue('baz' in metadata.tables)
        # does the session have the right binds set?
self.assertEqual(db.get_binds(app), {
Foo.__table__: db.get_engine(app, 'foo'),
Bar.__table__: db.get_engine(app, 'bar'),
Baz.__table__: db.get_engine(app, None)
})
class DefaultQueryClassTestCase(unittest.TestCase):
def test_default_query_class(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
db = sqlalchemy.SQLAlchemy(app)
class Parent(db.Model):
id = db.Column(db.Integer, primary_key=True)
children = db.relationship("Child", backref = "parents", lazy='dynamic')
class Child(db.Model):
id = db.Column(db.Integer, primary_key=True)
parent_id = db.Column(db.Integer, db.ForeignKey('parent.id'))
p = Parent()
c = Child()
c.parent = p
self.assertEqual(type(Parent.query), sqlalchemy.BaseQuery)
self.assertEqual(type(Child.query), sqlalchemy.BaseQuery)
self.assertTrue(isinstance(p.children, sqlalchemy.BaseQuery))
#self.assertTrue(isinstance(c.parents, sqlalchemy.BaseQuery))
class SQLAlchemyIncludesTestCase(unittest.TestCase):
def test(self):
"""Various SQLAlchemy objects are exposed as attributes.
"""
db = sqlalchemy.SQLAlchemy()
import sqlalchemy as sqlalchemy_lib
self.assertTrue(db.Column == sqlalchemy_lib.Column)
# The Query object we expose is actually our own subclass.
from flask_sqlalchemy import BaseQuery
self.assertTrue(db.Query == BaseQuery)
class RegressionTestCase(unittest.TestCase):
def test_joined_inheritance(self):
app = flask.Flask(__name__)
db = sqlalchemy.SQLAlchemy(app)
class Base(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.Unicode(20))
__mapper_args__ = {'polymorphic_on': type}
class SubBase(Base):
id = db.Column(db.Integer, db.ForeignKey('base.id'),
primary_key=True)
__mapper_args__ = {'polymorphic_identity': 'sub'}
self.assertEqual(Base.__tablename__, 'base')
self.assertEqual(SubBase.__tablename__, 'sub_base')
db.create_all()
def test_single_table_inheritance(self):
app = flask.Flask(__name__)
db = sqlalchemy.SQLAlchemy(app)
class Base(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.Unicode(20))
__mapper_args__ = {'polymorphic_on': type}
class SubBase(Base):
__mapper_args__ = {'polymorphic_identity': 'sub'}
self.assertEqual(Base.__tablename__, 'base')
self.assertEqual(SubBase.__tablename__, 'base')
db.create_all()
def test_joined_inheritance_relation(self):
app = flask.Flask(__name__)
db = sqlalchemy.SQLAlchemy(app)
class Relation(db.Model):
id = db.Column(db.Integer, primary_key=True)
base_id = db.Column(db.Integer, db.ForeignKey('base.id'))
name = db.Column(db.Unicode(20))
def __init__(self, name):
self.name = name
class Base(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.Unicode(20))
__mapper_args__ = {'polymorphic_on': type}
class SubBase(Base):
id = db.Column(db.Integer, db.ForeignKey('base.id'),
primary_key=True)
__mapper_args__ = {'polymorphic_identity': u'sub'}
relations = db.relationship(Relation)
db.create_all()
base = SubBase()
base.relations = [Relation(name=u'foo')]
db.session.add(base)
db.session.commit()
base = base.query.one()
def test_connection_binds(self):
app = flask.Flask(__name__)
db = sqlalchemy.SQLAlchemy(app)
assert db.session.connection()
class SessionScopingTestCase(unittest.TestCase):
def test_default_session_scoping(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
db = sqlalchemy.SQLAlchemy(app)
class FOOBar(db.Model):
id = db.Column(db.Integer, primary_key=True)
db.create_all()
with app.test_request_context():
fb = FOOBar()
db.session.add(fb)
assert fb in db.session
def test_session_scoping_changing(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
def scopefunc():
return id(dict())
db = sqlalchemy.SQLAlchemy(app, session_options=dict(scopefunc=scopefunc))
class FOOBar(db.Model):
id = db.Column(db.Integer, primary_key=True)
db.create_all()
with app.test_request_context():
fb = FOOBar()
db.session.add(fb)
assert fb not in db.session # because a new scope is generated on each call
class CommitOnTeardownTestCase(unittest.TestCase):
def setUp(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = sqlalchemy.SQLAlchemy(app)
Todo = make_todo_model(db)
db.create_all()
@app.route('/')
def index():
return '\n'.join(x.title for x in Todo.query.all())
@app.route('/create', methods=['POST'])
def create():
db.session.add(Todo('Test one', 'test'))
if flask.request.form.get('fail'):
raise RuntimeError("Failing as requested")
return 'ok'
self.client = app.test_client()
def test_commit_on_success(self):
resp = self.client.post('/create')
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.client.get('/').data, b'Test one')
def test_roll_back_on_failure(self):
resp = self.client.post('/create', data={'fail': 'on'})
self.assertEqual(resp.status_code, 500)
self.assertEqual(self.client.get('/').data, b'')
class StandardSessionTestCase(unittest.TestCase):
def test_insert_update_delete(self):
# Ensure _SignalTrackingMapperExtension doesn't croak when
# faced with a vanilla SQLAlchemy session.
#
# Verifies that "AttributeError: 'SessionMaker' object has no attribute '_model_changes'"
# is not thrown.
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
db = sqlalchemy.SQLAlchemy(app)
Session = sessionmaker(bind=db.engine)
class QazWsx(db.Model):
id = db.Column(db.Integer, primary_key=True)
x = db.Column(db.String, default='')
db.create_all()
session = Session()
session.add(QazWsx())
session.flush() # issues an INSERT.
session.expunge_all()
qaz_wsx = session.query(QazWsx).first()
assert qaz_wsx.x == ''
qaz_wsx.x = 'test'
session.flush() # issues an UPDATE.
session.expunge_all()
qaz_wsx = session.query(QazWsx).first()
assert qaz_wsx.x == 'test'
session.delete(qaz_wsx) # issues a DELETE.
assert session.query(QazWsx).first() is None
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(BasicAppTestCase))
suite.addTest(unittest.makeSuite(CustomMetaDataTestCase))
suite.addTest(unittest.makeSuite(TestQueryProperty))
suite.addTest(unittest.makeSuite(TablenameTestCase))
suite.addTest(unittest.makeSuite(PaginationTestCase))
suite.addTest(unittest.makeSuite(BindsTestCase))
suite.addTest(unittest.makeSuite(DefaultQueryClassTestCase))
suite.addTest(unittest.makeSuite(SQLAlchemyIncludesTestCase))
suite.addTest(unittest.makeSuite(RegressionTestCase))
suite.addTest(unittest.makeSuite(SessionScopingTestCase))
suite.addTest(unittest.makeSuite(CommitOnTeardownTestCase))
if flask.signals_available:
suite.addTest(unittest.makeSuite(SignallingTestCase))
suite.addTest(unittest.makeSuite(StandardSessionTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| bsd-3-clause |
un33k/CouchPotatoServer | libs/axl/axel.py | 65 | 13262 | # axel.py
#
# Copyright (C) 2010 Adrian Cristea adrian dot cristea at gmail dotcom
# Edits by Ruud Burger
#
# Based on an idea by Peter Thatcher, found on
# http://www.valuedlessons.com/2008/04/events-in-python.html
#
# This module is part of Axel and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
# Source: http://pypi.python.org/pypi/axel
# Docs: http://packages.python.org/axel
from Queue import Empty, Queue
import hashlib
import sys
import threading
from couchpotato.core.helpers.variable import natsortKey
class Event(object):
"""
Event object inspired by C# events. Handlers can be registered and
unregistered using += and -= operators. Execution and result are
influenced by the arguments passed to the constructor and += method.
from axel import Event
event = Event()
def on_event(*args, **kwargs):
return (args, kwargs)
event += on_event # handler registration
print(event(10, 20, y=30))
>> ((True, ((10, 20), {'y': 30}), <function on_event at 0x00BAA270>),)
event -= on_event # handler is unregistered
print(event(10, 20, y=30))
>> None
class Mouse(object):
def __init__(self):
self.click = Event(self)
self.click += self.on_click # handler registration
def on_click(self, sender, *args, **kwargs):
assert isinstance(sender, Mouse), 'Wrong sender'
return (args, kwargs)
mouse = Mouse()
print(mouse.click(10, 20))
>> ((True, ((10, 20), {}),
>> <bound method Mouse.on_click of <__main__.Mouse object at 0x00B6F470>>),)
mouse.click -= mouse.on_click # handler is unregistered
print(mouse.click(10, 20))
>> None
"""
def __init__(self, name = None, sender = None, asynch = False, exc_info = False,
lock = None, threads = 3, traceback = False):
""" Creates an event
asynch
            if True, handlers are executed asynchronously
exc_info
if True, result will contain sys.exc_info()[:2] on error
lock
threading.RLock used to synchronize execution
sender
event's sender. The sender is passed as the first argument to the
            handler, only if it is not None. In this case the handler must have
a placeholder in the arguments to receive the sender
threads
maximum number of threads that will be started
traceback
if True, the execution result will contain sys.exc_info()
on error. exc_info must be also True to get the traceback
hash = self.hash(handler)
Handlers are stored in a dictionary that has as keys the handler's hash
handlers = {
hash : (handler, memoize, timeout),
hash : (handler, memoize, timeout), ...
}
The execution result is cached using the following structure
memoize = {
hash : ((args, kwargs, result), (args, kwargs, result), ...),
hash : ((args, kwargs, result), ...), ...
}
The execution result is returned as a tuple having this structure
exec_result = (
(True, result, handler), # on success
(False, error_info, handler), # on error
(None, None, handler), ... # asynchronous execution
)
"""
self.name = name
self.asynchronous = asynch
self.exc_info = exc_info
self.lock = lock
self.sender = sender
self.threads = threads
self.traceback = traceback
self.handlers = {}
self.memoize = {}
def hash(self, handler):
return hashlib.md5(str(handler)).hexdigest()
def handle(self, handler, priority = 0):
""" Registers a handler. The handler can be transmitted together
with two arguments as a list or dictionary. The arguments are:
memoize
if True, the execution result will be cached in self.memoize
timeout
will allocate a predefined time interval for the execution
If arguments are provided as a list, they are considered to have
this sequence: (handler, memoize, timeout)
Examples:
event += handler
event += (handler, True, 1.5)
event += {'handler':handler, 'memoize':True, 'timeout':1.5}
"""
handler_, memoize, timeout = self._extract(handler)
self.handlers['%s.%s' % (priority, self.hash(handler_))] = (handler_, memoize, timeout)
return self
def unhandle(self, handler):
""" Unregisters a handler """
handler_, memoize, timeout = self._extract(handler)
        hash_ = self.hash(handler_)
        # handle() stores entries under '<priority>.<hash>' keys, so match on the hash suffix
        keys = [key for key in self.handlers if key.endswith('.%s' % hash_)]
        if not keys:
            raise ValueError('Handler "%s" was not found' % str(handler_))
        for key in keys:
            del self.handlers[key]
return self
def fire(self, *args, **kwargs):
""" Stores all registered handlers in a queue for processing """
self.queue = Queue()
result = {}
if self.handlers:
max_threads = 1 if kwargs.get('event_order_lock') else self._threads()
# Set global result
def add_to(key, value):
result[key] = value
kwargs['event_add_to_result'] = add_to
for i in range(max_threads):
t = threading.Thread(target = self._execute,
args = args, kwargs = kwargs)
t.daemon = True
t.start()
handler_keys = self.handlers.keys()
handler_keys.sort(key = natsortKey)
for handler in handler_keys:
self.queue.put(handler)
if self.asynchronous:
handler_, memoize, timeout = self.handlers[handler]
result[handler] = (None, None, handler_)
if not self.asynchronous:
self.queue.join()
return result
def count(self):
""" Returns the count of registered handlers """
return len(self.handlers)
def clear(self):
""" Discards all registered handlers and cached results """
self.handlers.clear()
self.memoize.clear()
def _execute(self, *args, **kwargs):
# Remove get and set from kwargs
add_to_result = kwargs.get('event_add_to_result')
del kwargs['event_add_to_result']
# Get and remove order lock
order_lock = kwargs.get('event_order_lock')
try: del kwargs['event_order_lock']
except: pass
# Get and remove return on first
return_on_result = kwargs.get('event_return_on_result')
try: del kwargs['event_return_on_result']
except: pass
got_results = False
""" Executes all handlers stored in the queue """
while True:
try:
h_ = self.queue.get(timeout = 2)
handler, memoize, timeout = self.handlers[h_]
if return_on_result and got_results:
if not self.asynchronous:
self.queue.task_done()
continue
if order_lock:
order_lock.acquire()
try:
r = self._memoize(memoize, timeout, handler, *args, **kwargs)
if not self.asynchronous:
if not return_on_result or (return_on_result and r[1] is not None):
add_to_result(h_, tuple(r))
got_results = True
except Exception:
if not self.asynchronous:
add_to_result(h_, (False, self._error(sys.exc_info()),
handler))
else:
self.error_handler(sys.exc_info())
finally:
if order_lock:
order_lock.release()
if not self.asynchronous:
self.queue.task_done()
if self.queue.empty():
raise Empty
except Empty:
break
def _extract(self, queue_item):
""" Extracts a handler and handler's arguments that can be provided
as list or dictionary. If arguments are provided as list, they are
considered to have this sequence: (handler, memoize, timeout)
Examples:
event += handler
event += (handler, True, 1.5)
event += {'handler':handler, 'memoize':True, 'timeout':1.5}
"""
assert queue_item, 'Invalid list of arguments'
handler = None
memoize = False
timeout = 0
if not isinstance(queue_item, (list, tuple, dict)):
handler = queue_item
elif isinstance(queue_item, (list, tuple)):
if len(queue_item) == 3:
handler, memoize, timeout = queue_item
elif len(queue_item) == 2:
handler, memoize, = queue_item
elif len(queue_item) == 1:
                handler = queue_item[0]
elif isinstance(queue_item, dict):
handler = queue_item.get('handler')
memoize = queue_item.get('memoize', False)
timeout = queue_item.get('timeout', 0)
return (handler, bool(memoize), float(timeout))
def _memoize(self, memoize, timeout, handler, *args, **kwargs):
""" Caches the execution result of successful executions
hash = self.hash(handler)
memoize = {
hash : ((args, kwargs, result), (args, kwargs, result), ...),
hash : ((args, kwargs, result), ...), ...
}
"""
if not isinstance(handler, Event) and self.sender is not None:
args = list(args)[:]
args.insert(0, self.sender)
if not memoize:
if timeout <= 0: #no time restriction
result = [True, handler(*args, **kwargs), handler]
return result
result = self._timeout(timeout, handler, *args, **kwargs)
if isinstance(result, tuple) and len(result) == 3:
if isinstance(result[1], Exception): #error occurred
return [False, self._error(result), handler]
return [True, result, handler]
else:
hash_ = self.hash(handler)
if hash_ in self.memoize:
for args_, kwargs_, result in self.memoize[hash_]:
if args_ == args and kwargs_ == kwargs:
return [True, result, handler]
if timeout <= 0: #no time restriction
result = handler(*args, **kwargs)
else:
result = self._timeout(timeout, handler, *args, **kwargs)
if isinstance(result, tuple) and len(result) == 3:
if isinstance(result[1], Exception): #error occurred
return [False, self._error(result), handler]
lock = threading.RLock()
lock.acquire()
try:
if hash_ not in self.memoize:
self.memoize[hash_] = []
self.memoize[hash_].append((args, kwargs, result))
return [True, result, handler]
finally:
lock.release()
def _timeout(self, timeout, handler, *args, **kwargs):
""" Controls the time allocated for the execution of a method """
t = spawn_thread(target = handler, args = args, kwargs = kwargs)
t.daemon = True
t.start()
t.join(timeout)
if not t.is_alive():
if t.exc_info:
return t.exc_info
return t.result
else:
try:
msg = '[%s] Execution was forcefully terminated'
raise RuntimeError(msg % t.name)
except:
return sys.exc_info()
def _threads(self):
""" Calculates maximum number of threads that will be started """
if self.threads < len(self.handlers):
return self.threads
return len(self.handlers)
def _error(self, exc_info):
""" Retrieves the error info """
if self.exc_info:
if self.traceback:
return exc_info
return exc_info[:2]
return exc_info[1]
__iadd__ = handle
__isub__ = unhandle
__call__ = fire
__len__ = count
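# Illustrative sketch, not part of the original module: besides the += / -=
# operators shown in the class docstring, handlers can also be registered
# through handle() directly to control priority, memoization and timeout,
# matching the (handler, memoize, timeout) forms documented above. The handler
# names below are placeholders:
#
#   event = Event(threads = 1)
#   event.handle(cheap_handler, priority = 0)                    # queued first (natural sort on priority)
#   event.handle((expensive_handler, True, 2.0), priority = 1)   # memoized, 2 second timeout
#   results = event(42, flag = True)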
class spawn_thread(threading.Thread):
""" Spawns a new thread and returns the execution result """
def __init__(self, target, args = (), kwargs = {}, default = None):
threading.Thread.__init__(self)
self._target = target
self._args = args
self._kwargs = kwargs
self.result = default
self.exc_info = None
def run(self):
try:
self.result = self._target(*self._args, **self._kwargs)
except:
self.exc_info = sys.exc_info()
finally:
del self._target, self._args, self._kwargs
| gpl-3.0 |
ford-prefect/cerbero | test/test_packages_common.py | 31 | 3804 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.config import Platform, Distro, DistroVersion
from cerbero.packages import package
from cerbero.packages.packagesstore import PackagesStore
from test.test_build_common import create_cookbook
class Package1(package.Package):
name = 'gstreamer-test1'
shortdesc = 'GStreamer Test'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
deps = ['gstreamer-test2']
files = ['recipe1:misc:libs:bins']
platform_files = {
Platform.WINDOWS: ['recipe5:libs']
}
class Package2(package.Package):
name = 'gstreamer-test2'
shortdesc = 'GStreamer Test 2'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
files = ['recipe2:misc']
class Package3(package.Package):
name = 'gstreamer-test3'
shortdesc = 'GStreamer Test 3'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
files = ['recipe3:misc']
class Package4(package.Package):
name = 'gstreamer-test-bindings'
shortdesc = 'GStreamer Bindings'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
sys_deps = {Distro.DEBIAN: ['python'],
DistroVersion.FEDORA_16: ['python27']}
files = ['recipe4:misc']
class MetaPackage(package.MetaPackage):
name = "gstreamer-runtime"
shortdesc = "GStreamer runtime"
longdesc = "GStreamer runtime"
title = "GStreamer runtime"
url = "http://www.gstreamer.net"
version = '1.0'
uuid = '3ffe67b2-4565-411f-8287-e8faa892f853'
vendor = "GStreamer Project"
org = 'net.gstreamer'
packages = [
('gstreamer-test1', True, True),
('gstreamer-test3', False, True),
('gstreamer-test-bindings', False, False)]
platform_packages = {
Platform.LINUX: [('gstreamer-test2', False, False)]}
icon = "gstreamer.ico"
class App(package.App):
name = "gstreamer-app"
shortdesc = "GStreamer sample app"
longdesc = "GStreamer sample app"
title = "GStreamer sample app"
url = "http://www.gstreamer.net"
version = '1.0'
uuid = '3ffe67b2-4565-411f-8287-e8faa892f853'
vendor = "GStreamer Project"
org = 'net.gstreamer'
app_recipe = 'recipe3'
deps = ['gstreamer-test1']
icon = "share/images/gstreamer.png"
embed_deps = True
class DummyConfig(object):
pass
def create_store(config):
cookbook = create_cookbook(config)
store = PackagesStore(config, False)
for klass in [Package1, Package2, Package3, Package4, App]:
package = klass(config, store, cookbook)
package.__file__ = 'test/test_packages_common.py'
store.add_package(package)
for klass in [MetaPackage]:
package = klass(config, store)
package.__file__ = 'test/test_packages_common.py'
store.add_package(package)
return store
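# A sketch of how this factory is meant to be consumed by the package tests
# (the get_package() lookup is assumed here and is not shown in this module):
#
#   store = create_store(config)  # config comes from the surrounding test setup
#   runtime = store.get_package('gstreamer-runtime')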
| lgpl-2.1 |
mozts2005/OuterSpace | oslauncher/oslauncher/version.py | 3 | 1031 | #
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
try:
import versiondata
version = versiondata.version
versionString = "%d.%d.%d" % version
except ImportError:
version = (0, 0, 0)
versionString = "[Work In Progress]"
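# The optional versiondata module is expected to be generated at release time;
# a minimal sketch of its contents (the numbers are placeholders only):
#
#   # versiondata.py
#   version = (0, 6, 2)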
| gpl-2.0 |
JesseLivezey/pylearn2 | pylearn2/datasets/tests/test_ocr.py | 43 | 3754 | """module for testing datasets.ocr"""
import unittest
import numpy as np
from pylearn2.datasets.ocr import OCR
from pylearn2.space import Conv2DSpace
from pylearn2.testing.skip import skip_if_no_data
class TestOCR(unittest.TestCase):
"""
Unit test of OCR dataset
Parameters
----------
None
"""
def setUp(self):
"""Load train, test, valid sets"""
skip_if_no_data()
self.train = OCR(which_set='train')
self.valid = OCR(which_set='valid')
self.test = OCR(which_set='test')
def test_topo(self):
"""Tests that a topological batch has 4 dimensions"""
topo = self.train.get_batch_topo(1)
assert topo.ndim == 4
def test_topo_c01b(self):
"""
Tests that a topological batch with axes ('c',0,1,'b')
can be dimshuffled back to match the standard ('b',0,1,'c')
format.
"""
batch_size = 100
c01b_test = OCR(which_set='test', axes=('c', 0, 1, 'b'))
c01b_X = c01b_test.X[0:batch_size, :]
c01b = c01b_test.get_topological_view(c01b_X)
assert c01b.shape == (1, 16, 8, batch_size)
b01c = c01b.transpose(3, 1, 2, 0)
b01c_X = self.test.X[0:batch_size, :]
assert c01b_X.shape == b01c_X.shape
assert np.all(c01b_X == b01c_X)
b01c_direct = self.test.get_topological_view(b01c_X)
assert b01c_direct.shape == b01c.shape
assert np.all(b01c_direct == b01c)
def test_iterator(self):
"""
Tests that batches returned by an iterator with topological
data_specs are the same as the ones returned by calling
get_topological_view on the dataset with the corresponding order
"""
batch_size = 100
b01c_X = self.test.X[0:batch_size, :]
b01c_topo = self.test.get_topological_view(b01c_X)
b01c_b01c_it = self.test.iterator(
mode='sequential',
batch_size=batch_size,
data_specs=(Conv2DSpace(shape=(16, 8),
num_channels=1,
axes=('b', 0, 1, 'c')),
'features'))
b01c_b01c = b01c_b01c_it.next()
assert np.all(b01c_topo == b01c_b01c)
c01b_test = OCR(which_set='test', axes=('c', 0, 1, 'b'))
c01b_X = c01b_test.X[0:batch_size, :]
c01b_topo = c01b_test.get_topological_view(c01b_X)
c01b_c01b_it = c01b_test.iterator(
mode='sequential',
batch_size=batch_size,
data_specs=(Conv2DSpace(shape=(16, 8),
num_channels=1,
axes=('c', 0, 1, 'b')),
'features'))
c01b_c01b = c01b_c01b_it.next()
assert np.all(c01b_topo == c01b_c01b)
# Also check that samples from iterators with the same data_specs
# with Conv2DSpace do not depend on the axes of the dataset
b01c_c01b_it = self.test.iterator(
mode='sequential',
batch_size=batch_size,
data_specs=(Conv2DSpace(shape=(16, 8),
num_channels=1,
axes=('c', 0, 1, 'b')),
'features'))
b01c_c01b = b01c_c01b_it.next()
assert np.all(b01c_c01b == c01b_c01b)
c01b_b01c_it = c01b_test.iterator(
mode='sequential',
batch_size=batch_size,
data_specs=(Conv2DSpace(shape=(16, 8),
num_channels=1,
axes=('b', 0, 1, 'c')),
'features'))
c01b_b01c = c01b_b01c_it.next()
assert np.all(c01b_b01c == b01c_b01c)
| bsd-3-clause |
sagemath/git-trac-command | git_trac/cmdline.py | 1 | 12216 | ## -*- encoding: utf-8 -*-
"""
Handle Command Line Options
"""
##############################################################################
# The "git trac ..." command extension for git
# Copyright (C) 2013 Volker Braun <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import sys
import os
import warnings
import argparse
from .logger import logger
from .ticket_or_branch import TicketOrBranch
def xdg_open(uri):
import subprocess
if sys.platform == 'darwin':
rc = subprocess.call(['open', uri])
error = 'Failed to run "open", please open {0}'
else:
rc = subprocess.call(['xdg-open', uri])
error = 'Failed to run "xdg-open", please open {0}'
if rc != 0:
print(error.format(uri))
def show_cheat_sheet():
# case where `git-trac` was just symbolically linked
root_dir = os.path.dirname(os.path.dirname(__file__))
cheat_sheet = os.path.join(root_dir, 'doc', 'git-cheat-sheet.pdf')
# case of `python setup.py install --user`
if not os.path.exists(cheat_sheet):
root_dir = __import__('site').USER_BASE
cheat_sheet = os.path.join(root_dir,
'share',
'git-trac-command',
'git-cheat-sheet.pdf')
# case of `python setup.py install`
if not os.path.exists(cheat_sheet):
root_dir = sys.prefix
cheat_sheet = os.path.join(root_dir,
'share',
'git-trac-command',
'git-cheat-sheet.pdf')
# go to internet if not found
if not os.path.exists(cheat_sheet):
cheat_sheet = "http://github.com/sagemath/git-trac-command/raw/master/doc/git-cheat-sheet.pdf"
print('Cheat sheet not found locally. Trying the internet.')
xdg_open(cheat_sheet)
def debug_shell(app, parser):
from IPython.terminal.ipapp import TerminalIPythonApp
ip = TerminalIPythonApp.instance()
ip.initialize(argv=[])
ip.shell.user_global_ns['app'] = app
ip.shell.user_global_ns['logger'] = logger
ip.shell.user_global_ns['repo'] = app.repo
ip.shell.user_global_ns['git'] = app.git
ip.shell.user_global_ns['trac'] = app.trac
ip.shell.user_global_ns['parser'] = parser
def ipy_import(module_name, identifier):
import importlib
module = importlib.import_module(module_name)
ip.shell.user_global_ns[identifier] = getattr(module, identifier)
ipy_import('git_trac.git_interface', 'GitInterface')
ipy_import('git_trac.trac_server', 'TracServer')
ip.start()
description = \
"""
The trac command extension for git
"""
def monkey_patch():
"""
Monkey patch ArgumentParser
"""
old_parse_args = argparse.ArgumentParser.parse_args
def parse_args_override(self, args=None):
"""
http://bugs.python.org/issue9253 prevents us from just redefining -h
Workaround by monkey-patching parse_args
"""
if args is None:
args = list(sys.argv)[1:]
if len(args) == 1 and args[-1] == '-h':
# Convert "git-trac -h" to "git-trac help"
args[-1] = 'help'
return old_parse_args(self, args)
setattr(argparse.ArgumentParser, 'parse_args', parse_args_override)
def make_parser():
monkey_patch()
parser = argparse.ArgumentParser(description=description, add_help=False)
# We cannot handle "git trac --help", this is outside of our control and purely within git
# redefine to not print '--help' in the online help
parser.add_argument('-h', dest='option_help', action='store_true',
default=False,
help='show this help message and exit')
parser.add_argument('--debug', dest='debug', action='store_true',
default=False,
help='debug')
parser.add_argument('--log', dest='log', default=None,
help='one of [DEBUG, INFO, ERROR, WARNING, CRITICAL]')
subparsers = parser.add_subparsers(dest='subcommand')
parser_create = subparsers.add_parser('create', help='Create new ticket')
parser_create.add_argument('-b', '--branch', dest='branch_name',
help='Branch name',
default=None)
parser_create.add_argument('summary', type=str, help='Ticket summary')
parser_checkout = subparsers.add_parser('checkout', help='Download branch')
parser_checkout.add_argument('-b', '--branch', dest='branch_name',
help='Local branch name',
default=None)
parser_checkout.add_argument('ticket_or_branch', type=TicketOrBranch,
help='Ticket number or remote branch name')
parser_search = subparsers.add_parser('search', help='Search trac')
parser_search.add_argument('--branch', dest='branch_name',
help='Remote git branch name (default: local branch)',
default=None)
parser_fetch = subparsers.add_parser('fetch', help='Fetch branch from trac ticket')
parser_fetch.add_argument('ticket_or_branch', nargs='?', type=TicketOrBranch,
help='Ticket number or remote branch name', default=None)
parser_pull = subparsers.add_parser('pull', help='Get updates')
parser_pull.add_argument('ticket_or_branch', nargs='?', type=TicketOrBranch,
help='Ticket number or remote branch name', default=None)
parser_push = subparsers.add_parser('push', help='Upload changes')
parser_push.add_argument('--force', dest='force', action='store_true',
default=False, help='Force push')
parser_push.add_argument('--branch', dest='remote',
default=None, help='Remote branch name')
parser_push.add_argument('ticket', nargs='?', type=int,
help='Ticket number', default=None)
parser_get = subparsers.add_parser('get', help='Print trac page')
parser_get.add_argument('ticket', nargs='?', type=int,
help='Ticket number', default=None)
parser_depends = subparsers.add_parser('depends', help='Print trac dependencies')
parser_depends.add_argument('ticket', nargs='?', type=int,
help='Ticket number', default=None)
parser_print = subparsers.add_parser('print', help='Print trac page')
parser_print.add_argument('ticket', nargs='?', type=int,
help='Ticket number', default=None)
parser_browse = subparsers.add_parser('browse', help='Open trac page in browser')
parser_browse.add_argument('ticket', nargs='?', type=int,
help='Ticket number', default=None)
parser_review = subparsers.add_parser('review', help='Show code to review')
parser_review.add_argument('ticket', nargs='?', type=int,
help='Ticket number', default=None)
parser_find = subparsers.add_parser('find', help='Find trac ticket from SHA1')
parser_find.add_argument('commit', type=str, help='Commit SHA1')
parser_try = subparsers.add_parser('try', help='Try out trac ticket in "detached HEAD"')
parser_try.add_argument('ticket_or_branch', type=TicketOrBranch,
help='Ticket number or remote branch name')
parser_log = subparsers.add_parser('log', help='Commit log for ticket')
parser_log.add_argument('ticket', type=int, help='Ticket number')
parser_log.add_argument('--oneline', dest='oneline', action='store_true',
default=False, help='One line per commit')
parser_config = subparsers.add_parser('config', help='Configure git-trac')
parser_config.add_argument('--user', dest='trac_user',
help='Trac username', default=None)
parser_config.add_argument('--pass', dest='trac_pass',
help='Trac password', default=None)
parser_config.add_argument('--token', dest='trac_token',
help="Trac authentication token (this can "
"be used in lieu of username/password "
"and must be used if you authenticate "
"with Trac via GitHub)")
parser_cheatsheet = subparsers.add_parser('cheat-sheet', help='Show the git trac cheat sheet')
parser_help = subparsers.add_parser('help', help='Show the git trac help')
return parser
def launch():
parser = make_parser()
args = parser.parse_args(sys.argv[1:])
if args.log is not None:
import logging
level = getattr(logging, args.log)
logger.setLevel(level=level)
from .app import Application
app = Application()
if args.debug:
print(args)
app.config.debug = True
debug_shell(app, parser)
elif args.option_help:
parser.print_help()
elif args.subcommand == 'create':
app.create(args.summary, args.branch_name)
elif args.subcommand == 'checkout':
app.checkout(args.ticket_or_branch, args.branch_name)
elif args.subcommand == 'fetch':
app.fetch(args.ticket_or_branch)
elif args.subcommand == 'pull':
app.pull(args.ticket_or_branch)
elif args.subcommand == 'push':
ticket_number = app.guess_ticket_number(args.ticket)
print('Pushing to Trac #{0}...'.format(ticket_number))
app.push(ticket_number, remote=args.remote, force=args.force)
elif args.subcommand == 'review':
ticket_number = app.guess_ticket_number(args.ticket)
app.review_diff(ticket_number)
elif args.subcommand == 'try':
app.tryout(args.ticket_or_branch)
elif args.subcommand == 'get':
warnings.warn('deprecated; use "git trac print" instead')
ticket_number = app.guess_ticket_number(args.ticket)
app.print_ticket(ticket_number)
elif args.subcommand == 'print':
ticket_number = app.guess_ticket_number(args.ticket)
app.print_ticket(ticket_number)
elif args.subcommand == 'depends':
ticket_number = app.guess_ticket_number(args.ticket)
app.print_dependencies(ticket_number)
elif args.subcommand == 'browse':
ticket_number = app.guess_ticket_number(args.ticket)
xdg_open('https://trac.sagemath.org/{0}'.format(ticket_number))
elif args.subcommand == 'log':
app.log(args.ticket, oneline=args.oneline)
elif args.subcommand == 'find':
app.find(args.commit)
elif args.subcommand == 'search':
try:
app.search(branch=args.branch_name)
except ValueError:
            parser.print_help()
raise
elif args.subcommand == 'config':
app.add_remote()
if args.trac_user is not None:
app.save_trac_username(args.trac_user)
if args.trac_pass is not None:
app.save_trac_password(args.trac_pass)
if args.trac_token is not None:
app.save_trac_token(args.trac_token)
app.print_config()
elif args.subcommand == 'cheat-sheet':
show_cheat_sheet()
elif args.subcommand == 'help':
parser.print_help()
else:
print('Unknown subcommand "{0}"'.format(args.subcommand))
parser.print_help()
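# This module defines no __main__ entry point of its own; the assumption (based
# on the "git trac ..." usage above) is that packaging installs a `git-trac`
# console script that simply calls launch(), e.g. a setup.py sketch:
#
#   entry_points = {'console_scripts': ['git-trac = git_trac.cmdline:launch']}
#
# so that `git trac <subcommand>` dispatches through the parser built here.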
| gpl-3.0 |
gaboflowers/mallador_v3 | unidecode/x059.py | 252 | 4644 | data = (
'Shou ', # 0x00
'Yi ', # 0x01
'Zhi ', # 0x02
'Gu ', # 0x03
'Chu ', # 0x04
'Jiang ', # 0x05
'Feng ', # 0x06
'Bei ', # 0x07
'Cay ', # 0x08
'Bian ', # 0x09
'Sui ', # 0x0a
'Qun ', # 0x0b
'Ling ', # 0x0c
'Fu ', # 0x0d
'Zuo ', # 0x0e
'Xia ', # 0x0f
'Xiong ', # 0x10
'[?] ', # 0x11
'Nao ', # 0x12
'Xia ', # 0x13
'Kui ', # 0x14
'Xi ', # 0x15
'Wai ', # 0x16
'Yuan ', # 0x17
'Mao ', # 0x18
'Su ', # 0x19
'Duo ', # 0x1a
'Duo ', # 0x1b
'Ye ', # 0x1c
'Qing ', # 0x1d
'Uys ', # 0x1e
'Gou ', # 0x1f
'Gou ', # 0x20
'Qi ', # 0x21
'Meng ', # 0x22
'Meng ', # 0x23
'Yin ', # 0x24
'Huo ', # 0x25
'Chen ', # 0x26
'Da ', # 0x27
'Ze ', # 0x28
'Tian ', # 0x29
'Tai ', # 0x2a
'Fu ', # 0x2b
'Guai ', # 0x2c
'Yao ', # 0x2d
'Yang ', # 0x2e
'Hang ', # 0x2f
'Gao ', # 0x30
'Shi ', # 0x31
'Ben ', # 0x32
'Tai ', # 0x33
'Tou ', # 0x34
'Yan ', # 0x35
'Bi ', # 0x36
'Yi ', # 0x37
'Kua ', # 0x38
'Jia ', # 0x39
'Duo ', # 0x3a
'Kwu ', # 0x3b
'Kuang ', # 0x3c
'Yun ', # 0x3d
'Jia ', # 0x3e
'Pa ', # 0x3f
'En ', # 0x40
'Lian ', # 0x41
'Huan ', # 0x42
'Di ', # 0x43
'Yan ', # 0x44
'Pao ', # 0x45
'Quan ', # 0x46
'Qi ', # 0x47
'Nai ', # 0x48
'Feng ', # 0x49
'Xie ', # 0x4a
'Fen ', # 0x4b
'Dian ', # 0x4c
'[?] ', # 0x4d
'Kui ', # 0x4e
'Zou ', # 0x4f
'Huan ', # 0x50
'Qi ', # 0x51
'Kai ', # 0x52
'Zha ', # 0x53
'Ben ', # 0x54
'Yi ', # 0x55
'Jiang ', # 0x56
'Tao ', # 0x57
'Zang ', # 0x58
'Ben ', # 0x59
'Xi ', # 0x5a
'Xiang ', # 0x5b
'Fei ', # 0x5c
'Diao ', # 0x5d
'Xun ', # 0x5e
'Keng ', # 0x5f
'Dian ', # 0x60
'Ao ', # 0x61
'She ', # 0x62
'Weng ', # 0x63
'Pan ', # 0x64
'Ao ', # 0x65
'Wu ', # 0x66
'Ao ', # 0x67
'Jiang ', # 0x68
'Lian ', # 0x69
'Duo ', # 0x6a
'Yun ', # 0x6b
'Jiang ', # 0x6c
'Shi ', # 0x6d
'Fen ', # 0x6e
'Huo ', # 0x6f
'Bi ', # 0x70
'Lian ', # 0x71
'Duo ', # 0x72
'Nu ', # 0x73
'Nu ', # 0x74
'Ding ', # 0x75
'Nai ', # 0x76
'Qian ', # 0x77
'Jian ', # 0x78
'Ta ', # 0x79
'Jiu ', # 0x7a
'Nan ', # 0x7b
'Cha ', # 0x7c
'Hao ', # 0x7d
'Xian ', # 0x7e
'Fan ', # 0x7f
'Ji ', # 0x80
'Shuo ', # 0x81
'Ru ', # 0x82
'Fei ', # 0x83
'Wang ', # 0x84
'Hong ', # 0x85
'Zhuang ', # 0x86
'Fu ', # 0x87
'Ma ', # 0x88
'Dan ', # 0x89
'Ren ', # 0x8a
'Fu ', # 0x8b
'Jing ', # 0x8c
'Yan ', # 0x8d
'Xie ', # 0x8e
'Wen ', # 0x8f
'Zhong ', # 0x90
'Pa ', # 0x91
'Du ', # 0x92
'Ji ', # 0x93
'Keng ', # 0x94
'Zhong ', # 0x95
'Yao ', # 0x96
'Jin ', # 0x97
'Yun ', # 0x98
'Miao ', # 0x99
'Pei ', # 0x9a
'Shi ', # 0x9b
'Yue ', # 0x9c
'Zhuang ', # 0x9d
'Niu ', # 0x9e
'Yan ', # 0x9f
'Na ', # 0xa0
'Xin ', # 0xa1
'Fen ', # 0xa2
'Bi ', # 0xa3
'Yu ', # 0xa4
'Tuo ', # 0xa5
'Feng ', # 0xa6
'Yuan ', # 0xa7
'Fang ', # 0xa8
'Wu ', # 0xa9
'Yu ', # 0xaa
'Gui ', # 0xab
'Du ', # 0xac
'Ba ', # 0xad
'Ni ', # 0xae
'Zhou ', # 0xaf
'Zhuo ', # 0xb0
'Zhao ', # 0xb1
'Da ', # 0xb2
'Nai ', # 0xb3
'Yuan ', # 0xb4
'Tou ', # 0xb5
'Xuan ', # 0xb6
'Zhi ', # 0xb7
'E ', # 0xb8
'Mei ', # 0xb9
'Mo ', # 0xba
'Qi ', # 0xbb
'Bi ', # 0xbc
'Shen ', # 0xbd
'Qie ', # 0xbe
'E ', # 0xbf
'He ', # 0xc0
'Xu ', # 0xc1
'Fa ', # 0xc2
'Zheng ', # 0xc3
'Min ', # 0xc4
'Ban ', # 0xc5
'Mu ', # 0xc6
'Fu ', # 0xc7
'Ling ', # 0xc8
'Zi ', # 0xc9
'Zi ', # 0xca
'Shi ', # 0xcb
'Ran ', # 0xcc
'Shan ', # 0xcd
'Yang ', # 0xce
'Man ', # 0xcf
'Jie ', # 0xd0
'Gu ', # 0xd1
'Si ', # 0xd2
'Xing ', # 0xd3
'Wei ', # 0xd4
'Zi ', # 0xd5
'Ju ', # 0xd6
'Shan ', # 0xd7
'Pin ', # 0xd8
'Ren ', # 0xd9
'Yao ', # 0xda
'Tong ', # 0xdb
'Jiang ', # 0xdc
'Shu ', # 0xdd
'Ji ', # 0xde
'Gai ', # 0xdf
'Shang ', # 0xe0
'Kuo ', # 0xe1
'Juan ', # 0xe2
'Jiao ', # 0xe3
'Gou ', # 0xe4
'Mu ', # 0xe5
'Jian ', # 0xe6
'Jian ', # 0xe7
'Yi ', # 0xe8
'Nian ', # 0xe9
'Zhi ', # 0xea
'Ji ', # 0xeb
'Ji ', # 0xec
'Xian ', # 0xed
'Heng ', # 0xee
'Guang ', # 0xef
'Jun ', # 0xf0
'Kua ', # 0xf1
'Yan ', # 0xf2
'Ming ', # 0xf3
'Lie ', # 0xf4
'Pei ', # 0xf5
'Yan ', # 0xf6
'You ', # 0xf7
'Yan ', # 0xf8
'Cha ', # 0xf9
'Shen ', # 0xfa
'Yin ', # 0xfb
'Chi ', # 0xfc
'Gui ', # 0xfd
'Quan ', # 0xfe
'Zi ', # 0xff
)
| gpl-3.0 |
neuront/redis-ctl | models/cluster_plan.py | 2 | 1308 | import json
from werkzeug.utils import cached_property
from base import db, Base, DB_TEXT_TYPE
from cluster import Cluster
class ClusterBalancePlan(Base):
__tablename__ = 'cluster_balance_plan'
cluster_id = db.Column(db.ForeignKey(Cluster.id), unique=True,
nullable=False)
balance_plan_json = db.Column(DB_TEXT_TYPE, nullable=False)
@cached_property
def balance_plan(self):
return json.loads(self.balance_plan_json)
def save(self):
self.balance_plan_json = json.dumps(self.balance_plan)
db.session.add(self)
db.session.flush()
@cached_property
def pod(self):
return self.balance_plan['pod']
@cached_property
def host(self):
return self.balance_plan.get('host')
@cached_property
def slaves(self):
return self.balance_plan.get('slaves', [])
@cached_property
def aof(self):
return (self.balance_plan.get('entrypoint') == 'aof'
or self.balance_plan['aof'])
def get_balance_plan_by_addr(host, port):
from node import RedisNode
n = RedisNode.query.filter_by(host=host, port=port).first()
if n is None or n.assignee_id is None:
return None
return ClusterBalancePlan.query.filter_by(cluster_id=n.assignee_id).first()
| mit |
gboone/wedding.harmsboone.org | rsvp/migrations/0019_auto__chg_field_guest_notes.py | 1 | 5628 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Guest.notes'
db.alter_column(u'rsvp_guest', 'notes', self.gf('django.db.models.fields.TextField')(max_length=2048, null=True))
def backwards(self, orm):
# Changing field 'Guest.notes'
db.alter_column(u'rsvp_guest', 'notes', self.gf('django.db.models.fields.CharField')(max_length=2048, null=True))
models = {
u'rsvp.event': {
'Meta': {'object_name': 'Event'},
'guests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Location']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'rsvp.guest': {
'Meta': {'ordering': "['-last_name', '-first_name']", 'object_name': 'Guest'},
'arriving': ('django.db.models.fields.DateField', [], {'default': "'2014-08-14'"}),
'attending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'departing': ('django.db.models.fields.DateField', [], {'default': "'2014-08-17'"}),
'display_as': ('django.db.models.fields.CharField', [], {'max_length': '91', 'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'max_guests': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'nights': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "'None'", 'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'primary_email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'street_addr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'zip_code': ('django.db.models.fields.IntegerField', [], {'max_length': '5'})
},
u'rsvp.hotel': {
'Meta': {'object_name': 'Hotel'},
'guests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
'hotel_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'total_guest_count': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
},
u'rsvp.location': {
'Meta': {'object_name': 'Location'},
'distance': ('django.db.models.fields.DecimalField', [], {'max_digits': '3', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'rsvp.room': {
'Meta': {'object_name': 'Room'},
'guests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
'hotel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Hotel']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_occupancy': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'room_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Roomtype']", 'null': 'True', 'blank': 'True'})
},
u'rsvp.roomtype': {
'Meta': {'object_name': 'Roomtype'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'rsvp.table': {
'Meta': {'object_name': 'Table'},
'guests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['rsvp'] | mit |
YuriGural/erpnext | erpnext/hr/doctype/salary_slip/salary_slip.py | 8 | 16266 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe.utils import add_days, cint, cstr, flt, getdate, rounded, date_diff, money_in_words
from frappe.model.naming import make_autoname
from frappe import msgprint, _
from erpnext.hr.doctype.process_payroll.process_payroll import get_start_end_dates
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
from erpnext.utilities.transaction_base import TransactionBase
class SalarySlip(TransactionBase):
def autoname(self):
self.name = make_autoname('Sal Slip/' +self.employee + '/.#####')
def validate(self):
self.status = self.get_status()
self.validate_dates()
self.check_existing()
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
if not (len(self.get("earnings")) or len(self.get("deductions"))):
# get details from salary structure
self.get_emp_and_leave_details()
else:
self.get_leave_details(lwp = self.leave_without_pay)
# if self.salary_slip_based_on_timesheet or not self.net_pay:
self.calculate_net_pay()
company_currency = erpnext.get_company_currency(self.company)
self.total_in_words = money_in_words(self.rounded_total, company_currency)
if frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet"):
max_working_hours = frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet")
if self.salary_slip_based_on_timesheet and (self.total_working_hours > int(max_working_hours)):
frappe.msgprint(_("Total working hours should not be greater than max working hours {0}").
format(max_working_hours), alert=True)
def validate_dates(self):
if date_diff(self.end_date, self.start_date) < 0:
frappe.throw(_("To date cannot be before From date"))
def calculate_component_amounts(self):
if not getattr(self, '_salary_structure_doc', None):
self._salary_structure_doc = frappe.get_doc('Salary Structure', self.salary_structure)
data = self.get_data_for_eval()
for key in ('earnings', 'deductions'):
for struct_row in self._salary_structure_doc.get(key):
amount = self.eval_condition_and_formula(struct_row, data)
if amount and struct_row.statistical_component == 0:
self.update_component_row(struct_row, amount, key)
def update_component_row(self, struct_row, amount, key):
component_row = None
for d in self.get(key):
if d.salary_component == struct_row.salary_component:
component_row = d
if not component_row:
self.append(key, {
'amount': amount,
'default_amount': amount,
'depends_on_lwp' : struct_row.depends_on_lwp,
'salary_component' : struct_row.salary_component
})
else:
component_row.amount = amount
def eval_condition_and_formula(self, d, data):
try:
if d.condition:
if not frappe.safe_eval(d.condition, None, data):
return None
amount = d.amount
if d.amount_based_on_formula:
if d.formula:
amount = frappe.safe_eval(d.formula, None, data)
if amount:
data[d.abbr] = amount
return amount
except NameError as err:
frappe.throw(_("Name error: {0}".format(err)))
except SyntaxError as err:
frappe.throw(_("Syntax error in formula or condition: {0}".format(err)))
except Exception as e:
frappe.throw(_("Error in formula or condition: {0}".format(e)))
raise
def get_data_for_eval(self):
'''Returns data for evaluating formula'''
data = frappe._dict()
data.update(frappe.get_doc("Salary Structure Employee",
{"employee": self.employee, "parent": self.salary_structure}).as_dict())
data.update(frappe.get_doc("Employee", self.employee).as_dict())
data.update(self.as_dict())
# set values for components
salary_components = frappe.get_all("Salary Component", fields=["salary_component_abbr"])
for sc in salary_components:
data.setdefault(sc.salary_component_abbr, 0)
for key in ('earnings', 'deductions'):
for d in self.get(key):
data[d.abbr] = d.amount
return data
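	# A hypothetical illustration of what eval_condition_and_formula() consumes:
	# a Salary Structure row might carry condition "base > 10000" and formula
	# "base * 0.12"; both strings are run through frappe.safe_eval against the
	# dict assembled above, which merges the employee record, the salary
	# structure assignment and the abbreviations of already computed components.
	# The field names and numbers in this example are illustrative only.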
def get_emp_and_leave_details(self):
'''First time, load all the components from salary structure'''
if self.employee:
self.set("earnings", [])
self.set("deductions", [])
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
self.validate_dates()
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
self.get_leave_details(joining_date, relieving_date)
struct = self.check_sal_struct(joining_date, relieving_date)
if struct:
self._salary_structure_doc = frappe.get_doc('Salary Structure', struct)
self.salary_slip_based_on_timesheet = self._salary_structure_doc.salary_slip_based_on_timesheet or 0
self.set_time_sheet()
self.pull_sal_struct()
def set_time_sheet(self):
if self.salary_slip_based_on_timesheet:
self.set("timesheets", [])
timesheets = frappe.db.sql(""" select * from `tabTimesheet` where employee = %(employee)s and start_date BETWEEN %(start_date)s AND %(end_date)s and (status = 'Submitted' or
status = 'Billed')""", {'employee': self.employee, 'start_date': self.start_date, 'end_date': self.end_date}, as_dict=1)
for data in timesheets:
self.append('timesheets', {
'time_sheet': data.name,
'working_hours': data.total_hours
})
def get_date_details(self):
date_details = get_start_end_dates(self.payroll_frequency, self.start_date or self.posting_date)
self.start_date = date_details.start_date
self.end_date = date_details.end_date
def check_sal_struct(self, joining_date, relieving_date):
cond = ''
if self.payroll_frequency:
cond = """and payroll_frequency = '%(payroll_frequency)s'""" % {"payroll_frequency": self.payroll_frequency}
st_name = frappe.db.sql("""select parent from `tabSalary Structure Employee`
where employee=%s and (from_date <= %s or from_date <= %s)
and (to_date is null or to_date >= %s or to_date >= %s)
and parent in (select name from `tabSalary Structure`
where is_active = 'Yes'%s)
"""% ('%s', '%s', '%s','%s','%s', cond),(self.employee, self.start_date, joining_date, self.end_date, relieving_date))
if st_name:
if len(st_name) > 1:
frappe.msgprint(_("Multiple active Salary Structures found for employee {0} for the given dates")
.format(self.employee), title=_('Warning'))
return st_name and st_name[0][0] or ''
else:
self.salary_structure = None
frappe.msgprint(_("No active or default Salary Structure found for employee {0} for the given dates")
.format(self.employee), title=_('Salary Structure Missing'))
def pull_sal_struct(self):
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
if self.salary_slip_based_on_timesheet:
self.salary_structure = self._salary_structure_doc.name
self.hour_rate = self._salary_structure_doc.hour_rate
self.total_working_hours = sum([d.working_hours or 0.0 for d in self.timesheets]) or 0.0
wages_amount = self.hour_rate * self.total_working_hours
self.add_earning_for_hourly_wages(self, self._salary_structure_doc.salary_component, wages_amount)
make_salary_slip(self._salary_structure_doc.name, self)
def process_salary_structure(self):
'''Calculate salary after salary structure details have been updated'''
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
self.pull_emp_details()
self.get_leave_details()
self.calculate_net_pay()
def add_earning_for_hourly_wages(self, doc, salary_component, amount):
row_exists = False
for row in doc.earnings:
if row.salary_component == salary_component:
row.amount = amount
row_exists = True
break
if not row_exists:
wages_row = {
"salary_component": salary_component,
"abbr": frappe.db.get_value("Salary Component", salary_component, "salary_component_abbr"),
"amount": self.hour_rate * self.total_working_hours
}
doc.append('earnings', wages_row)
def pull_emp_details(self):
emp = frappe.db.get_value("Employee", self.employee, ["bank_name", "bank_ac_no"], as_dict=1)
if emp:
self.bank_name = emp.bank_name
self.bank_account_no = emp.bank_ac_no
def get_leave_details(self, joining_date=None, relieving_date=None, lwp=None):
if not joining_date:
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
holidays = self.get_holidays_for_employee(self.start_date, self.end_date)
working_days = date_diff(self.end_date, self.start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
working_days -= len(holidays)
if working_days < 0:
frappe.throw(_("There are more holidays than working days this month."))
actual_lwp = self.calculate_lwp(holidays, working_days)
if not lwp:
lwp = actual_lwp
elif lwp != actual_lwp:
frappe.msgprint(_("Leave Without Pay does not match with approved Leave Application records"))
self.total_working_days = working_days
self.leave_without_pay = lwp
payment_days = flt(self.get_payment_days(joining_date, relieving_date)) - flt(lwp)
self.payment_days = payment_days > 0 and payment_days or 0
def get_payment_days(self, joining_date, relieving_date):
start_date = getdate(self.start_date)
if joining_date:
if getdate(self.start_date) <= joining_date <= getdate(self.end_date):
start_date = joining_date
elif joining_date > getdate(self.end_date):
return
end_date = getdate(self.end_date)
if relieving_date:
if getdate(self.start_date) <= relieving_date <= getdate(self.end_date):
end_date = relieving_date
elif relieving_date < getdate(self.start_date):
frappe.throw(_("Employee relieved on {0} must be set as 'Left'")
.format(relieving_date))
payment_days = date_diff(end_date, start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
holidays = self.get_holidays_for_employee(start_date, end_date)
payment_days -= len(holidays)
return payment_days
def get_holidays_for_employee(self, start_date, end_date):
holiday_list = get_holiday_list_for_employee(self.employee)
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday`
where
parent=%(holiday_list)s
and holiday_date >= %(start_date)s
and holiday_date <= %(end_date)s''', {
"holiday_list": holiday_list,
"start_date": start_date,
"end_date": end_date
})
holidays = [cstr(i) for i in holidays]
return holidays
def calculate_lwp(self, holidays, working_days):
lwp = 0
holidays = "','".join(holidays)
for d in range(working_days):
dt = add_days(cstr(getdate(self.start_date)), d)
leave = frappe.db.sql("""
select t1.name, t1.half_day
from `tabLeave Application` t1, `tabLeave Type` t2
where t2.name = t1.leave_type
and t2.is_lwp = 1
and t1.docstatus = 1
and t1.status = 'Approved'
and t1.employee = %(employee)s
and CASE WHEN t2.include_holiday != 1 THEN %(dt)s not in ('{0}') and %(dt)s between from_date and to_date
WHEN t2.include_holiday THEN %(dt)s between from_date and to_date
END
""".format(holidays), {"employee": self.employee, "dt": dt})
if leave:
lwp = cint(leave[0][1]) and (lwp + 0.5) or (lwp + 1)
return lwp
def check_existing(self):
if not self.salary_slip_based_on_timesheet:
ret_exist = frappe.db.sql("""select name from `tabSalary Slip`
where start_date = %s and end_date = %s and docstatus != 2
and employee = %s and name != %s""",
(self.start_date, self.end_date, self.employee, self.name))
if ret_exist:
self.employee = ''
frappe.throw(_("Salary Slip of employee {0} already created for this period").format(self.employee))
else:
for data in self.timesheets:
if frappe.db.get_value('Timesheet', data.time_sheet, 'status') == 'Payrolled':
frappe.throw(_("Salary Slip of employee {0} already created for time sheet {1}").format(self.employee, data.time_sheet))
def sum_components(self, component_type, total_field):
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
if not relieving_date:
relieving_date = getdate(self.end_date)
if not joining_date:
frappe.throw(_("Please set the Date Of Joining for employee {0}").format(frappe.bold(self.employee_name)))
for d in self.get(component_type):
if (self.salary_structure and
cint(d.depends_on_lwp) and
(not
self.salary_slip_based_on_timesheet or
getdate(self.start_date) < joining_date or
getdate(self.end_date) > relieving_date
)):
d.amount = rounded(
(flt(d.default_amount) * flt(self.payment_days)
/ cint(self.total_working_days)), self.precision("amount", component_type)
)
elif not self.payment_days and not self.salary_slip_based_on_timesheet:
d.amount = 0
elif not d.amount:
d.amount = d.default_amount
self.set(total_field, self.get(total_field) + flt(d.amount))
def calculate_net_pay(self):
if self.salary_structure:
self.calculate_component_amounts()
disable_rounded_total = cint(frappe.db.get_value("Global Defaults", None, "disable_rounded_total"))
self.total_deduction = 0
self.gross_pay = 0
self.sum_components('earnings', 'gross_pay')
self.sum_components('deductions', 'total_deduction')
self.set_loan_repayment()
self.net_pay = flt(self.gross_pay) - (flt(self.total_deduction) + flt(self.total_loan_repayment))
self.rounded_total = rounded(self.net_pay,
self.precision("net_pay") if disable_rounded_total else 0)
def set_loan_repayment(self):
employee_loan = frappe.db.sql("""select sum(principal_amount) as principal_amount, sum(interest_amount) as interest_amount,
sum(total_payment) as total_loan_repayment from `tabRepayment Schedule`
where payment_date between %s and %s and parent in (select name from `tabEmployee Loan`
where employee = %s and repay_from_salary = 1 and docstatus = 1)""",
(self.start_date, self.end_date, self.employee), as_dict=True)
if employee_loan:
self.principal_amount = employee_loan[0].principal_amount
self.interest_amount = employee_loan[0].interest_amount
self.total_loan_repayment = employee_loan[0].total_loan_repayment
def on_submit(self):
if self.net_pay < 0:
frappe.throw(_("Net Pay cannot be less than 0"))
else:
self.set_status()
self.update_status(self.name)
if(frappe.db.get_single_value("HR Settings", "email_salary_slip_to_employee")):
self.email_salary_slip()
def on_cancel(self):
self.set_status()
self.update_status()
def email_salary_slip(self):
receiver = frappe.db.get_value("Employee", self.employee, "prefered_email")
if receiver:
subj = 'Salary Slip - from {0} to {1}'.format(self.start_date, self.end_date)
frappe.sendmail([receiver], subject=subj, message = _("Please see attachment"),
attachments=[frappe.attach_print(self.doctype, self.name, file_name=self.name)], reference_doctype= self.doctype, reference_name= self.name)
else:
msgprint(_("{0}: Employee email not found, hence email not sent").format(self.employee_name))
def update_status(self, salary_slip=None):
for data in self.timesheets:
if data.time_sheet:
timesheet = frappe.get_doc('Timesheet', data.time_sheet)
timesheet.salary_slip = salary_slip
timesheet.flags.ignore_validate_update_after_submit = True
timesheet.set_status()
timesheet.save()
def set_status(self, status=None):
'''Get and update status'''
if not status:
status = self.get_status()
self.db_set("status", status)
def get_status(self):
if self.docstatus == 0:
status = "Draft"
elif self.docstatus == 1:
status = "Submitted"
elif self.docstatus == 2:
status = "Cancelled"
return status
def unlink_ref_doc_from_salary_slip(ref_no):
linked_ss = frappe.db.sql_list("""select name from `tabSalary Slip`
where journal_entry=%s and docstatus < 2""", (ref_no))
if linked_ss:
for ss in linked_ss:
ss_doc = frappe.get_doc("Salary Slip", ss)
frappe.db.set_value("Salary Slip", ss_doc.name, "journal_entry", "")
| gpl-3.0 |
jnez71/demos | signals/gaussian_markov_kernel.py | 1 | 1646 | #!/usr/bin/env python3
"""
Kernel of Gaussian-transition scalar Markov process?
"""
import numpy as np
from matplotlib import pyplot
npr = np.random
np.set_printoptions(suppress=True)
pyplot.rcParams["font.size"] = 16
pyplot.rcParams["axes.grid"] = True
################################################## SYSTEM
def initial(m=10.0, s=2.0):
return npr.normal(m, s) # gaussian initial-condition
def transition(x, s=1.0):
#f = 0.5*x # linear
f = 10*np.sin(2/(1+x**2)) # nonlinear
return f + npr.normal(0.0, s) # gaussian transition
def simulate(d):
X = [initial()]
for i in range(d-1):
X.append(transition(X[-1]))
return X # one sample from d-dimensional joint (only gaussian if linear transitions)
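# Editor's sketch (not part of the original demo): for the commented-out
# *linear* transition f = 0.5*x the joint is exactly Gaussian, so the kernel
# has a closed form that the empirical covariance below should approach.
# The constants mirror the defaults above (a=0.5, transition std s=1.0,
# initial std s0=2.0) and are assumptions of this illustration.
def linear_gaussian_kernel(i, j, a=0.5, s=1.0, s0=2.0):
    k = min(i, j)
    var_k = a**(2*k) * s0**2 + s**2 * (1 - a**(2*k)) / (1 - a**2)  # var(x_k)
    return a**abs(i - j) * var_k  # cov(x_i, x_j)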
################################################## SIMULATE
d = 9
n = int(5e5)
print("Simulating samples...")
samples = np.array([simulate(d) for i in range(n)])
print("Computing statistics...")
mean = np.mean(samples, axis=0)
covar = np.cov(samples, rowvar=False)
################################################## VISUALIZE
print("========================================")
print(np.round(mean, 3), '\n')
print(np.round(covar, 3))
print("========================================")
print("Visualizing covariance...")
vmax = np.max(np.abs(covar))
pyplot.imshow(covar, cmap="coolwarm", vmin=-vmax, vmax=vmax, interpolation="lanczos")
pyplot.colorbar()
pyplot.grid(False)
pyplot.title("Covariance")
print("Visualizing joint...")
pyplot.figure()
pyplot.scatter(samples[::int(n/1e3+1), 0], samples[::int(n/1e3+1), -1], alpha=0.4)
pyplot.xlabel("x0")
pyplot.ylabel("x{0}".format(d-1))
pyplot.show()
| mit |
rananda/cfme_tests | cfme/infrastructure/config_management.py | 1 | 19414 | from functools import partial
from cached_property import cached_property
from navmazing import NavigateToSibling, NavigateToAttribute
import cfme
import cfme.fixtures.pytest_selenium as sel
import cfme.web_ui.flash as flash
import cfme.web_ui.tabstrip as tabs
import cfme.web_ui.toolbar as tb
from cfme.web_ui import (
accordion, Quadicon, Form, Input, fill, form_buttons, mixins, Table, Region,
AngularSelect, match_location
)
from utils import version, conf
from utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from utils.appliance import Navigatable
from utils.log import logger
from utils.pretty import Pretty
from utils.update import Updateable
from utils.wait import wait_for
properties_form = Form(
fields=[
('name_text', Input('name')),
('type_select', AngularSelect("provider_type")),
('url_text', Input('url')),
('ssl_checkbox', Input('verify_ssl'))
])
credential_form = Form(
fields=[
('principal_text', Input('log_userid')),
('secret_pass', Input('log_password')),
('verify_secret_pass', Input('log_verify')),
('validate_btn', form_buttons.validate)
])
def cfm_mgr_table():
return Table("//div[@id='main_div']//div[@id='list_grid']/table")
page = Region(locators={
'list_table_config_profiles': cfm_mgr_table(),
'list_table_config_systems': cfm_mgr_table()})
add_manager_btn = form_buttons.FormButton('Add')
edit_manager_btn = form_buttons.FormButton('Save changes')
cfg_btn = partial(tb.select, 'Configuration')
match_page = partial(match_location, controller='provider_foreman',
title='Red Hat Satellite Provider')
class ConfigManager(Updateable, Pretty, Navigatable):
"""
    This is the base class for Configuration manager objects (Red Hat Satellite, Foreman, Ansible Tower)
Args:
name: Name of the config. manager
url: URL, hostname or IP of the config. manager
ssl: Boolean value; `True` if SSL certificate validity should be checked, `False` otherwise
credentials: Credentials to access the config. manager
key: Key to access the cfme_data yaml data (same as `name` if not specified)
Usage:
Use Satellite or AnsibleTower classes instead.
"""
pretty_attr = ['name', 'url']
type = None
def __init__(self, name=None, url=None, ssl=None, credentials=None, key=None, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.url = url
self.ssl = ssl
self.credentials = credentials
self.key = key or name
def _form_mapping(self, create=None, **kwargs):
return {'name_text': kwargs.get('name'),
'type_select': create and self.type,
'url_text': kwargs.get('url'),
'ssl_checkbox': kwargs.get('ssl')}
class Credential(cfme.Credential, Updateable):
pass
def _submit(self, cancel, submit_button):
if cancel:
form_buttons.cancel()
else:
submit_button()
flash.assert_no_errors()
def create(self, cancel=False, validate_credentials=True, validate=True, force=False):
"""Creates the manager through UI
Args:
cancel (bool): Whether to cancel out of the creation. The cancel is done
after all the information present in the manager has been filled in the UI.
validate_credentials (bool): Whether to validate credentials - if True and the
credentials are invalid, an error will be raised.
validate (bool): Whether we want to wait for the manager's data to load
                and show up in its detail page. True will also wait, False will only set it up.
force (bool): Whether to force the creation even if the manager already exists.
True will try anyway; False will check for its existence and leave, if present.
"""
def config_profiles_loaded():
# Workaround - without this, validation of provider failed
config_profiles_names = [prof.name for prof in self.config_profiles]
logger.info(
"UI: %s\nYAML: %s",
set(config_profiles_names), set(self.yaml_data['config_profiles']))
return all(
[cp in config_profiles_names for cp in self.yaml_data['config_profiles']])
if not force and self.exists:
return
navigate_to(self, 'Add')
fill(properties_form, self._form_mapping(create=True, **self.__dict__))
fill(credential_form, self.credentials, validate=validate_credentials)
self._submit(cancel, add_manager_btn)
if not cancel:
flash.assert_message_match(self._refresh_flash_msg)
if validate:
try:
self.yaml_data['config_profiles']
except KeyError as e:
logger.exception(e)
raise
wait_for(
config_profiles_loaded,
fail_func=self.refresh_relationships,
handle_exception=True,
num_sec=180, delay=30)
def update(self, updates, cancel=False, validate_credentials=False):
"""Updates the manager through UI
        Args:
updates (dict): Data to change.
cancel (bool): Whether to cancel out of the update. The cancel is done
after all the new information has been filled in the UI.
validate_credentials (bool): Whether to validate credentials - if True and the
credentials are invalid, an error will be raised.
Note:
utils.update use is recommended over use of this method.
"""
navigate_to(self, 'Edit')
# Workaround - without this, update was failing on downstream appliance
sel.wait_for_ajax()
sel.wait_for_element(properties_form.name_text)
fill(properties_form, self._form_mapping(**updates))
fill(credential_form, updates.get('credentials', None), validate=validate_credentials)
self._submit(cancel, edit_manager_btn)
name = updates['name'] or self.name
if not cancel:
flash.assert_message_match('{} Provider "{}" was updated'.format(self.type, name))
self.__dict__.update(**updates)
def delete(self, cancel=False, wait_deleted=True, force=False):
"""Deletes the manager through UI
Args:
cancel (bool): Whether to cancel out of the deletion, when the alert pops up.
wait_deleted (bool): Whether we want to wait for the manager to disappear from the UI.
True will wait; False will only delete it and move on.
force (bool): Whether to try to delete the manager even though it doesn't exist.
True will try to delete it anyway; False will check for its existence and leave,
if not present.
"""
if not force and not self.exists:
return
navigate_to(self, 'All')
sel.check(Quadicon(self.quad_name, None).checkbox())
item_text = version.pick({'5.6': 'Remove selected items from the VMDB',
'5.7': 'Remove selected items'})
cfg_btn(item_text, invokes_alert=True)
sel.handle_alert(cancel)
if not cancel:
flash_msg = version.pick({'5.6': 'Delete initiated for 1 provider',
'5.7': 'Delete initiated for 1 Provider'})
flash.assert_message_match(flash_msg)
if wait_deleted:
wait_for(func=lambda: self.exists, fail_condition=True, delay=15, num_sec=60)
@property
def _refresh_flash_msg(self):
return 'Refresh Provider initiated for 1 provider ({})'.format(self.type)
@property
def exists(self):
"""Returns whether the manager exists in the UI or not"""
navigate_to(self, 'All')
if (Quadicon.any_present() and
Quadicon(self.quad_name, None).exists):
return True
return False
def refresh_relationships(self, cancel=False):
"""Refreshes relationships and power states of this manager"""
navigate_to(self, 'All')
sel.check(Quadicon(self.quad_name, None).checkbox())
cfg_btn('Refresh Relationships and Power states', invokes_alert=True)
sel.handle_alert(cancel)
if not cancel:
flash.assert_message_match(self._refresh_flash_msg)
def _does_profile_exist(self):
return sel.is_displayed(page.list_table_config_profiles)
@property
def config_profiles(self):
"""Returns 'ConfigProfile' configuration profiles (hostgroups) available on this manager"""
navigate_to(self, 'Details')
tb.select('List View')
wait_for(self._does_profile_exist, num_sec=300, delay=20, fail_func=sel.refresh)
return [ConfigProfile(row['name'].text, self) for row in
page.list_table_config_profiles.rows()]
@property
def systems(self):
"""Returns 'ConfigSystem' configured systems (hosts) available on this manager"""
return reduce(lambda x, y: x + y, [prof.systems for prof in self.config_profiles])
@property
def yaml_data(self):
"""Returns yaml data for this manager"""
return conf.cfme_data.configuration_managers[self.key]
@classmethod
def load_from_yaml(cls, key):
"""Returns 'ConfigManager' object loaded from yamls, based on its key"""
data = conf.cfme_data.configuration_managers[key]
creds = conf.credentials[data['credentials']]
return cls(
name=data['name'],
url=data['url'],
ssl=data['ssl'],
credentials=cls.Credential(
principal=creds['username'], secret=creds['password']),
key=key)
@property
def quad_name(self):
return '{} Configuration Manager'.format(self.name)
def get_config_manager_from_config(cfg_mgr_key):
cfg_mgr = conf.cfme_data.get('configuration_managers', {})[cfg_mgr_key]
if cfg_mgr['type'] == 'satellite':
return Satellite.load_from_yaml(cfg_mgr_key)
elif cfg_mgr['type'] == 'ansible':
return AnsibleTower.load_from_yaml(cfg_mgr_key)
else:
raise Exception("Unknown configuration manager key")
@fill.method((Form, ConfigManager.Credential))
def _fill_credential(form, cred, validate=None):
"""How to fill in a credential. Validates the credential if that option is passed in."""
fill(credential_form, {'principal_text': cred.principal,
'secret_pass': cred.secret,
'verify_secret_pass': cred.verify_secret,
'validate_btn': validate})
if validate:
flash.assert_no_errors()
class ConfigProfile(Pretty):
"""Configuration profile object (foreman-side hostgroup)
Args:
name: Name of the profile
manager: ConfigManager object which this profile is bound to
"""
pretty_attrs = ['name', 'manager']
def __init__(self, name, manager):
self.name = name
self.manager = manager
@property
def systems(self):
"""Returns 'ConfigSystem' objects that are active under this profile"""
navigate_to(self, 'Details')
# ajax wait doesn't work here
_title_loc = "//span[contains(@id, 'explorer_title_text') " \
"and contains(normalize-space(text()), 'Configured Systems')]"
sel.wait_for_element(_title_loc)
# Unassigned config profile has no tabstrip
if "unassigned" not in self.name.lower():
tabs.select_tab("Configured Systems")
if sel.is_displayed(page.list_table_config_systems):
row_key = 'hostname'
return [ConfigSystem(row[row_key].text, self) for row in
page.list_table_config_systems.rows()]
return list()
class ConfigSystem(Pretty):
pretty_attrs = ['name', 'manager_key']
def __init__(self, name, profile):
self.name = name
self.profile = profile
def tag(self, tag):
"""Tags the system by given tag"""
navigate_to(self, 'EditTags')
fill(mixins.tag_form, {'category': 'Cost Center *', 'tag': 'Cost Center 001'})
# ---
mixins.add_tag(tag, navigate=False)
def untag(self, tag):
"""Removes the selected tag off the system"""
navigate_to(self, 'EditTags')
mixins.remove_tag(tag)
@property
def tags(self):
"""Returns a list of this system's active tags"""
navigate_to(self, 'EditTags')
return mixins.get_tags()
class Satellite(ConfigManager):
"""
Configuration manager object (Red Hat Satellite, Foreman)
Args:
name: Name of the Satellite/Foreman configuration manager
url: URL, hostname or IP of the configuration manager
ssl: Boolean value; `True` if SSL certificate validity should be checked, `False` otherwise
credentials: Credentials to access the config. manager
key: Key to access the cfme_data yaml data (same as `name` if not specified)
Usage:
Create provider:
.. code-block:: python
satellite_cfg_mgr = Satellite('my_satellite', 'my-satellite.example.com',
ssl=False, ConfigManager.Credential(principal='admin',
secret='testing'), key='satellite_yaml_key')
satellite_cfg_mgr.create()
Update provider:
.. code-block:: python
with update(satellite_cfg_mgr):
satellite_cfg_mgr.name = 'new_satellite_name'
Delete provider:
.. code-block:: python
satellite_cfg_mgr.delete()
"""
def __init__(self, name=None, url=None, ssl=None, credentials=None, key=None):
super(Satellite, self).__init__(name=name, url=url, ssl=ssl, credentials=credentials,
key=key)
self.name = name
self.url = url
self.ssl = ssl
self.credentials = credentials
self.key = key or name
@cached_property
def type(self):
"""Returns presumed type of the manager based on CFME version
Note:
We cannot actually know the type of the provider from the UI.
This represents the supported type by CFME version and is to be used in navigation.
"""
return version.pick({version.LOWEST: 'Red Hat Satellite', version.LATEST: 'Foreman'})
class AnsibleTower(ConfigManager):
"""
Configuration manager object (Ansible Tower)
Args:
name: Name of the Ansible Tower configuration manager
url: URL, hostname or IP of the configuration manager
ssl: Boolean value; `True` if SSL certificate validity should be checked, `False` otherwise
credentials: Credentials to access the config. manager
key: Key to access the cfme_data yaml data (same as `name` if not specified)
Usage:
Create provider:
.. code-block:: python
tower_cfg_mgr = AnsibleTower('my_tower', 'https://my-tower.example.com/api/v1',
ssl=False, ConfigManager.Credential(principal='admin',
secret='testing'), key='tower_yaml_key')
tower_cfg_mgr.create()
Update provider:
.. code-block:: python
with update(tower_cfg_mgr):
tower_cfg_mgr.name = 'new_tower_name'
Delete provider:
.. code-block:: python
tower_cfg_mgr.delete()
"""
type = 'Ansible Tower'
def __init__(self, name=None, url=None, ssl=None, credentials=None, key=None):
super(AnsibleTower, self).__init__(name=name, url=url, ssl=ssl, credentials=credentials,
key=key)
self.name = name
self.url = url
self.ssl = ssl
self.credentials = credentials
self.key = key or name
@navigator.register(ConfigManager, 'All')
class MgrAll(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self):
if self.obj.appliance.version > '5.7.0.8':
self.prerequisite_view.navigation.select('Configuration', 'Management')
else:
self.prerequisite_view.navigation.select('Configuration', 'Configuration Management')
def resetter(self):
accordion.tree('Providers', 'All Configuration Manager Providers')
tb.select('Grid View')
def am_i_here(self):
return match_page('All Configuration Management Providers')
@navigator.register(ConfigManager, 'Add')
class MgrAdd(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
cfg_btn('Add a new Provider')
@navigator.register(ConfigManager, 'Edit')
class MgrEdit(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
sel.check(Quadicon(self.obj.quad_name, None).checkbox())
cfg_btn('Edit Selected item')
@navigator.register(ConfigManager, 'Details')
class MgrDetails(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
sel.click(Quadicon(self.obj.quad_name, None))
def am_i_here(self):
return any((match_page(summary='Configuration Profiles under Red Hat Satellite '
'Provider "{} Configuration Manager"'.format(self.obj.name)),
match_page(summary='Inventory Groups under Ansible Tower Provider'
' "{} Configuration Manager"'.format(self.obj.name))))
@navigator.register(ConfigManager, 'EditFromDetails')
class MgrEditFromDetails(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def step(self):
cfg_btn('Edit this Provider')
# todo: not sure whether this works or not. it seems it wasn't used for a long time
@navigator.register(ConfigProfile, 'Details')
class Details(CFMENavigateStep):
prerequisite = NavigateToAttribute('manager', 'Details')
def step(self):
        tb.select('List View')
page.list_table_config_profiles.click_cell('Description', self.obj.name)
@navigator.register(ConfigSystem, 'All')
class SysAll(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self):
self.prerequisite_view.navigation.select('Configuration', 'Configuration Management')
def resetter(self):
accordion.tree('Configured Systems', 'All Configured Systems')
tb.select('Grid View')
def am_i_here(self):
return match_page(summary='All Configured Systems')
@navigator.register(ConfigSystem, 'Provision')
class SysProvision(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
sel.check(Quadicon(self.obj.name, None))
cfg_btn('Provision Configured Systems')
@navigator.register(ConfigSystem, 'EditTags')
class SysEditTags(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
sel.check(Quadicon(self.obj.name, None))
cfg_btn('Edit Tags')
| gpl-2.0 |
rawls238/planout | planout-editor/planout-editor-kernel.py | 9 | 2393 | from flask import Flask, jsonify, render_template, request, url_for
app = Flask(__name__)
from planout.interpreter import Interpreter
import traceback
import json
import sys
def testPlanOutScript(script, inputs={}, overrides=None, assertions=None):
payload = {}
# make sure experiment runs with the given inputs
i = Interpreter(script, 'demo_salt', inputs)
if overrides:
i.set_overrides(overrides)
try:
results = dict(i.get_params()) # executes experiment
except Exception as err:
#message = "Error running experiment: %s" % traceback.format_exc(0)
message = "Error running experiment:\n%s" % err
payload['errors'] = [{
"error_code": "runtime",
"message": message
}]
return payload
payload['results'] = results
# validate if input contains validation code
validation_errors = []
if assertions:
    for (key, value) in assertions.items():
if key not in results:
validation_errors.append({
"error_code": "assertion",
"message": {"param": key}
})
else:
if results[key] != value:
message = {'param': key, 'expected': value, 'got': results[key]}
validation_errors.append({
"error_code": "assertion",
"message": message
})
if validation_errors:
payload['errors'] = validation_errors
return payload
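# Editor's note (illustrative, hypothetical values): `script` is a compiled
# PlanOut JSON object (e.g. the compiler output for
# `x = uniformChoice(choices=[1, 2], unit=userid);`). A call such as
#   testPlanOutScript(script, inputs={'userid': 42}, assertions={'x': 1})
# returns a payload with a 'results' dict on success, and adds an 'errors'
# list (error_code 'runtime' or 'assertion') when execution or validation fails.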
@app.route('/run_test')
def run_test():
# not sure how to change everything to use POST requests
raw_script = request.args.get('compiled_code', '')
raw_inputs = request.args.get('inputs', '')
raw_overrides = request.args.get('overrides', "{}")
raw_assertions = request.args.get('assertions', "{}")
id = request.args.get('id')
script = json.loads(raw_script) if raw_script else {}
try:
inputs = json.loads(raw_inputs)
overrides = json.loads(raw_overrides) if raw_overrides else None
assertions = json.loads(raw_assertions) if raw_assertions else None
except:
return jsonify({
'errors': [{
'error_code': "INVALID_FORM",
'message': 'Invalid form input'
}],
'id': id
})
t = testPlanOutScript(script, inputs, overrides, assertions)
t['id'] = id
return jsonify(t)
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True)
url_for('static', filename='planoutstyle.css')
| bsd-3-clause |
hbrunn/hr | hr_contract_hourly_rate/tests/test_hr_contract_hourly_rates.py | 23 | 7450 | # -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Savoir-faire Linux. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests.common import TransactionCase
from openerp import exceptions
class test_contract_hourly_rate(TransactionCase):
def setUp(self):
super(test_contract_hourly_rate, self).setUp()
self.employee_model = self.env['hr.employee']
self.user_model = self.env["res.users"]
self.contract_model = self.env["hr.contract"]
self.job_model = self.env["hr.job"]
self.rate_class_model = self.env["hr.hourly.rate.class"]
# Create an employee
self.employee_id = self.employee_model.create({'name': 'Employee 1'})
# Create 3 jobs
self.job_id = self.job_model.create({'name': 'Job 1'})
self.job_2_id = self.job_model.create({'name': 'Job 2'})
self.job_3_id = self.job_model.create({'name': 'Job 3'})
# Create 3 hourly rate classes
self.rate_class_id = self.rate_class_model.create(
{
'name': 'Test',
'line_ids': [
(0, 0, {
'date_start': '2014-01-01',
'date_end': '2014-06-30',
'rate': 40,
}),
(0, 0, {
'date_start': '2014-07-01',
'date_end': '2014-12-31',
'rate': 45,
}),
],
}
)
self.rate_class_2_id = self.rate_class_model.create(
{
'name': 'Test',
'line_ids': [
(0, 0, {
'date_start': '2014-01-01',
'date_end': '2014-06-30',
'rate': 30,
}),
(0, 0, {
'date_start': '2014-07-01',
'date_end': '2014-12-31',
'rate': 35,
}),
],
}
)
self.rate_class_3_id = self.rate_class_model.create(
{
'name': 'Test',
'line_ids': [
(0, 0, {
'date_start': '2014-01-01',
'date_end': '2014-06-30',
'rate': 20,
}),
(0, 0, {
'date_start': '2014-07-01',
'date_end': '2014-12-31',
'rate': 25,
}),
],
}
)
# Create a contract
self.contract_id = self.contract_model.create(
{
'employee_id': self.employee_id.id,
'name': 'Contract 1',
'wage': 50000,
'salary_computation_method': 'hourly',
'contract_job_ids': [
(0, 0, {
'job_id': self.job_id.id,
'is_main_job': False,
'hourly_rate_class_id': self.rate_class_id.id,
}),
(0, 0, {
'job_id': self.job_2_id.id,
'is_main_job': True,
'hourly_rate_class_id': self.rate_class_2_id.id,
}),
(0, 0, {
'job_id': self.job_3_id.id,
'is_main_job': False,
'hourly_rate_class_id': self.rate_class_3_id.id,
}),
],
}
)
def test_check_overlapping_dates(self):
"""
test the _check_overlapping_dates constraint
on hourly rate class
"""
# Should all return the same result
for dates in [('2013-01-01', '2014-01-01'),
('2014-12-31', '2015-12-31'),
('2014-06-01', '2014-07-31')]:
self.assertRaises(
exceptions.ValidationError, self.rate_class_id.write,
{'line_ids': [(0, 0, {'date_start': dates[0],
'date_end': dates[1],
'rate': 15})]})
def test_check_has_hourly_rate_class(self):
"""
        test the _check_has_hourly_rate_class constraint
        on contract
"""
self.job_4_id = self.job_model.create({'name': 'Job 4'})
self.assertRaises(
exceptions.ValidationError, self.contract_id.write,
{'contract_job_ids': [(0, 0, {'job_id': self.job_4_id.id,
'is_main_job': False,
'hourly_rate_class_id': False})]})
def test_get_job_hourly_rate(self):
"""
test the method get_job_hourly_rate with job_id argument
"""
# Should all return the same result
for dates in [('2014-02-01', '2014-02-10'),
('2014-01-01', '2014-06-30')]:
res = self.contract_id.get_job_hourly_rate(
dates[0], dates[1], job_id=self.job_3_id.id, main_job=False)
self.assertTrue(res == 20)
# Should all return the same result
for dates in [('2014-08-10', '2014-08-20'),
('2014-07-01', '2014-12-31')]:
res = self.contract_id.get_job_hourly_rate(
dates[0], dates[1], job_id=self.job_3_id.id, main_job=False)
self.assertTrue(res == 25)
def test_get_job_hourly_rate_main_job(self):
"""
test the method get_job_hourly_rate with main_job argument
"""
# Should all return the same result
for dates in [('2014-02-01', '2014-02-10'),
('2014-01-01', '2014-06-30')]:
res = self.contract_id.get_job_hourly_rate(
dates[0], dates[1], job_id=False, main_job=True)
self.assertTrue(res == 30)
# Should all return the same result
for dates in [('2014-08-10', '2014-08-20'),
('2014-07-01', '2014-12-31')]:
res = self.contract_id.get_job_hourly_rate(
dates[0], dates[1], job_id=False, main_job=True)
self.assertTrue(res == 35)
self.assertRaises(
exceptions.ValidationError, self.rate_class_id.write,
{'line_ids': [(0, 0, {'date_start': dates[0],
'date_end': dates[1],
'rate': 15})]})
| agpl-3.0 |
mfherbst/spack | var/spack/repos/builtin/packages/r-affxparser/package.py | 2 | 2101 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffxparser(RPackage):
"""Package for parsing Affymetrix files (CDF, CEL, CHP, BPMAP, BAR).
It provides methods for fast and memory efficient parsing of
Affymetrix files using the Affymetrix' Fusion SDK. Both ASCII-
and binary-based files are supported. Currently, there are methods
    for reading a chip definition file (CDF) and a cell intensity file (CEL).
These files can be read either in full or in part. For example,
probe signals from a few probesets can be extracted very quickly
from a set of CEL files into a convenient list structure."""
homepage = "https://www.bioconductor.org/packages/affxparser/"
git = "https://git.bioconductor.org/packages/affxparser.git"
version('1.48.0', commit='2461ea88f310b59c4a9a997a4b3dadedbd65a4aa')
depends_on('[email protected]:3.4.9', when='@1.48.0')
| lgpl-2.1 |
twz915/django | tests/migrations/test_optimizer.py | 2 | 26004 | from django.db import migrations, models
from django.db.migrations import operations
from django.db.migrations.optimizer import MigrationOptimizer
from django.test import SimpleTestCase
from .models import EmptyManager, UnicodeModel
class OptimizerTests(SimpleTestCase):
"""
    Tests the migration optimizer.
"""
def optimize(self, operations, app_label):
"""
Handy shortcut for getting results + number of loops
"""
optimizer = MigrationOptimizer()
return optimizer.optimize(operations, app_label), optimizer._iterations
def assertOptimizesTo(self, operations, expected, exact=None, less_than=None, app_label=None):
result, iterations = self.optimize(operations, app_label)
result = [repr(f.deconstruct()) for f in result]
expected = [repr(f.deconstruct()) for f in expected]
self.assertEqual(expected, result)
if exact is not None and iterations != exact:
raise self.failureException(
"Optimization did not take exactly %s iterations (it took %s)" % (exact, iterations)
)
if less_than is not None and iterations >= less_than:
raise self.failureException(
"Optimization did not take less than %s iterations (it took %s)" % (less_than, iterations)
)
def assertDoesNotOptimize(self, operations, **kwargs):
self.assertOptimizesTo(operations, operations, **kwargs)
def test_single(self):
"""
        The optimizer does nothing on a single operation,
        and it does so in just one pass.
"""
self.assertOptimizesTo(
[migrations.DeleteModel("Foo")],
[migrations.DeleteModel("Foo")],
exact=1,
)
def test_create_delete_model(self):
"""
CreateModel and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_rename_model(self):
"""
CreateModel should absorb RenameModels.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RenameModel("Foo", "Bar"),
],
[
migrations.CreateModel(
"Bar",
[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
)
],
)
def test_rename_model_self(self):
"""
RenameModels should absorb themselves.
"""
self.assertOptimizesTo(
[
migrations.RenameModel("Foo", "Baa"),
migrations.RenameModel("Baa", "Bar"),
],
[
migrations.RenameModel("Foo", "Bar"),
],
)
def test_create_alter_model_options(self):
self.assertOptimizesTo(
[
migrations.CreateModel('Foo', fields=[]),
migrations.AlterModelOptions(name='Foo', options={'verbose_name_plural': 'Foozes'}),
],
[
migrations.CreateModel('Foo', fields=[], options={'verbose_name_plural': 'Foozes'}),
]
)
def _test_create_alter_foo_delete_model(self, alter_foo):
"""
CreateModel, AlterModelTable, AlterUniqueTogether/AlterIndexTogether/
AlterOrderWithRespectTo, and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.AlterModelTable("Foo", "woohoo"),
alter_foo,
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_alter_unique_delete_model(self):
self._test_create_alter_foo_delete_model(migrations.AlterUniqueTogether("Foo", [["a", "b"]]))
def test_create_alter_index_delete_model(self):
self._test_create_alter_foo_delete_model(migrations.AlterIndexTogether("Foo", [["a", "b"]]))
def test_create_alter_owrt_delete_model(self):
self._test_create_alter_foo_delete_model(migrations.AlterOrderWithRespectTo("Foo", "a"))
def _test_alter_alter_model(self, alter_foo, alter_bar):
"""
Two AlterUniqueTogether/AlterIndexTogether/AlterOrderWithRespectTo
should collapse into the second.
"""
self.assertOptimizesTo(
[
alter_foo,
alter_bar,
],
[
alter_bar,
],
)
def test_alter_alter_table_model(self):
self._test_alter_alter_model(
migrations.AlterModelTable("Foo", "a"),
migrations.AlterModelTable("Foo", "b"),
)
def test_alter_alter_unique_model(self):
self._test_alter_alter_model(
migrations.AlterUniqueTogether("Foo", [["a", "b"]]),
migrations.AlterUniqueTogether("Foo", [["a", "c"]]),
)
def test_alter_alter_index_model(self):
self._test_alter_alter_model(
migrations.AlterIndexTogether("Foo", [["a", "b"]]),
migrations.AlterIndexTogether("Foo", [["a", "c"]]),
)
def test_alter_alter_owrt_model(self):
self._test_alter_alter_model(
migrations.AlterOrderWithRespectTo("Foo", "a"),
migrations.AlterOrderWithRespectTo("Foo", "b"),
)
def test_optimize_through_create(self):
"""
We should be able to optimize away create/delete through a create or delete
of a different model, but only if the create operation does not mention the model
at all.
"""
# These should work
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Bar"),
migrations.DeleteModel("Foo"),
],
[],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
migrations.DeleteModel("Bar"),
],
[],
)
# This should not work - FK should block it
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
)
# The same operations should be optimized if app_label is specified and
# a FK references a model from the other app.
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
],
app_label="otherapp",
)
# But it shouldn't work if a FK references a model with the same
# app_label.
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
app_label="testapp",
)
# This should not work - bases should block it
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
migrations.DeleteModel("Foo"),
],
)
# The same operations should be optimized if app_label and none of
# bases belong to that app.
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
],
app_label="otherapp",
)
# But it shouldn't work if some of bases belongs to the specified app.
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
migrations.DeleteModel("Foo"),
],
app_label="testapp",
)
def test_create_model_add_field(self):
"""
AddField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.AddField("Foo", "age", models.IntegerField()),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_create_model_add_field_not_through_fk(self):
"""
AddField should NOT optimize into CreateModel if it's an FK to a model
that's between them.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.AddField("Foo", "link", models.ForeignKey("migrations.Link", models.CASCADE)),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.AddField("Foo", "link", models.ForeignKey("migrations.Link", models.CASCADE)),
],
)
def test_create_model_add_field_not_through_m2m_through(self):
"""
AddField should NOT optimize into CreateModel if it's an M2M using a
through that's created between them.
"""
# Note: The middle model is not actually a valid through model,
# but that doesn't matter, as we never render it.
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("LinkThrough", []),
migrations.AddField(
"Foo", "link", models.ManyToManyField("migrations.Link", through="migrations.LinkThrough")
),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("LinkThrough", []),
migrations.AddField(
"Foo", "link", models.ManyToManyField("migrations.Link", through="migrations.LinkThrough")
),
],
)
def test_create_model_alter_field(self):
"""
AlterField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.AlterField("Foo", "name", models.IntegerField()),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.IntegerField()),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_create_model_rename_field(self):
"""
RenameField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("title", models.CharField(max_length=255)),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_add_field_rename_field(self):
"""
RenameField should optimize into AddField
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.AddField("Foo", "title", models.CharField(max_length=255)),
],
)
def test_alter_field_rename_field(self):
"""
RenameField should optimize to the other side of AlterField,
and into itself.
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
migrations.RenameField("Foo", "title", "nom"),
],
[
migrations.RenameField("Foo", "name", "nom"),
migrations.AlterField("Foo", "nom", models.CharField(max_length=255)),
],
)
def test_create_model_remove_field(self):
"""
RemoveField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RemoveField("Foo", "age"),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_add_field_alter_field(self):
"""
AlterField should optimize into AddField.
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AlterField("Foo", "age", models.FloatField(default=2.4)),
],
[
migrations.AddField("Foo", name="age", field=models.FloatField(default=2.4)),
],
)
def test_add_field_delete_field(self):
"""
RemoveField should cancel AddField
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[],
)
def test_alter_field_delete_field(self):
"""
RemoveField should absorb AlterField
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[
migrations.RemoveField("Foo", "age"),
],
)
def _test_create_alter_foo_field(self, alter):
"""
CreateModel, AlterFooTogether/AlterOrderWithRespectTo followed by an
add/alter/rename field should optimize to CreateModel and the Alter*
"""
# AddField
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.AddField("Foo", "c", models.IntegerField()),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
],
)
# AlterField
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.AlterField("Foo", "b", models.CharField(max_length=255)),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
migrations.AlterField("Foo", "c", models.CharField(max_length=255)),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.CharField(max_length=255)),
]),
alter,
],
)
# RenameField
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "b", "c"),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "b", "x"),
migrations.RenameField("Foo", "x", "c"),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "b", "c"),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "c", "d"),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("d", models.IntegerField()),
]),
alter,
],
)
# RemoveField
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RemoveField("Foo", "b"),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
migrations.RemoveField("Foo", "c"),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
],
)
def test_create_alter_unique_field(self):
self._test_create_alter_foo_field(migrations.AlterUniqueTogether("Foo", [["a", "b"]]))
def test_create_alter_index_field(self):
self._test_create_alter_foo_field(migrations.AlterIndexTogether("Foo", [["a", "b"]]))
def test_create_alter_owrt_field(self):
self._test_create_alter_foo_field(migrations.AlterOrderWithRespectTo("Foo", "b"))
def test_optimize_through_fields(self):
"""
field-level through checking is working. This should manage to collapse
model Foo to nonexistence, and model Bar to a single IntegerField
called "width".
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AddField("Bar", "width", models.IntegerField()),
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RenameField("Bar", "size", "dimensions"),
migrations.RemoveField("Foo", "age"),
migrations.RenameModel("Foo", "Phou"),
migrations.RemoveField("Bar", "dimensions"),
migrations.RenameModel("Phou", "Fou"),
migrations.DeleteModel("Fou"),
],
[
migrations.CreateModel("Bar", [("width", models.IntegerField())]),
],
)
def test_optimize_elidable_operation(self):
elidable_operation = operations.base.Operation()
elidable_operation.elidable = True
self.assertOptimizesTo(
[
elidable_operation,
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
elidable_operation,
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
elidable_operation,
migrations.RenameModel("Foo", "Phou"),
migrations.DeleteModel("Bar"),
elidable_operation,
],
[
migrations.CreateModel("Phou", [("name", models.CharField(max_length=255))]),
],
)
| bsd-3-clause |
bikong2/django | tests/null_fk_ordering/tests.py | 381 | 2012 | from __future__ import unicode_literals
from django.test import TestCase
from .models import Article, Author, Comment, Forum, Post, SystemInfo
class NullFkOrderingTests(TestCase):
def test_ordering_across_null_fk(self):
"""
Regression test for #7512
ordering across nullable Foreign Keys shouldn't exclude results
"""
author_1 = Author.objects.create(name='Tom Jones')
author_2 = Author.objects.create(name='Bob Smith')
Article.objects.create(title='No author on this article')
Article.objects.create(author=author_1, title='This article written by Tom Jones')
Article.objects.create(author=author_2, title='This article written by Bob Smith')
# We can't compare results directly (since different databases sort NULLs to
# different ends of the ordering), but we can check that all results are
# returned.
self.assertEqual(len(list(Article.objects.all())), 3)
s = SystemInfo.objects.create(system_name='System Info')
f = Forum.objects.create(system_info=s, forum_name='First forum')
p = Post.objects.create(forum=f, title='First Post')
Comment.objects.create(post=p, comment_text='My first comment')
Comment.objects.create(comment_text='My second comment')
s2 = SystemInfo.objects.create(system_name='More System Info')
f2 = Forum.objects.create(system_info=s2, forum_name='Second forum')
p2 = Post.objects.create(forum=f2, title='Second Post')
Comment.objects.create(comment_text='Another first comment')
Comment.objects.create(post=p2, comment_text='Another second comment')
# We have to test this carefully. Some databases sort NULL values before
# everything else, some sort them afterwards. So we extract the ordered list
# and check the length. Before the fix, this list was too short (some values
# were omitted).
self.assertEqual(len(list(Comment.objects.all())), 4)
| bsd-3-clause |
a-doumoulakis/tensorflow | tensorflow/contrib/tensor_forest/python/tensor_forest.py | 5 | 26323 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numbers
import random
from google.protobuf import text_format
from tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2 as _tree_proto
from tensorflow.contrib.framework.python.ops import variables as framework_variables
from tensorflow.contrib.tensor_forest.proto import tensor_forest_params_pb2 as _params_proto
from tensorflow.contrib.tensor_forest.python.ops import data_ops
from tensorflow.contrib.tensor_forest.python.ops import model_ops
from tensorflow.contrib.tensor_forest.python.ops import stats_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# Stores tuples of (leaf model type, stats model type)
CLASSIFICATION_LEAF_MODEL_TYPES = {
'all_dense': (_params_proto.MODEL_DENSE_CLASSIFICATION,
_params_proto.STATS_DENSE_GINI),
'all_sparse': (_params_proto.MODEL_SPARSE_CLASSIFICATION,
_params_proto.STATS_SPARSE_GINI),
'sparse_then_dense':
(_params_proto.MODEL_SPARSE_OR_DENSE_CLASSIFICATION,
_params_proto.STATS_SPARSE_THEN_DENSE_GINI),
}
REGRESSION_MODEL_TYPE = (
_params_proto.MODEL_REGRESSION,
_params_proto.STATS_LEAST_SQUARES_REGRESSION,
_params_proto.COLLECTION_BASIC)
FINISH_TYPES = {
'basic': _params_proto.SPLIT_FINISH_BASIC,
'hoeffding': _params_proto.SPLIT_FINISH_DOMINATE_HOEFFDING,
'bootstrap': _params_proto.SPLIT_FINISH_DOMINATE_BOOTSTRAP
}
PRUNING_TYPES = {
'none': _params_proto.SPLIT_PRUNE_NONE,
'half': _params_proto.SPLIT_PRUNE_HALF,
'quarter': _params_proto.SPLIT_PRUNE_QUARTER,
'10_percent': _params_proto.SPLIT_PRUNE_10_PERCENT,
'hoeffding': _params_proto.SPLIT_PRUNE_HOEFFDING,
}
SPLIT_TYPES = {
'less_or_equal': _tree_proto.InequalityTest.LESS_OR_EQUAL,
'less': _tree_proto.InequalityTest.LESS_THAN
}
def parse_number_or_string_to_proto(proto, param):
if isinstance(param, numbers.Number):
proto.constant_value = param
else: # assume it's a string
if param.isdigit():
proto.constant_value = int(param)
else:
text_format.Merge(param, proto)
def build_params_proto(params):
"""Build a TensorForestParams proto out of the V4ForestHParams object."""
proto = _params_proto.TensorForestParams()
proto.num_trees = params.num_trees
proto.max_nodes = params.max_nodes
proto.is_regression = params.regression
proto.num_outputs = params.num_classes
proto.num_features = params.num_features
proto.leaf_type = params.leaf_model_type
proto.stats_type = params.stats_model_type
proto.collection_type = _params_proto.COLLECTION_BASIC
proto.pruning_type.type = params.pruning_type
proto.finish_type.type = params.finish_type
proto.inequality_test_type = params.split_type
proto.drop_final_class = False
proto.collate_examples = params.collate_examples
proto.checkpoint_stats = params.checkpoint_stats
proto.use_running_stats_method = params.use_running_stats_method
proto.initialize_average_splits = params.initialize_average_splits
proto.inference_tree_paths = params.inference_tree_paths
parse_number_or_string_to_proto(proto.pruning_type.prune_every_samples,
params.prune_every_samples)
parse_number_or_string_to_proto(proto.finish_type.check_every_steps,
params.early_finish_check_every_samples)
parse_number_or_string_to_proto(proto.split_after_samples,
params.split_after_samples)
parse_number_or_string_to_proto(proto.num_splits_to_consider,
params.num_splits_to_consider)
proto.dominate_fraction.constant_value = params.dominate_fraction
if params.param_file:
with open(params.param_file) as f:
text_format.Merge(f.read(), proto)
return proto
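# Illustrative usage (editor's sketch, not original code): the proto is
# normally built from a filled ForestHParams object, e.g.
#   hparams = ForestHParams(num_classes=2, num_features=40).fill()
#   params_proto = build_params_proto(hparams)
# ForestHParams is defined below in this module, so the example is left as a
# comment rather than executable module-level code.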
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(
self,
num_trees=100,
max_nodes=10000,
bagging_fraction=1.0,
num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, # deprecated, unused.
split_after_samples=250,
valid_leaf_threshold=1,
dominate_method='bootstrap',
dominate_fraction=0.99,
model_name='all_dense',
split_finish_name='basic',
split_pruning_name='none',
prune_every_samples=0,
early_finish_check_every_samples=0,
collate_examples=False,
checkpoint_stats=False,
use_running_stats_method=False,
initialize_average_splits=False,
inference_tree_paths=False,
param_file=None,
split_name='less_or_equal',
**kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.valid_leaf_threshold = valid_leaf_threshold
self.dominate_method = dominate_method
self.dominate_fraction = dominate_fraction
self.model_name = model_name
self.split_finish_name = split_finish_name
self.split_pruning_name = split_pruning_name
self.collate_examples = collate_examples
self.checkpoint_stats = checkpoint_stats
self.use_running_stats_method = use_running_stats_method
self.initialize_average_splits = initialize_average_splits
self.inference_tree_paths = inference_tree_paths
self.param_file = param_file
self.split_name = split_name
self.early_finish_check_every_samples = early_finish_check_every_samples
self.prune_every_samples = prune_every_samples
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Our experiments have found that num_splits_to_consider = num_features
# gives good accuracy.
self.num_splits_to_consider = self.num_splits_to_consider or min(
max(10, math.floor(math.sqrt(self.num_features))), 1000)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
# How to store leaf models.
self.leaf_model_type = (
REGRESSION_MODEL_TYPE[0] if self.regression else
CLASSIFICATION_LEAF_MODEL_TYPES[self.model_name][0])
# How to store stats objects.
self.stats_model_type = (
REGRESSION_MODEL_TYPE[1] if self.regression else
CLASSIFICATION_LEAF_MODEL_TYPES[self.model_name][1])
self.finish_type = (
_params_proto.SPLIT_FINISH_BASIC if self.regression else
FINISH_TYPES[self.split_finish_name])
self.pruning_type = PRUNING_TYPES[self.split_pruning_name]
if self.pruning_type == _params_proto.SPLIT_PRUNE_NONE:
self.prune_every_samples = 0
else:
if (not self.prune_every_samples and
          not (isinstance(self.split_after_samples, numbers.Number) or
self.split_after_samples.isdigit())):
logging.error(
'Must specify prune_every_samples if using a depth-dependent '
'split_after_samples')
# Pruning half-way through split_after_samples seems like a decent
# default, making it easy to select the number being pruned with
# pruning_type while not paying the cost of pruning too often. Note that
# this only holds if not using a depth-dependent split_after_samples.
self.prune_every_samples = (self.prune_every_samples or
int(self.split_after_samples) / 2)
if self.finish_type == _params_proto.SPLIT_FINISH_BASIC:
self.early_finish_check_every_samples = 0
else:
if (not self.early_finish_check_every_samples and
          not (isinstance(self.split_after_samples, numbers.Number) or
self.split_after_samples.isdigit())):
logging.error(
            'Must specify early_finish_check_every_samples if using a '
            'depth-dependent split_after_samples')
# Checking for early finish every quarter through split_after_samples
# seems like a decent default. We don't want to incur the checking cost
# too often, but (at least for hoeffding) it's lower than the cost of
# pruning so we can do it a little more frequently.
self.early_finish_check_every_samples = (
self.early_finish_check_every_samples or
int(self.split_after_samples) / 4)
self.split_type = SPLIT_TYPES[self.split_name]
return self
def get_epoch_variable():
"""Returns the epoch variable, or [0] if not defined."""
# Grab epoch variable defined in
# //third_party/tensorflow/python/training/input.py::limit_epochs
for v in tf_variables.local_variables():
if 'limit_epochs/epoch' in v.op.name:
return array_ops.reshape(v, [1])
# TODO(thomaswc): Access epoch from the data feeder.
return [0]
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
if (not hasattr(params, 'params_proto') or
not isinstance(params.params_proto,
_params_proto.TensorForestParams)):
params.params_proto = build_params_proto(params)
params.serialized_params_proto = params.params_proto.SerializeToString()
self.stats = None
if training:
# TODO(gilberth): Manually shard this to be able to fit it on
# multiple machines.
self.stats = stats_ops.fertile_stats_variable(
params, '', self.get_tree_name('stats', tree_num))
self.tree = model_ops.tree_variable(
params, '', self.stats, self.get_tree_name('tree', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
# Set up some scalar variables to run through the device assigner, then
# we can use those to colocate everything related to a tree.
self.device_dummies = []
with ops.device(device_assigner):
for i in range(params.num_trees):
self.device_dummies.append(variable_scope.get_variable(
name='device_dummy_%d' % i, shape=0))
for i in range(params.num_trees):
with ops.device(self.device_dummies[i].device):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self,
params,
device_assigner=None,
variables=None,
tree_variables_class=TreeTrainingVariables,
tree_graphs=None,
training=True):
self.params = params
self.device_assigner = (
device_assigner or framework_variables.VariableDeviceChooser())
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(self.variables[i], self.params, i)
for i in range(self.params.num_trees)
]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(
value=input_data, num_or_size_splits=self.params.num_features, axis=1)
return array_ops.concat(
[split_data[ind] for ind in self.params.bagged_features[tree_num]], 1)
def get_all_resource_handles(self):
return ([self.variables[i].tree for i in range(len(self.trees))] +
[self.variables[i].stats for i in range(len(self.trees))])
def training_graph(self,
input_data,
input_labels,
num_trainers=1,
trainer_id=0,
**tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or dict of string->Tensor for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
num_trainers: Number of parallel trainers to split trees among.
trainer_id: Which trainer this instance is.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
Raises:
NotImplementedError: If trying to use bagging with sparse features.
"""
processed_dense_features, processed_sparse_features, data_spec = (
data_ops.ParseDataTensorOrDict(input_data))
if input_labels is not None:
labels = data_ops.ParseLabelTensorOrDict(input_labels)
data_spec = data_spec or self.get_default_data_spec(input_data)
tree_graphs = []
trees_per_trainer = self.params.num_trees / num_trainers
tree_start = int(trainer_id * trees_per_trainer)
tree_end = int((trainer_id + 1) * trees_per_trainer)
for i in range(tree_start, tree_end):
with ops.device(self.variables.device_dummies[i].device):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = processed_dense_features
tree_labels = labels
if self.params.bagging_fraction < 1.0:
# TODO(gilberth): Support bagging for sparse features.
if processed_sparse_features is not None:
raise NotImplementedError(
'Bagging not supported with sparse features.')
# TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.strided_slice(
array_ops.shape(processed_dense_features), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(processed_dense_features, gather_indices)
tree_labels = array_ops.gather(labels, gather_indices)
if self.params.bagged_features:
if processed_sparse_features is not None:
raise NotImplementedError(
'Feature bagging not supported with sparse features.')
tree_data = self._bag_features(i, tree_data)
tree_graphs.append(self.trees[i].training_graph(
tree_data,
tree_labels,
seed,
data_spec=data_spec,
sparse_features=processed_sparse_features,
**tree_kwargs))
return control_flow_ops.group(*tree_graphs, name='train')
def inference_graph(self, input_data, **inference_args):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or dict of string->Tensor for input data.
**inference_args: Keyword arguments to pass through to each tree.
Returns:
A tuple of (probabilities, tree_paths, variance), where variance
is the variance over all the trees for regression problems only.
Raises:
NotImplementedError: If trying to use feature bagging with sparse
features.
"""
processed_dense_features, processed_sparse_features, data_spec = (
data_ops.ParseDataTensorOrDict(input_data))
probabilities = []
paths = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
tree_data = processed_dense_features
if self.params.bagged_features:
if processed_sparse_features is not None:
raise NotImplementedError(
'Feature bagging not supported with sparse features.')
tree_data = self._bag_features(i, tree_data)
probs, path = self.trees[i].inference_graph(
tree_data,
data_spec,
sparse_features=processed_sparse_features,
**inference_args)
probabilities.append(probs)
paths.append(path)
with ops.device(self.variables.device_dummies[0].device):
# shape of all_predict should be [batch_size, num_trees, num_outputs]
all_predict = array_ops.stack(probabilities, axis=1)
average_values = math_ops.div(
math_ops.reduce_sum(all_predict, 1),
self.params.num_trees,
name='probabilities')
tree_paths = array_ops.stack(paths, axis=1)
regression_variance = None
if self.params.regression:
expected_squares = math_ops.div(
math_ops.reduce_sum(all_predict * all_predict, 1),
self.params.num_trees)
regression_variance = math_ops.maximum(
0., expected_squares - average_values * average_values)
return average_values, tree_paths, regression_variance
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(math_ops.to_float(array_ops.stack(sizes)))
# pylint: disable=unused-argument
def training_loss(self, features, labels, name='training_loss'):
return math_ops.negative(self.average_size(), name=name)
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.negative(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.stack(impurities))
def feature_importances(self):
tree_counts = [self.trees[i].feature_usage_counts()
for i in range(self.params.num_trees)]
total_counts = math_ops.reduce_sum(array_ops.stack(tree_counts, 0), 0)
return total_counts / math_ops.reduce_sum(total_counts)
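# Illustrative usage sketch (added here; not part of the original module). It
# assumes a filled hyperparameter object like the one produced by fill() above
# and caller-supplied `features`/`labels` tensors, both hypothetical.
def _example_forest_usage(params, features, labels):
  """Builds training and inference ops for a forest; illustration only."""
  graph_builder = RandomForestGraphs(params)
  # One grow/update step over the batch, spread across all trees.
  train_op = graph_builder.training_graph(features, labels)
  # Per-example outputs averaged over the trees, plus the paths taken through
  # each tree and (for regression) the variance across trees.
  probabilities, tree_paths, variance = graph_builder.inference_graph(features)
  return train_op, probabilities, tree_paths, variance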
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, tree_num):
self.variables = variables
self.params = params
self.tree_num = tree_num
def training_graph(self,
input_data,
input_labels,
random_seed,
data_spec,
sparse_features=None,
input_weights=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A data_ops.TensorForestDataSpec object specifying the
original feature/columns of the data.
sparse_features: A tf.SparseTensor for sparse input data.
input_weights: A float tensor or placeholder holding per-input weights,
or None if all inputs are to be weighted equally.
Returns:
The last op in the random tree training graph.
"""
# TODO(gilberth): Use this.
unused_epoch = math_ops.to_int32(get_epoch_variable())
if input_weights is None:
input_weights = []
sparse_indices = []
sparse_values = []
sparse_shape = []
if sparse_features is not None:
sparse_indices = sparse_features.indices
sparse_values = sparse_features.values
sparse_shape = sparse_features.dense_shape
if input_data is None:
input_data = []
leaf_ids = model_ops.traverse_tree_v4(
self.variables.tree,
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_spec=data_spec.SerializeToString(),
params=self.params.serialized_params_proto)
update_model = model_ops.update_model_v4(
self.variables.tree,
leaf_ids,
input_labels,
input_weights,
params=self.params.serialized_params_proto)
finished_nodes = stats_ops.process_input_v4(
self.variables.tree,
self.variables.stats,
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_labels,
input_weights,
leaf_ids,
input_spec=data_spec.SerializeToString(),
random_seed=random_seed,
params=self.params.serialized_params_proto)
with ops.control_dependencies([update_model]):
return stats_ops.grow_tree_v4(
self.variables.tree,
self.variables.stats,
finished_nodes,
params=self.params.serialized_params_proto)
def inference_graph(self, input_data, data_spec, sparse_features=None):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or placeholder for input data.
data_spec: A TensorForestDataSpec proto specifying the original
input columns.
sparse_features: A tf.SparseTensor for sparse input data.
Returns:
A tuple of (probabilities, tree_paths).
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if sparse_features is not None:
sparse_indices = sparse_features.indices
sparse_values = sparse_features.values
sparse_shape = sparse_features.dense_shape
if input_data is None:
input_data = []
return model_ops.tree_predictions_v4(
self.variables.tree,
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_spec=data_spec.SerializeToString(),
params=self.params.serialized_params_proto)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return model_ops.tree_size(self.variables.tree)
def feature_usage_counts(self):
return model_ops.feature_usage_counts(
self.variables.tree, params=self.params.serialized_params_proto)
| apache-2.0 |
sh1nu11bi/sulley | utils/crash_binning.py | 12 | 10341 | #
# Crash Binning
# Copyright (C) 2006 Pedram Amini <[email protected]>
#
# $Id: crash_binning.py 193 2007-04-05 13:30:01Z cameron $
#
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
'''
@author: Pedram Amini
@license: GNU General Public License 2.0 or later
@contact: [email protected]
@organization: www.openrce.org
'''
import sys
import zlib
import cPickle
class __crash_bin_struct__:
exception_module = None
exception_address = 0
write_violation = 0
violation_address = 0
violation_thread_id = 0
context = None
context_dump = None
disasm = None
disasm_around = []
stack_unwind = []
seh_unwind = []
extra = None
class crash_binning:
'''
@todo: Add MySQL import/export.
'''
bins = {}
last_crash = None
pydbg = None
####################################################################################################################
def __init__ (self):
'''
'''
self.bins = {}
self.last_crash = None
self.pydbg = None
####################################################################################################################
def record_crash (self, pydbg, extra=None):
'''
Given a PyDbg instantiation that at the current time is assumed to have "crashed" (access violation for example)
        record various details such as the disassembly around the violating address, the ID of the offending thread, the
call stack and the SEH unwind. Store the recorded data in an internal dictionary, binning them by the exception
address.
@type pydbg: pydbg
@param pydbg: Instance of pydbg
@type extra: Mixed
@param extra: (Optional, Def=None) Whatever extra data you want to store with this bin
'''
self.pydbg = pydbg
crash = __crash_bin_struct__()
# add module name to the exception address.
exception_module = pydbg.addr_to_module(pydbg.dbg.u.Exception.ExceptionRecord.ExceptionAddress)
if exception_module:
exception_module = exception_module.szModule
else:
exception_module = "[INVALID]"
crash.exception_module = exception_module
crash.exception_address = pydbg.dbg.u.Exception.ExceptionRecord.ExceptionAddress
crash.write_violation = pydbg.dbg.u.Exception.ExceptionRecord.ExceptionInformation[0]
crash.violation_address = pydbg.dbg.u.Exception.ExceptionRecord.ExceptionInformation[1]
crash.violation_thread_id = pydbg.dbg.dwThreadId
crash.context = pydbg.context
crash.context_dump = pydbg.dump_context(pydbg.context, print_dots=False)
crash.disasm = pydbg.disasm(crash.exception_address)
crash.disasm_around = pydbg.disasm_around(crash.exception_address, 10)
crash.stack_unwind = pydbg.stack_unwind()
crash.seh_unwind = pydbg.seh_unwind()
crash.extra = extra
# add module names to the stack unwind.
for i in xrange(len(crash.stack_unwind)):
addr = crash.stack_unwind[i]
module = pydbg.addr_to_module(addr)
if module:
module = module.szModule
else:
module = "[INVALID]"
crash.stack_unwind[i] = "%s:%08x" % (module, addr)
# add module names to the SEH unwind.
for i in xrange(len(crash.seh_unwind)):
(addr, handler) = crash.seh_unwind[i]
module = pydbg.addr_to_module(handler)
if module:
module = module.szModule
else:
module = "[INVALID]"
crash.seh_unwind[i] = (addr, handler, "%s:%08x" % (module, handler))
if not self.bins.has_key(crash.exception_address):
self.bins[crash.exception_address] = []
self.bins[crash.exception_address].append(crash)
self.last_crash = crash
####################################################################################################################
def crash_synopsis (self, crash=None):
'''
        For the supplied crash, generate and return a report containing the disassembly around the violating address,
        the ID of the offending thread, the call stack and the SEH unwind. If no crash is specified, then call through
to last_crash_synopsis() which returns the same information for the last recorded crash.
@see: crash_synopsis()
@type crash: __crash_bin_struct__
@param crash: (Optional, def=None) Crash object to generate report on
@rtype: String
@return: Crash report
'''
if not crash:
return self.last_crash_synopsis()
if crash.write_violation:
direction = "write to"
else:
direction = "read from"
synopsis = "%s:%08x %s from thread %d caused access violation\nwhen attempting to %s 0x%08x\n\n" % \
(
crash.exception_module, \
crash.exception_address, \
crash.disasm, \
crash.violation_thread_id, \
direction, \
crash.violation_address \
)
synopsis += crash.context_dump
synopsis += "\ndisasm around:\n"
for (ea, inst) in crash.disasm_around:
synopsis += "\t0x%08x %s\n" % (ea, inst)
if len(crash.stack_unwind):
synopsis += "\nstack unwind:\n"
for entry in crash.stack_unwind:
synopsis += "\t%s\n" % entry
if len(crash.seh_unwind):
synopsis += "\nSEH unwind:\n"
for (addr, handler, handler_str) in crash.seh_unwind:
synopsis += "\t%08x -> %s\n" % (addr, handler_str)
return synopsis + "\n"
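    # Illustrative usage sketch (added; not part of the original module): a
    # typical PyDbg access-violation callback that bins each crash and prints
    # its synopsis. The callback wiring below is an assumption and should be
    # checked against your PyDbg build.
    #
    #     bins = crash_binning()
    #
    #     def av_handler(dbg):
    #         bins.record_crash(dbg)
    #         print bins.crash_synopsis()
    #         dbg.terminate_process()
    #         return DBG_EXCEPTION_NOT_HANDLED
    #
    #     dbg.set_callback(EXCEPTION_ACCESS_VIOLATION, av_handler)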
####################################################################################################################
def export_file (self, file_name):
'''
Dump the entire object structure to disk.
@see: import_file()
@type file_name: String
@param file_name: File name to export to
@rtype: crash_binning
@return: self
'''
# null out what we don't serialize but save copies to restore after dumping to disk.
last_crash = self.last_crash
pydbg = self.pydbg
self.last_crash = self.pydbg = None
fh = open(file_name, "wb+")
fh.write(zlib.compress(cPickle.dumps(self, protocol=2)))
fh.close()
self.last_crash = last_crash
self.pydbg = pydbg
return self
####################################################################################################################
def import_file (self, file_name):
'''
Load the entire object structure from disk.
@see: export_file()
@type file_name: String
@param file_name: File name to import from
@rtype: crash_binning
@return: self
'''
fh = open(file_name, "rb")
tmp = cPickle.loads(zlib.decompress(fh.read()))
fh.close()
self.bins = tmp.bins
return self
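    # Illustrative round trip for export_file()/import_file() above (added;
    # the file name is a hypothetical placeholder):
    #
    #     bins.export_file("crashes.bin")
    #     restored = crash_binning().import_file("crashes.bin")
    #     for addr, crashes in restored.bins.items():
    #         print "%08x: %d crash(es)" % (addr, len(crashes))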
####################################################################################################################
def last_crash_synopsis (self):
'''
        For the last recorded crash, generate and return a report containing the disassembly around the violating
address, the ID of the offending thread, the call stack and the SEH unwind.
@see: crash_synopsis()
@rtype: String
@return: Crash report
'''
if self.last_crash.write_violation:
direction = "write to"
else:
direction = "read from"
synopsis = "%s:%08x %s from thread %d caused access violation\nwhen attempting to %s 0x%08x\n\n" % \
(
self.last_crash.exception_module, \
self.last_crash.exception_address, \
self.last_crash.disasm, \
self.last_crash.violation_thread_id, \
direction, \
self.last_crash.violation_address \
)
synopsis += self.last_crash.context_dump
synopsis += "\ndisasm around:\n"
for (ea, inst) in self.last_crash.disasm_around:
synopsis += "\t0x%08x %s\n" % (ea, inst)
if len(self.last_crash.stack_unwind):
synopsis += "\nstack unwind:\n"
for entry in self.last_crash.stack_unwind:
synopsis += "\t%s\n" % entry
if len(self.last_crash.seh_unwind):
synopsis += "\nSEH unwind:\n"
for (addr, handler, handler_str) in self.last_crash.seh_unwind:
try:
disasm = self.pydbg.disasm(handler)
except:
disasm = "[INVALID]"
synopsis += "\t%08x -> %s %s\n" % (addr, handler_str, disasm)
return synopsis + "\n" | gpl-2.0 |
grangier/xhtml2pdf | xhtml2pdf/version.py | 61 | 1551 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__reversion__ = "$Revision: 247 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2008-08-15 13:37:57 +0200 (Fr, 15 Aug 2008) $"
__version__ = VERSION = "VERSION{3.0.33}VERSION"[8:-8]
__build__ = BUILD = "BUILD{2010-06-16}BUILD"[6:-6]
VERSION_STR = """XHTML2PDF/pisa %s (Build %s)
http://www.xhtml2pdf.com
Copyright 2010 Dirk Holtwick, holtwick.it
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.""" % (
VERSION,
BUILD,
)
| apache-2.0 |
toert/django-shop-template | myshop/urls.py | 1 | 1142 | """myshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^catalog/', include('catalog.urls', namespace='catalog')),
url(r'^cart/', include('cart.urls', namespace='cart')),
url(r'^order/', include('orders.urls', namespace='orders')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | mit |
Johnetordoff/osf.io | addons/github/api.py | 9 | 5246 | from future.moves.urllib.parse import urlencode
import github3
import cachecontrol
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
from addons.github import settings as github_settings
from addons.github.exceptions import NotFoundError
# Initialize caches
https_cache = cachecontrol.CacheControlAdapter()
default_adapter = HTTPAdapter()
class GitHubClient(object):
def __init__(self, external_account=None, access_token=None):
self.access_token = getattr(external_account, 'oauth_key', None) or access_token
if self.access_token:
self.gh3 = github3.login(token=self.access_token)
self.gh3.set_client_id(
github_settings.CLIENT_ID, github_settings.CLIENT_SECRET
)
else:
self.gh3 = github3.GitHub()
        # Caching library
if github_settings.CACHE:
self.gh3._session.mount('https://api.github.com/user', default_adapter)
self.gh3._session.mount('https://', https_cache)
def user(self, user=None):
"""Fetch a user or the authenticated user.
:param user: Optional GitHub user name; will fetch authenticated
user if omitted
:return dict: GitHub API response
"""
if user is None:
return self.gh3.me()
return self.gh3.user(user)
def repo(self, user, repo):
"""Get a single Github repo's info.
:param str user: GitHub user name
:param str repo: GitHub repo name
:return: Dict of repo information
See http://developer.github.com/v3/repos/#get
"""
try:
rv = self.gh3.repository(user, repo)
except ConnectionError:
raise NotFoundError
if rv:
return rv
raise NotFoundError
def repos(self):
repos = self.gh3.repositories(type='all', sort='pushed')
return [repo for repo in repos if repo.permissions['push']]
def create_repo(self, repo, **kwargs):
return self.gh3.create_repository(repo, **kwargs)
def branches(self, user, repo, branch=None):
"""List a repo's branches or get a single branch (in a list).
:param str user: GitHub user name
:param str repo: GitHub repo name
:param str branch: Branch name if getting a single branch
:return: List of branch dicts
http://developer.github.com/v3/repos/#list-branches
"""
if branch:
return [self.repo(user, repo).branch(branch)]
return self.repo(user, repo).branches() or []
# TODO: Test
def starball(self, user, repo, archive='tar', ref='master'):
"""Get link for archive download.
:param str user: GitHub user name
:param str repo: GitHub repo name
:param str archive: Archive format [tar|zip]
:param str ref: Git reference
        :returns: tuple: Tuple of response headers and the downloaded archive contents
"""
# github3 archive method writes file to disk
repository = self.repo(user, repo)
url = repository._build_url(archive + 'ball', ref, base_url=repository._api)
resp = repository._get(url, allow_redirects=True, stream=True)
return resp.headers, resp.content
#########
# Hooks #
#########
def hooks(self, user, repo):
"""List webhooks
:param str user: GitHub user name
:param str repo: GitHub repo name
:return list: List of commit dicts from GitHub; see
http://developer.github.com/v3/repos/hooks/#json-http
"""
return self.repo(user, repo).hooks()
def add_hook(self, user, repo, name, config, events=None, active=True):
"""Create a webhook.
:param str user: GitHub user name
:param str repo: GitHub repo name
:return dict: Hook info from GitHub: see see
http://developer.github.com/v3/repos/hooks/#json-http
"""
try:
hook = self.repo(user, repo).create_hook(name, config, events, active)
except github3.GitHubError:
# TODO Handle this case - if '20 hooks' in e.errors[0].get('message'):
return None
else:
return hook
def delete_hook(self, user, repo, _id):
"""Delete a webhook.
:param str user: GitHub user name
:param str repo: GitHub repo name
:return bool: True if successful, False otherwise
:raises: NotFoundError if repo or hook cannot be located
"""
repo = self.repo(user, repo)
hook = repo.hook(_id)
if hook is None:
raise NotFoundError
return repo.hook(_id).delete()
########
# Auth #
########
def revoke_token(self):
if self.access_token:
return self.gh3.revoke_authorization(self.access_token)
def check_authorization(self):
return self.gh3.check_authorization(self.access_token)
def ref_to_params(branch=None, sha=None):
params = urlencode({
key: value
for key, value in {
'branch': branch,
'sha': sha,
}.items()
if value
})
if params:
return '?' + params
return ''
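# Illustrative usage sketch (added; not part of the original module). The
# token and repository names below are hypothetical placeholders.
def _example_github_client_usage():
    client = GitHubClient(access_token='hypothetical-oauth-token')
    repo = client.repo('someuser', 'somerepo')  # single repository lookup
    branches = client.branches('someuser', 'somerepo')  # all branches
    # Query-string helper, e.g. '?branch=master', for addon file references.
    query = ref_to_params(branch='master')
    return repo, branches, query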
| apache-2.0 |
asmodehn/catkin | test/unit_tests/test_setup_util.py | 2 | 6374 | import os
import shutil
import sys
import tempfile
import unittest
from catkin_pkg.cmake import configure_file
data = configure_file(os.path.join(os.path.dirname(__file__), '..', '..', 'cmake', 'templates', '_setup_util.py.in'),
{
'CATKIN_LIB_ENVIRONMENT_PATHS': "'lib'",
'CATKIN_PKGCONFIG_ENVIRONMENT_PATHS': "os.path.join('lib', 'pkgconfig')",
'CATKIN_GLOBAL_BIN_DESTINATION': 'bin',
'PYTHON_EXECUTABLE': sys.executable,
'PYTHON_INSTALL_DIR': 'pythonX.Y/packages',
'CMAKE_PREFIX_PATH_AS_IS': '',
})
with tempfile.NamedTemporaryFile('w+') as setup_util_file:
setup_util_file.write(data)
setup_util_file.seek(0)
import imp
imp.load_source('setup_util', setup_util_file.name, setup_util_file.file)
import setup_util
from setup_util import _get_workspaces, _prefix_env_variable, _rollback_env_variable, CATKIN_MARKER_FILE
class SetupUtilTest(unittest.TestCase):
def test_get_reversed_workspaces(self):
try:
rootdir = tempfile.mkdtemp()
mock_env = {}
self.assertEqual([], _get_workspaces(mock_env))
self.assertEqual([], _get_workspaces(mock_env, 'foo'))
foows = os.path.join(rootdir, 'foo')
os.makedirs(foows)
with open(os.path.join(foows, CATKIN_MARKER_FILE), 'w') as fhand:
fhand.write('')
barws = os.path.join(rootdir, 'bar')
os.makedirs(barws)
with open(os.path.join(barws, CATKIN_MARKER_FILE), 'w') as fhand:
fhand.write('')
nows = os.path.join(rootdir, 'nows')
os.makedirs(nows)
mock_env = {'CMAKE_PREFIX_PATH': foows}
self.assertEqual([foows], _get_workspaces(mock_env))
mock_env = {'CMAKE_PREFIX_PATH': os.pathsep.join([nows, foows, barws, 'invalid'])}
self.assertEqual([foows, barws], _get_workspaces(mock_env))
finally:
shutil.rmtree(rootdir)
def test_prefix_env(self):
try:
rootdir = tempfile.mkdtemp()
foo_path = os.path.join(rootdir, 'foo')
os.makedirs(foo_path)
bar_path = os.path.join(rootdir, 'bar')
os.makedirs(bar_path)
baz_path = os.path.join(rootdir, 'baz')
bam_path = os.path.join(rootdir, 'bam')
lim_path = os.path.join(rootdir, 'lim')
os.makedirs(lim_path)
mock_env = {}
self.assertEqual('',
_prefix_env_variable(mock_env, 'varname', [], ''))
self.assertEqual(os.pathsep.join([foo_path, bar_path]),
_prefix_env_variable(mock_env, 'varname', [foo_path, bar_path, baz_path], ''))
mock_env = {'varname': os.pathsep.join([baz_path, bar_path, bam_path])}
self.assertEqual('',
_prefix_env_variable(mock_env, 'varname', [], ''))
self.assertEqual(foo_path + os.pathsep,
_prefix_env_variable(mock_env, 'varname', [foo_path, bar_path], ''))
self.assertEqual(os.pathsep.join([foo_path, lim_path]) + os.pathsep,
_prefix_env_variable(mock_env, 'varname', [foo_path, lim_path, foo_path, lim_path], ''))
finally:
shutil.rmtree(rootdir)
def test_remove_from_env(self):
altsep = os.path.altsep
try:
rootdir = tempfile.mkdtemp()
mock_env = {}
# foows
foows = os.path.join(rootdir, 'foo')
foolib = os.path.join(foows, 'lib') + '/'
os.makedirs(foows)
with open(os.path.join(foows, '.catkin'), 'w') as fhand:
fhand.write('')
# barws
barws = os.path.join(rootdir, 'bar')
barlib = os.path.join(barws, 'lib')
os.makedirs(barws)
with open(os.path.join(barws, '.catkin'), 'w') as fhand:
fhand.write('')
# mock_env with one ws in CPP
varname = 'varname'
wsvarname = 'workspaces'
mock_env = {varname: os.pathsep.join([foolib, barlib]),
'CMAKE_PREFIX_PATH': barws}
# since workspace foo is not in CMAKE_PREFIX_PATH, it remains in varname
self.assertEqual(foolib, _rollback_env_variable(mock_env, varname, ['/lib']))
# mock_env with both ws in CPP
mock_env = {varname: os.pathsep.join([foolib, barlib]),
wsvarname: os.pathsep.join([foows, barws]),
'CMAKE_PREFIX_PATH': os.pathsep.join([foows, barws])}
self.assertEqual(None, _rollback_env_variable(mock_env, varname, ['']))
self.assertEqual(None, _rollback_env_variable(mock_env, varname, ['nolib']))
self.assertEqual(None, _rollback_env_variable(mock_env, varname, ['/nolib']))
self.assertEqual('', _rollback_env_variable(mock_env, varname, ['lib']))
self.assertEqual('', _rollback_env_variable(mock_env, varname, ['/lib']))
self.assertEqual(None, _rollback_env_variable(mock_env, varname, ['']))
self.assertEqual('', _rollback_env_variable(mock_env, wsvarname, ['']))
# nows: not a workspace
nows = os.path.join(rootdir, 'nows')
nowslib = os.path.join(nows, 'lib')
nowslib = os.path.join(nows, 'include')
os.makedirs(nows)
mock_env = {'varname': os.pathsep.join([foolib, nowslib, barlib, foolib]),
'CMAKE_PREFIX_PATH': os.pathsep.join([foows, barws])}
# checks nows/lib remains, and second mention of foolib
self.assertEqual(os.pathsep.join([nowslib, foolib]), _rollback_env_variable(mock_env, 'varname', ['/lib']))
self.assertEqual(os.pathsep.join([nowslib, foolib]), _rollback_env_variable(mock_env, 'varname', ['lib']))
# windows pathsep
os.path.altsep = '\\'
self.assertEqual(os.pathsep.join([nowslib, foolib]), _rollback_env_variable(mock_env, 'varname', ['\\lib']))
finally:
os.path.altsep = altsep
shutil.rmtree(rootdir)
| bsd-3-clause |
cherylyli/stress-aid | env/lib/python3.5/site-packages/markupsafe/tests.py | 674 | 6107 | # -*- coding: utf-8 -*-
import gc
import sys
import unittest
from markupsafe import Markup, escape, escape_silent
from markupsafe._compat import text_type
class MarkupTestCase(unittest.TestCase):
def test_adding(self):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
def test_string_interpolation(self):
# string interpolations are safe to use too
assert Markup('<em>%s</em>') % '<bad user>' == \
            '<em>&lt;bad user&gt;</em>'
        assert Markup('<em>%(username)s</em>') % {
            'username': '<bad user>'
        } == '<em>&lt;bad user&gt;</em>'
assert Markup('%i') % 3.14 == '3'
assert Markup('%.2f') % 3.14 == '3.14'
def test_type_behavior(self):
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
def test_html_interop(self):
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
__str__ = __unicode__
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
def test_tuple_interpol(self):
self.assertEqual(Markup('<em>%s:%s</em>') % (
'<foo>',
'<bar>',
        ), Markup(u'<em>&lt;foo&gt;:&lt;bar&gt;</em>'))
def test_dict_interpol(self):
self.assertEqual(Markup('<em>%(foo)s</em>') % {
'foo': '<foo>',
        }, Markup(u'<em>&lt;foo&gt;</em>'))
self.assertEqual(Markup('<em>%(foo)s:%(bar)s</em>') % {
'foo': '<foo>',
'bar': '<bar>',
        }, Markup(u'<em>&lt;foo&gt;:&lt;bar&gt;</em>'))
def test_escaping(self):
# escaping and unescaping
        assert escape('"<>&\'') == '&#34;&lt;&gt;&amp;&#39;'
        assert Markup("<em>Foo &amp; Bar</em>").striptags() == "Foo & Bar"
        assert Markup("&lt;test&gt;").unescape() == "<test>"
def test_formatting(self):
for actual, expected in (
(Markup('%i') % 3.14, '3'),
(Markup('%.2f') % 3.14159, '3.14'),
            (Markup('%s %s %s') % ('<', 123, '>'), '&lt; 123 &gt;'),
            (Markup('<em>{awesome}</em>').format(awesome='<awesome>'),
             '<em>&lt;awesome&gt;</em>'),
            (Markup('{0[1][bar]}').format([0, {'bar': '<bar/>'}]),
             '&lt;bar/&gt;'),
(Markup('{0[1][bar]}').format([0, {'bar': Markup('<bar/>')}]),
'<bar/>')):
assert actual == expected, "%r should be %r!" % (actual, expected)
# This is new in 2.7
if sys.version_info >= (2, 7):
def test_formatting_empty(self):
formatted = Markup('{}').format(0)
assert formatted == Markup('0')
def test_custom_formatting(self):
class HasHTMLOnly(object):
def __html__(self):
return Markup('<foo>')
class HasHTMLAndFormat(object):
def __html__(self):
return Markup('<foo>')
def __html_format__(self, spec):
return Markup('<FORMAT>')
assert Markup('{0}').format(HasHTMLOnly()) == Markup('<foo>')
assert Markup('{0}').format(HasHTMLAndFormat()) == Markup('<FORMAT>')
def test_complex_custom_formatting(self):
class User(object):
def __init__(self, id, username):
self.id = id
self.username = username
def __html_format__(self, format_spec):
if format_spec == 'link':
return Markup('<a href="/user/{0}">{1}</a>').format(
self.id,
self.__html__(),
)
elif format_spec:
raise ValueError('Invalid format spec')
return self.__html__()
def __html__(self):
return Markup('<span class=user>{0}</span>').format(self.username)
user = User(1, 'foo')
assert Markup('<p>User: {0:link}').format(user) == \
Markup('<p>User: <a href="/user/1"><span class=user>foo</span></a>')
def test_all_set(self):
import markupsafe as markup
for item in markup.__all__:
getattr(markup, item)
def test_escape_silent(self):
assert escape_silent(None) == Markup()
assert escape(None) == Markup(None)
        assert escape_silent('<foo>') == Markup(u'&lt;foo&gt;')
def test_splitting(self):
self.assertEqual(Markup('a b').split(), [
Markup('a'),
Markup('b')
])
self.assertEqual(Markup('a b').rsplit(), [
Markup('a'),
Markup('b')
])
self.assertEqual(Markup('a\nb').splitlines(), [
Markup('a'),
Markup('b')
])
def test_mul(self):
self.assertEqual(Markup('a') * 3, Markup('aaa'))
class MarkupLeakTestCase(unittest.TestCase):
def test_markup_leaks(self):
counts = set()
for count in range(20):
for item in range(1000):
escape("foo")
escape("<foo>")
escape(u"foo")
escape(u"<foo>")
counts.add(len(gc.get_objects()))
assert len(counts) == 1, 'ouch, c extension seems to leak objects'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MarkupTestCase))
# this test only tests the c extension
if not hasattr(escape, 'func_code'):
suite.addTest(unittest.makeSuite(MarkupLeakTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
# vim:sts=4:sw=4:et:
| mit |
wuga214/Django-Wuga | env/lib/python2.7/site-packages/whoosh/lang/snowball/bases.py | 96 | 4874 | # Base classes
class _ScandinavianStemmer(object):
"""
This subclass encapsulates a method for defining the string region R1.
It is used by the Danish, Norwegian, and Swedish stemmer.
"""
def _r1_scandinavian(self, word, vowels):
"""
Return the region R1 that is used by the Scandinavian stemmers.
R1 is the region after the first non-vowel following a vowel,
or is the null region at the end of the word if there is no
such non-vowel. But then R1 is adjusted so that the region
before it contains at least three letters.
:param word: The word whose region R1 is determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the region R1.
:type vowels: unicode
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the respective stem method of
the subclasses DanishStemmer, NorwegianStemmer, and
SwedishStemmer. It is not to be invoked directly!
"""
r1 = ""
for i in range(1, len(word)):
if word[i] not in vowels and word[i - 1] in vowels:
if len(word[:i + 1]) < 3 and len(word[:i + 1]) > 0:
r1 = word[3:]
elif len(word[:i + 1]) >= 3:
r1 = word[i + 1:]
else:
return word
break
return r1
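    # Worked example (added for illustration): with the Danish vowels
    # "aeiouyæåø", the first non-vowel following a vowel in "aben" is "b", but
    # the prefix "ab" is shorter than three letters, so the region is shifted
    # and _r1_scandinavian("aben", vowels) returns "n".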
class _StandardStemmer(object):
"""
This subclass encapsulates two methods for defining the standard versions
of the string regions R1, R2, and RV.
"""
def _r1r2_standard(self, word, vowels):
"""
Return the standard interpretations of the string regions R1 and R2.
R1 is the region after the first non-vowel following a vowel,
or is the null region at the end of the word if there is no
such non-vowel.
R2 is the region after the first non-vowel following a vowel
in R1, or is the null region at the end of the word if there
is no such non-vowel.
:param word: The word whose regions R1 and R2 are determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the regions R1 and R2.
:type vowels: unicode
:return: (r1,r2), the regions R1 and R2 for the respective word.
:rtype: tuple
:note: This helper method is invoked by the respective stem method of
the subclasses DutchStemmer, FinnishStemmer,
FrenchStemmer, GermanStemmer, ItalianStemmer,
PortugueseStemmer, RomanianStemmer, and SpanishStemmer.
It is not to be invoked directly!
:note: A detailed description of how to define R1 and R2
can be found at http://snowball.tartarus.org/texts/r1r2.html
"""
r1 = ""
r2 = ""
for i in range(1, len(word)):
if word[i] not in vowels and word[i - 1] in vowels:
r1 = word[i + 1:]
break
for i in range(1, len(r1)):
if r1[i] not in vowels and r1[i - 1] in vowels:
r2 = r1[i + 1:]
break
return (r1, r2)
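    # Worked example (added for illustration): with the English vowels
    # "aeiouy", _r1r2_standard("beautiful", "aeiouy") returns ("iful", "ul"):
    # R1 starts after the first non-vowel that follows a vowel ("t"), and R2
    # applies the same rule again inside R1.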
def _rv_standard(self, word, vowels):
"""
Return the standard interpretation of the string region RV.
If the second letter is a consonant, RV is the region after the
next following vowel. If the first two letters are vowels, RV is
the region after the next following consonant. Otherwise, RV is
the region after the third letter.
:param word: The word whose region RV is determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the region RV.
:type vowels: unicode
:return: the region RV for the respective word.
:rtype: unicode
:note: This helper method is invoked by the respective stem method of
the subclasses ItalianStemmer, PortugueseStemmer,
RomanianStemmer, and SpanishStemmer. It is not to be
invoked directly!
"""
rv = ""
if len(word) >= 2:
if word[1] not in vowels:
for i in range(2, len(word)):
if word[i] in vowels:
rv = word[i + 1:]
break
elif word[:2] in vowels:
for i in range(2, len(word)):
if word[i] not in vowels:
rv = word[i + 1:]
break
else:
rv = word[3:]
return rv
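    # Worked examples (added for illustration) with the Spanish vowels
    # "aeiouáéíóúü": _rv_standard("macho", vowels) returns "ho" (first letter
    # a consonant, second a vowel, so the region after the third letter), and
    # _rv_standard("oliva", vowels) returns "va" (second letter a consonant,
    # so the region after the next following vowel).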
| apache-2.0 |
Ziemin/telepathy-gabble | tests/twisted/muc/chat-states.py | 2 | 5219 | """
Regression test for <https://bugs.freedesktop.org/show_bug.cgi?id=32952>,
wherein chat states in MUCs were misparsed, and MUC chat states in general.
"""
from servicetest import assertEquals, assertLength, EventPattern
from gabbletest import exec_test, elem, make_muc_presence, sync_stream
from mucutil import join_muc_and_check
import ns
import constants as cs
MUC = '[email protected]'
BOB = MUC + '/bob'
def get_state_notification(stanza):
for x in stanza.elements():
if x.uri == ns.CHAT_STATES:
return x
return None
def check_state_notification(elem, name, allow_body=False):
assertEquals('message', elem.name)
assertEquals('groupchat', elem['type'])
notification = get_state_notification(elem)
assert notification is not None, elem.toXml()
assert notification.name == name, notification.toXml()
if not allow_body:
assert len(elem.children) == 1, elem.toXml()
def test(q, bus, conn, stream):
(chan, user, bob) = join_muc_and_check(q, bus, conn, stream,
MUC)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(bob, cs.CHAT_STATE_INACTIVE))
stream.send(
elem('message', from_=BOB, to='test@localhost/Resource',
type='groupchat', jid='[email protected]')(
elem(ns.CHAT_STATES, 'composing'),
elem('google:nosave', 'x', value='disabled'),
elem('http://jabber.org/protocol/archive', 'record', otr='false'),
))
e = q.expect('dbus-signal', signal='ChatStateChanged')
contact, state = e.args
assertEquals(bob, contact)
assertEquals(cs.CHAT_STATE_COMPOSING, state)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
assertEquals(cs.CHAT_STATE_COMPOSING,
states.get(bob, cs.CHAT_STATE_INACTIVE))
stream.send(
elem('message', from_=BOB, to='test@localhost/Resource',
type='groupchat', jid='[email protected]')(
elem(ns.CHAT_STATES, 'paused'),
elem('google:nosave', 'x', value='disabled'),
elem('http://jabber.org/protocol/archive', 'record', otr='false'),
))
e = q.expect('dbus-signal', signal='ChatStateChanged')
contact, state = e.args
assertEquals(bob, contact)
assertEquals(cs.CHAT_STATE_PAUSED, state)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
assertEquals(cs.CHAT_STATE_PAUSED,
states.get(bob, cs.CHAT_STATE_INACTIVE))
# Bob leaves
presence = make_muc_presence('owner', 'none', MUC, 'bob')
presence['type'] = 'unavailable'
stream.send(presence)
e = q.expect('dbus-signal', signal='ChatStateChanged')
contact, state = e.args
assertEquals(bob, contact)
assertEquals(cs.CHAT_STATE_GONE, state)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
# Bob no longer has any chat state at all
assertEquals(None, states.get(bob, None))
# Sending chat states:
# Composing...
chan.ChatState.SetChatState(cs.CHAT_STATE_COMPOSING)
stream_message = q.expect('stream-message')
check_state_notification(stream_message.stanza, 'composing')
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_COMPOSING,
states.get(user, cs.CHAT_STATE_INACTIVE))
# XEP 0085:
# every content message SHOULD contain an <active/> notification.
chan.send_msg_sync('hi.')
stream_message = q.expect('stream-message')
stanza = stream_message.stanza
check_state_notification(stanza, 'active', allow_body=True)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_ACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
bodies = list(stanza.elements(uri=ns.CLIENT, name='body'))
assertLength(1, bodies)
assertEquals(u'hi.', bodies[0].children[0])
# If we get an error with type='wait', stop sending chat states.
stanza['type'] = 'error'
stanza['from'] = MUC
stanza['to'] = 'test@localhost/Resource'
error = stanza.addElement('error')
error['type'] = 'wait'
error.addElement((ns.STANZA, 'resource-constraint'))
stream.send(stanza)
q.expect('dbus-signal', signal='MessageReceived',
predicate=lambda e: e.args[0][0]['message-type'] == cs.MT_DELIVERY_REPORT)
q.forbid_events([
EventPattern('stream-message', to=MUC,
predicate=lambda e: get_state_notification(e.stanza) is not None)
])
# User starts typing again but nothing should be seen or heard on the stream.
chan.ChatState.SetChatState(cs.CHAT_STATE_COMPOSING)
sync_stream(q, stream)
if __name__ == '__main__':
exec_test(test)
| lgpl-2.1 |
hushaoqing/my_notes | Python/Scrapy/mytest/testScrapyGraphite/settings.py | 1 | 3248 | # -*- coding: utf-8 -*-
# Scrapy settings for mytest project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'testScrapyGraphite'
SPIDER_MODULES = ['testScrapyGraphite.spiders']
NEWSPIDER_MODULE = 'testScrapyGraphite.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'mytest (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'mytest.middlewares.MytestSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'mytest.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'mytest.pipelines.MytestPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
STATS_CLASS = 'testScrapyGraphite.spiders.stat_redis.GraphiteStatsCollector'
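# Illustrative sketch (an assumption, not this project's actual implementation):
# a collector like the one referenced above would typically subclass Scrapy's
# StatsCollector and push numeric stats to Graphite/Carbon when a spider
# closes. The send_to_carbon() helper and the metric naming are hypothetical.
#
# from scrapy.statscollectors import StatsCollector
#
# class GraphiteStatsCollector(StatsCollector):
#     def _persist_stats(self, stats, spider):
#         for key, value in stats.items():
#             if isinstance(value, (int, float)):
#                 send_to_carbon('scrapy.%s.%s' % (spider.name, key), value)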
| mit |
rodrigods/keystone | keystone/token/controllers.py | 2 | 19819 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import sys
from keystoneclient.common import cms
from oslo.utils import timeutils
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
from keystone.token import provider
CONF = config.CONF
LOG = log.getLogger(__name__)
class ExternalAuthNotApplicable(Exception):
"""External authentication is not applicable."""
pass
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
'token_api', 'token_provider_api', 'trust_api')
class Auth(controller.V2Controller):
@controller.v2_deprecated
def ca_cert(self, context, auth=None):
ca_file = open(CONF.signing.ca_certs, 'r')
data = ca_file.read()
ca_file.close()
return data
@controller.v2_deprecated
def signing_cert(self, context, auth=None):
cert_file = open(CONF.signing.certfile, 'r')
data = cert_file.read()
cert_file.close()
return data
@controller.v2_deprecated
def authenticate(self, context, auth=None):
"""Authenticate credentials and return a token.
Accept auth as a dict that looks like::
{
"auth":{
"passwordCredentials":{
"username":"test_user",
"password":"mypass"
},
"tenantName":"customer-x"
}
}
In this case, tenant is optional, if not provided the token will be
considered "unscoped" and can later be used to get a scoped token.
Alternatively, this call accepts auth with only a token and tenant
that will return a token that is scoped to that tenant.
"""
if auth is None:
raise exception.ValidationError(attribute='auth',
target='request body')
if "token" in auth:
# Try to authenticate using a token
auth_info = self._authenticate_token(
context, auth)
else:
# Try external authentication
try:
auth_info = self._authenticate_external(
context, auth)
except ExternalAuthNotApplicable:
# Try local authentication
auth_info = self._authenticate_local(
context, auth)
user_ref, tenant_ref, metadata_ref, expiry, bind = auth_info
# Validate that the auth info is valid and nothing is disabled
try:
self.identity_api.assert_user_enabled(
user_id=user_ref['id'], user=user_ref)
self.assignment_api.assert_domain_enabled(
domain_id=user_ref['domain_id'])
if tenant_ref:
self.assignment_api.assert_project_enabled(
project_id=tenant_ref['id'], project=tenant_ref)
except AssertionError as e:
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
# NOTE(morganfainberg): Make sure the data is in correct form since it
# might be consumed external to Keystone and this is a v2.0 controller.
# The user_ref is encoded into the auth_token_data which is returned as
# part of the token data. The token provider doesn't care about the
# format.
user_ref = self.v3_to_v2_user(user_ref)
if tenant_ref:
tenant_ref = self.filter_domain_id(tenant_ref)
auth_token_data = self._get_auth_token_data(user_ref,
tenant_ref,
metadata_ref,
expiry)
if tenant_ref:
catalog_ref = self.catalog_api.get_catalog(
user_ref['id'], tenant_ref['id'], metadata_ref)
else:
catalog_ref = {}
auth_token_data['id'] = 'placeholder'
if bind:
auth_token_data['bind'] = bind
roles_ref = []
for role_id in metadata_ref.get('roles', []):
role_ref = self.assignment_api.get_role(role_id)
roles_ref.append(dict(name=role_ref['name']))
(token_id, token_data) = self.token_provider_api.issue_v2_token(
auth_token_data, roles_ref=roles_ref, catalog_ref=catalog_ref)
# NOTE(wanghong): We consume a trust use only when we are using trusts
# and have successfully issued a token.
if CONF.trust.enabled and 'trust_id' in auth:
self.trust_api.consume_use(auth['trust_id'])
return token_data
def _authenticate_token(self, context, auth):
"""Try to authenticate using an already existing token.
Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
"""
if 'token' not in auth:
raise exception.ValidationError(
attribute='token', target='auth')
if "id" not in auth['token']:
raise exception.ValidationError(
attribute="id", target="token")
old_token = auth['token']['id']
if len(old_token) > CONF.max_token_size:
raise exception.ValidationSizeError(attribute='token',
size=CONF.max_token_size)
try:
old_token_ref = self.token_api.get_token(old_token)
except exception.NotFound as e:
raise exception.Unauthorized(e)
wsgi.validate_token_bind(context, old_token_ref)
# A trust token cannot be used to get another token
if 'trust' in old_token_ref:
raise exception.Forbidden()
if 'trust_id' in old_token_ref['metadata']:
raise exception.Forbidden()
user_ref = old_token_ref['user']
user_id = user_ref['id']
tenant_id = self._get_project_id_from_auth(auth)
if not CONF.trust.enabled and 'trust_id' in auth:
raise exception.Forbidden('Trusts are disabled.')
elif CONF.trust.enabled and 'trust_id' in auth:
trust_ref = self.trust_api.get_trust(auth['trust_id'])
if trust_ref is None:
raise exception.Forbidden()
if user_id != trust_ref['trustee_user_id']:
raise exception.Forbidden()
if (trust_ref['project_id'] and
tenant_id != trust_ref['project_id']):
raise exception.Forbidden()
if ('expires' in trust_ref) and (trust_ref['expires']):
expiry = trust_ref['expires']
if expiry < timeutils.parse_isotime(timeutils.isotime()):
                    raise exception.Forbidden()
user_id = trust_ref['trustor_user_id']
trustor_user_ref = self.identity_api.get_user(
trust_ref['trustor_user_id'])
if not trustor_user_ref['enabled']:
                raise exception.Forbidden()
trustee_user_ref = self.identity_api.get_user(
trust_ref['trustee_user_id'])
if not trustee_user_ref['enabled']:
                raise exception.Forbidden()
if trust_ref['impersonation'] is True:
current_user_ref = trustor_user_ref
else:
current_user_ref = trustee_user_ref
else:
current_user_ref = self.identity_api.get_user(user_id)
metadata_ref = {}
tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
user_id, tenant_id)
expiry = old_token_ref['expires']
if CONF.trust.enabled and 'trust_id' in auth:
trust_id = auth['trust_id']
trust_roles = []
for role in trust_ref['roles']:
if 'roles' not in metadata_ref:
                    raise exception.Forbidden()
if role['id'] in metadata_ref['roles']:
trust_roles.append(role['id'])
else:
raise exception.Forbidden()
if 'expiry' in trust_ref and trust_ref['expiry']:
trust_expiry = timeutils.parse_isotime(trust_ref['expiry'])
if trust_expiry < expiry:
expiry = trust_expiry
metadata_ref['roles'] = trust_roles
metadata_ref['trustee_user_id'] = trust_ref['trustee_user_id']
metadata_ref['trust_id'] = trust_id
bind = old_token_ref.get('bind')
return (current_user_ref, tenant_ref, metadata_ref, expiry, bind)
def _authenticate_local(self, context, auth):
"""Try to authenticate against the identity backend.
Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
"""
if 'passwordCredentials' not in auth:
raise exception.ValidationError(
attribute='passwordCredentials', target='auth')
if "password" not in auth['passwordCredentials']:
raise exception.ValidationError(
attribute='password', target='passwordCredentials')
password = auth['passwordCredentials']['password']
if password and len(password) > CONF.identity.max_password_length:
raise exception.ValidationSizeError(
attribute='password', size=CONF.identity.max_password_length)
if ("userId" not in auth['passwordCredentials'] and
"username" not in auth['passwordCredentials']):
raise exception.ValidationError(
attribute='username or userId',
target='passwordCredentials')
user_id = auth['passwordCredentials'].get('userId')
if user_id and len(user_id) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='userId',
size=CONF.max_param_size)
username = auth['passwordCredentials'].get('username', '')
if username:
if len(username) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='username',
size=CONF.max_param_size)
try:
user_ref = self.identity_api.get_user_by_name(
username, CONF.identity.default_domain_id)
user_id = user_ref['id']
except exception.UserNotFound as e:
raise exception.Unauthorized(e)
try:
user_ref = self.identity_api.authenticate(
context,
user_id=user_id,
password=password)
except AssertionError as e:
raise exception.Unauthorized(e.args[0])
metadata_ref = {}
tenant_id = self._get_project_id_from_auth(auth)
tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
user_id, tenant_id)
expiry = provider.default_expire_time()
return (user_ref, tenant_ref, metadata_ref, expiry, None)
def _authenticate_external(self, context, auth):
"""Try to authenticate an external user via REMOTE_USER variable.
        Returns a (user_ref, tenant_ref, metadata_ref, expiry, bind) tuple.
"""
environment = context.get('environment', {})
if not environment.get('REMOTE_USER'):
raise ExternalAuthNotApplicable()
# NOTE(jamielennox): xml and json differ and get confused about what
# empty auth should look like so just reset it.
if not auth:
auth = {}
username = environment['REMOTE_USER']
try:
user_ref = self.identity_api.get_user_by_name(
username, CONF.identity.default_domain_id)
user_id = user_ref['id']
except exception.UserNotFound as e:
raise exception.Unauthorized(e)
metadata_ref = {}
tenant_id = self._get_project_id_from_auth(auth)
tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
user_id, tenant_id)
expiry = provider.default_expire_time()
bind = None
if ('kerberos' in CONF.token.bind and
environment.get('AUTH_TYPE', '').lower() == 'negotiate'):
bind = {'kerberos': username}
return (user_ref, tenant_ref, metadata_ref, expiry, bind)
def _get_auth_token_data(self, user, tenant, metadata, expiry):
return dict(user=user,
tenant=tenant,
metadata=metadata,
expires=expiry)
def _get_project_id_from_auth(self, auth):
"""Extract tenant information from auth dict.
Returns a valid tenant_id if it exists, or None if not specified.
"""
tenant_id = auth.get('tenantId')
if tenant_id and len(tenant_id) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='tenantId',
size=CONF.max_param_size)
tenant_name = auth.get('tenantName')
if tenant_name and len(tenant_name) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='tenantName',
size=CONF.max_param_size)
if tenant_name:
try:
tenant_ref = self.assignment_api.get_project_by_name(
tenant_name, CONF.identity.default_domain_id)
tenant_id = tenant_ref['id']
except exception.ProjectNotFound as e:
raise exception.Unauthorized(e)
return tenant_id
def _get_project_roles_and_ref(self, user_id, tenant_id):
"""Returns the project roles for this user, and the project ref."""
tenant_ref = None
role_list = []
if tenant_id:
try:
tenant_ref = self.assignment_api.get_project(tenant_id)
role_list = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant_id)
except exception.ProjectNotFound:
pass
if not role_list:
msg = _('User %(u_id)s is unauthorized for tenant %(t_id)s')
msg = msg % {'u_id': user_id, 't_id': tenant_id}
LOG.warning(msg)
raise exception.Unauthorized(msg)
return (tenant_ref, role_list)
def _get_token_ref(self, token_id, belongs_to=None):
"""Returns a token if a valid one exists.
Optionally, limited to a token owned by a specific tenant.
"""
data = self.token_api.get_token(token_id)
if belongs_to:
if data.get('tenant') is None:
raise exception.Unauthorized(
_('Token does not belong to specified tenant.'))
if data['tenant'].get('id') != belongs_to:
raise exception.Unauthorized(
_('Token does not belong to specified tenant.'))
return data
@controller.v2_deprecated
@controller.protected()
def validate_token_head(self, context, token_id):
"""Check that a token is valid.
Optionally, also ensure that it is owned by a specific tenant.
        Identical to ``validate_token``, except that it does not return a response.
The code in ``keystone.common.wsgi.render_response`` will remove
the content body.
"""
# TODO(ayoung) validate against revocation API
belongs_to = context['query_string'].get('belongsTo')
return self.token_provider_api.validate_v2_token(token_id, belongs_to)
@controller.v2_deprecated
@controller.protected()
def validate_token(self, context, token_id):
"""Check that a token is valid.
Optionally, also ensure that it is owned by a specific tenant.
Returns metadata about the token along any associated roles.
"""
belongs_to = context['query_string'].get('belongsTo')
# TODO(ayoung) validate against revocation API
return self.token_provider_api.validate_v2_token(token_id, belongs_to)
@controller.v2_deprecated
def delete_token(self, context, token_id):
"""Delete a token, effectively invalidating it for authz."""
# TODO(termie): this stuff should probably be moved to middleware
self.assert_admin(context)
self.token_provider_api.revoke_token(token_id)
@controller.v2_deprecated
@controller.protected()
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
tokens = self.token_provider_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if expires and isinstance(expires, datetime.datetime):
t['expires'] = timeutils.isotime(expires)
data = {'revoked': tokens}
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
@controller.v2_deprecated
def endpoints(self, context, token_id):
"""Return a list of endpoints available to the token."""
self.assert_admin(context)
token_ref = self._get_token_ref(token_id)
catalog_ref = None
if token_ref.get('tenant'):
catalog_ref = self.catalog_api.get_catalog(
token_ref['user']['id'],
token_ref['tenant']['id'],
token_ref['metadata'])
return Auth.format_endpoint_list(catalog_ref)
@classmethod
def format_endpoint_list(cls, catalog_ref):
"""Formats a list of endpoints according to Identity API v2.
The v2.0 API wants an endpoint list to look like::
{
'endpoints': [
{
'id': $endpoint_id,
'name': $SERVICE[name],
'type': $SERVICE,
'tenantId': $tenant_id,
'region': $REGION,
}
],
'endpoints_links': [],
}
"""
if not catalog_ref:
return {}
endpoints = []
for region_name, region_ref in six.iteritems(catalog_ref):
for service_type, service_ref in six.iteritems(region_ref):
endpoints.append({
'id': service_ref.get('id'),
'name': service_ref.get('name'),
'type': service_type,
'region': region_name,
'publicURL': service_ref.get('publicURL'),
'internalURL': service_ref.get('internalURL'),
'adminURL': service_ref.get('adminURL'),
})
return {'endpoints': endpoints, 'endpoints_links': []}
| apache-2.0 |
rahul-c1/scrapy | scrapy/contrib/downloadermiddleware/robotstxt.py | 15 | 1856 | """
This is a middleware to respect robots.txt policies. To activate it you must
enable this middleware and enable the ROBOTSTXT_OBEY setting.
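A minimal, illustrative settings.py snippet for doing so (the priority value
100 is an arbitrary example, not a recommendation from this module)::
    ROBOTSTXT_OBEY = True
    DOWNLOADER_MIDDLEWARES = {
        'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
    }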
"""
import robotparser
from scrapy import signals, log
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
class RobotsTxtMiddleware(object):
DOWNLOAD_PRIORITY = 1000
def __init__(self, crawler):
if not crawler.settings.getbool('ROBOTSTXT_OBEY'):
raise NotConfigured
self.crawler = crawler
self._useragent = crawler.settings.get('USER_AGENT')
self._parsers = {}
self._spider_netlocs = set()
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def process_request(self, request, spider):
useragent = self._useragent
rp = self.robot_parser(request, spider)
if rp and not rp.can_fetch(useragent, request.url):
log.msg(format="Forbidden by robots.txt: %(request)s",
level=log.DEBUG, request=request)
raise IgnoreRequest
def robot_parser(self, request, spider):
url = urlparse_cached(request)
netloc = url.netloc
if netloc not in self._parsers:
self._parsers[netloc] = None
robotsurl = "%s://%s/robots.txt" % (url.scheme, url.netloc)
robotsreq = Request(robotsurl, priority=self.DOWNLOAD_PRIORITY)
dfd = self.crawler.engine.download(robotsreq, spider)
dfd.addCallback(self._parse_robots)
self._spider_netlocs.add(netloc)
return self._parsers[netloc]
def _parse_robots(self, response):
rp = robotparser.RobotFileParser(response.url)
rp.parse(response.body.splitlines())
self._parsers[urlparse_cached(response).netloc] = rp
| bsd-3-clause |
crazy-cat/incubator-mxnet | python/mxnet/module/bucketing_module.py | 20 | 20458 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-instance-attributes, too-many-arguments, protected-access
# pylint: disable=too-many-public-methods
"""A `BucketingModule` implements the `BaseModule` API, and allows multiple
symbols to be used depending on the `bucket_key` provided by each different
mini-batch of data.
"""
import logging
import warnings
from .. import context as ctx
from ..initializer import Uniform
from .base_module import BaseModule, _check_input_names
from .module import Module
class BucketingModule(BaseModule):
"""This module helps to deal efficiently with varying-length inputs.
Parameters
----------
sym_gen : function
A function when called with a bucket key, returns a triple
``(symbol, data_names, label_names)``.
default_bucket_key : str (or any python object)
The key for the default bucket.
logger : Logger
context : Context or list of Context
Defaults to ``mx.cpu()``
work_load_list : list of number
Defaults to ``None``, indicating uniform workload.
fixed_param_names: list of str
Defaults to ``None``, indicating no network parameters are fixed.
state_names : list of str
States are similar to data and label, but not provided by data iterator.
Instead they are initialized to 0 and can be set by set_states()
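    Examples
    --------
    A minimal, illustrative ``sym_gen`` sketch; the names 'data' and
    'softmax_label' are assumptions for this sketch, not requirements of the
    class, and in real use the returned symbol typically depends on
    ``bucket_key`` (e.g. an RNN unrolled to that sequence length):
    >>> def sym_gen(bucket_key):
    ...     data = mx.sym.Variable('data')
    ...     label = mx.sym.Variable('softmax_label')
    ...     pred = mx.sym.FullyConnected(data=data, num_hidden=10, name='fc')
    ...     net = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
    ...     return (net, ('data',), ('softmax_label',))
    >>> mod = BucketingModule(sym_gen, default_bucket_key=10, context=mx.cpu())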
"""
def __init__(self, sym_gen, default_bucket_key=None, logger=logging,
context=ctx.cpu(), work_load_list=None,
fixed_param_names=None, state_names=None):
super(BucketingModule, self).__init__(logger=logger)
assert default_bucket_key is not None
self._default_bucket_key = default_bucket_key
self._sym_gen = sym_gen
symbol, data_names, label_names = sym_gen(default_bucket_key)
data_names = list(data_names) if data_names is not None else []
label_names = list(label_names) if label_names is not None else []
state_names = list(state_names) if state_names is not None else []
fixed_param_names = list(fixed_param_names) if fixed_param_names is not None else []
_check_input_names(symbol, data_names, "data", True)
_check_input_names(symbol, label_names, "label", False)
_check_input_names(symbol, state_names, "state", True)
_check_input_names(symbol, fixed_param_names, "fixed_param", True)
self._fixed_param_names = fixed_param_names
self._state_names = state_names
self._context = context
self._work_load_list = work_load_list
self._buckets = {}
self._curr_module = None
self._curr_bucket_key = None
self._params_dirty = False
def _reset_bind(self):
"""Internal utility function to reset binding."""
self.binded = False
self._buckets = {}
self._curr_module = None
self._curr_bucket_key = None
@property
def data_names(self):
"""A list of names for data required by this module."""
if self.binded:
return self._curr_module.data_names
else:
_, data_names, _ = self._sym_gen(self._default_bucket_key)
return data_names
@property
def output_names(self):
"""A list of names for the outputs of this module."""
if self.binded:
return self._curr_module.output_names
else:
symbol, _, _ = self._sym_gen(self._default_bucket_key)
return symbol.list_outputs()
@property
def data_shapes(self):
"""Get data shapes.
Returns
-------
A list of `(name, shape)` pairs.
"""
assert self.binded
return self._curr_module.data_shapes
@property
def label_shapes(self):
"""Get label shapes.
Returns
-------
A list of `(name, shape)` pairs.
The return value could be ``None`` if the module does not need labels,
or if the module is not bound for training (in this case, label information
is not available).
"""
assert self.binded
return self._curr_module.label_shapes
@property
def output_shapes(self):
"""Gets output shapes.
Returns
-------
A list of `(name, shape)` pairs.
"""
assert self.binded
return self._curr_module.output_shapes
def get_params(self):
"""Gets current parameters.
Returns
-------
`(arg_params, aux_params)`
A pair of dictionaries each mapping parameter names to NDArray values.
"""
assert self.binded and self.params_initialized
self._curr_module._params_dirty = self._params_dirty
params = self._curr_module.get_params()
self._params_dirty = False
return params
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True,
allow_extra=False):
"""Assigns parameters and aux state values.
Parameters
----------
arg_params : dict
Dictionary of name to value (`NDArray`) mapping.
aux_params : dict
Dictionary of name to value (`NDArray`) mapping.
allow_missing : bool
If true, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If true, will force re-initialize even if already initialized.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
Examples
--------
>>> # An example of setting module parameters.
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
>>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
"""
if not allow_missing:
self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init)
return
if self.params_initialized and not force_init:
warnings.warn("Parameters already initialized and force_init=False. "
"set_params call ignored.", stacklevel=2)
return
self._curr_module.set_params(arg_params, aux_params, allow_missing=allow_missing,
force_init=force_init, allow_extra=allow_extra)
# because we didn't update self._arg_params, they are dirty now.
self._params_dirty = True
self.params_initialized = True
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
"""Initializes parameters.
Parameters
----------
initializer : Initializer
arg_params : dict
Defaults to ``None``. Existing parameters. This has higher priority
than `initializer`.
aux_params : dict
Defaults to ``None``. Existing auxiliary states. This has higher priority
than `initializer`.
allow_missing : bool
Allow missing values in `arg_params` and `aux_params` (if not ``None``).
In this case, missing values will be filled with `initializer`.
force_init : bool
Defaults to ``False``.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
"""
if self.params_initialized and not force_init:
return
assert self.binded, 'call bind before initializing the parameters'
self._curr_module.init_params(initializer=initializer, arg_params=arg_params,
aux_params=aux_params, allow_missing=allow_missing,
force_init=force_init, allow_extra=allow_extra)
self._params_dirty = False
self.params_initialized = True
def get_states(self, merge_multi_context=True):
"""Gets states from all devices.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the states
            will be collected from multiple devices. A `True` value indicates that we
            should merge the collected results so that they look like they came from
            a single executor.
Returns
-------
list of NDArrays or list of list of NDArrays
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
"""
assert self.binded and self.params_initialized
return self._curr_module.get_states(merge_multi_context=merge_multi_context)
def set_states(self, states=None, value=None):
"""Sets value for states. Only one of states & values can be specified.
Parameters
----------
states : list of list of NDArrays
Source states arrays formatted like ``[[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]]``.
value : number
A single scalar value for all state arrays.
"""
assert self.binded and self.params_initialized
self._curr_module.set_states(states, value)
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binding for a `BucketingModule` means setting up the buckets and binding the
executor for the default bucket key. Executors corresponding to other keys are
bound afterwards with `switch_bucket`.
Parameters
----------
data_shapes : list of (str, tuple)
This should correspond to the symbol for the default bucket.
label_shapes : list of (str, tuple)
This should correspond to the symbol for the default bucket.
for_training : bool
Default is ``True``.
inputs_need_grad : bool
Default is ``False``.
force_rebind : bool
Default is ``False``.
shared_module : BucketingModule
Default is ``None``. This value is currently not used.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
bucket_key : str (or any python object)
bucket key for binding. by default use the default_bucket_key
"""
# in case we already initialized params, keep it
if self.params_initialized:
arg_params, aux_params = self.get_params()
# force rebinding is typically used when one want to switch from
# training to prediction phase.
if force_rebind:
self._reset_bind()
if self.binded:
self.logger.warning('Already bound, ignoring bind()')
return
assert shared_module is None, 'shared_module for BucketingModule is not supported'
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
self.binded = True
symbol, data_names, label_names = self._sym_gen(self._default_bucket_key)
module = Module(symbol, data_names, label_names, logger=self.logger,
context=self._context, work_load_list=self._work_load_list,
fixed_param_names=self._fixed_param_names,
state_names=self._state_names)
module.bind(data_shapes, label_shapes, for_training, inputs_need_grad,
force_rebind=False, shared_module=None, grad_req=grad_req)
self._curr_module = module
self._curr_bucket_key = self._default_bucket_key
self._buckets[self._default_bucket_key] = module
# copy back saved params, if already initialized
if self.params_initialized:
self.set_params(arg_params, aux_params)
    def switch_bucket(self, bucket_key, data_shapes, label_shapes=None):
        """Switches to a different bucket. This will change ``self._curr_module``.
Parameters
----------
bucket_key : str (or any python object)
The key of the target bucket.
data_shapes : list of (str, tuple)
Typically ``data_batch.provide_data``.
label_shapes : list of (str, tuple)
Typically ``data_batch.provide_label``.
"""
assert self.binded, 'call bind before switching bucket'
        if bucket_key not in self._buckets:
symbol, data_names, label_names = self._sym_gen(bucket_key)
module = Module(symbol, data_names, label_names,
logger=self.logger, context=self._context,
work_load_list=self._work_load_list,
fixed_param_names=self._fixed_param_names,
state_names=self._state_names)
module.bind(data_shapes, label_shapes, self._curr_module.for_training,
self._curr_module.inputs_need_grad,
force_rebind=False, shared_module=self._buckets[self._default_bucket_key])
self._buckets[bucket_key] = module
self._curr_module = self._buckets[bucket_key]
self._curr_bucket_key = bucket_key
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),),
force_init=False):
"""Installs and initializes optimizers.
Parameters
----------
kvstore : str or KVStore
Defaults to `'local'`.
optimizer : str or Optimizer
Defaults to `'sgd'`
optimizer_params : dict
Defaults to `(('learning_rate', 0.01),)`. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Defaults to ``False``, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
"""
assert self.binded and self.params_initialized
if self.optimizer_initialized and not force_init:
self.logger.warning('optimizer already initialized, ignoring.')
return
self._curr_module.init_optimizer(kvstore, optimizer, optimizer_params,
force_init=force_init)
for mod in self._buckets.values():
if mod is not self._curr_module:
mod.borrow_optimizer(self._curr_module)
self.optimizer_initialized = True
def prepare(self, data_batch):
"""Prepares a data batch for forward.
Parameters
----------
data_batch : DataBatch
"""
# perform bind if haven't done so
assert self.binded and self.params_initialized
bucket_key = data_batch.bucket_key
original_bucket_key = self._curr_bucket_key
data_shapes = data_batch.provide_data
label_shapes = data_batch.provide_label
self.switch_bucket(bucket_key, data_shapes, label_shapes)
# switch back
self.switch_bucket(original_bucket_key, None, None)
def forward(self, data_batch, is_train=None):
"""Forward computation.
Parameters
----------
data_batch : DataBatch
is_train : bool
            Defaults to ``None``, in which case `is_train` is taken as ``self.for_training``.
"""
assert self.binded and self.params_initialized
self.switch_bucket(data_batch.bucket_key, data_batch.provide_data,
data_batch.provide_label)
self._curr_module.forward(data_batch, is_train=is_train)
def backward(self, out_grads=None):
"""Backward computation."""
assert self.binded and self.params_initialized
self._curr_module.backward(out_grads=out_grads)
def update(self):
"""Updates parameters according to installed optimizer and the gradient computed
in the previous forward-backward cycle.
"""
assert self.binded and self.params_initialized and self.optimizer_initialized
self._params_dirty = True
self._curr_module.update()
def get_outputs(self, merge_multi_context=True):
"""Gets outputs from a previous forward computation.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look like they came from
            a single executor.
Returns
-------
list of numpy arrays or list of list of numpy arrays
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are numpy arrays.
"""
assert self.binded and self.params_initialized
return self._curr_module.get_outputs(merge_multi_context=merge_multi_context)
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look like they came from
            a single executor.
Returns
-------
list of NDArrays or list of list of NDArrays
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
"""
assert self.binded and self.params_initialized and self.inputs_need_grad
return self._curr_module.get_input_grads(merge_multi_context=merge_multi_context)
def update_metric(self, eval_metric, labels):
"""Evaluates and accumulates evaluation metric on outputs of the last forward computation.
Parameters
----------
eval_metric : EvalMetric
labels : list of NDArray
Typically ``data_batch.label``.
"""
assert self.binded and self.params_initialized
self._curr_module.update_metric(eval_metric, labels)
@property
def symbol(self):
"""The symbol of the current bucket being used."""
assert self.binded
return self._curr_module.symbol
def install_monitor(self, mon):
"""Installs monitor on all executors """
assert self.binded
for mod in self._buckets.values():
mod.install_monitor(mon)
| apache-2.0 |
HyperBaton/ansible | lib/ansible/modules/cloud/univention/udm_group.py | 37 | 4839 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright: (c) 2016, Adfinis SyGroup AG
# Tobias Rueetschi <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: udm_group
version_added: "2.2"
author:
- Tobias Rüetschi (@keachi)
short_description: Manage the POSIX group
description:
  - "This module allows you to manage user groups on a Univention Corporate Server (UCS).
     It uses the Python API of the UCS to create a new object or edit it."
requirements:
- Python >= 2.6
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the group is present or not.
name:
required: true
description:
- Name of the posix group.
description:
required: false
description:
- Group description.
position:
required: false
description:
      - Define the whole LDAP position of the group, e.g.
C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
ou:
required: false
description:
- LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
subpath:
required: false
description:
- Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
'''
EXAMPLES = '''
# Create a POSIX group
- udm_group:
name: g123m-1A
# Create a POSIX group with the exact DN
# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
- udm_group:
name: g123m-1A
subpath: 'cn=classes,cn=students,cn=groups'
ou: school
# or
- udm_group:
name: g123m-1A
position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
'''
RETURN = '''# '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.univention_umc import (
umc_module_for_add,
umc_module_for_edit,
ldap_search,
base_dn,
)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True,
type='str'),
description=dict(default=None,
type='str'),
position=dict(default='',
type='str'),
ou=dict(default='',
type='str'),
subpath=dict(default='cn=groups',
type='str'),
state=dict(default='present',
choices=['present', 'absent'],
type='str')
),
supports_check_mode=True
)
name = module.params['name']
description = module.params['description']
position = module.params['position']
ou = module.params['ou']
subpath = module.params['subpath']
state = module.params['state']
changed = False
diff = None
groups = list(ldap_search(
'(&(objectClass=posixGroup)(cn={0}))'.format(name),
attr=['cn']
))
if position != '':
container = position
else:
if ou != '':
ou = 'ou={0},'.format(ou)
if subpath != '':
subpath = '{0},'.format(subpath)
container = '{0}{1}{2}'.format(subpath, ou, base_dn())
group_dn = 'cn={0},{1}'.format(name, container)
exists = bool(len(groups))
if state == 'present':
try:
if not exists:
grp = umc_module_for_add('groups/group', container)
else:
grp = umc_module_for_edit('groups/group', group_dn)
grp['name'] = name
grp['description'] = description
diff = grp.diff()
changed = grp.diff() != []
if not module.check_mode:
if not exists:
grp.create()
else:
grp.modify()
except Exception:
module.fail_json(
msg="Creating/editing group {0} in {1} failed".format(name, container)
)
if state == 'absent' and exists:
try:
grp = umc_module_for_edit('groups/group', group_dn)
if not module.check_mode:
grp.remove()
changed = True
except Exception:
module.fail_json(
msg="Removing group {0} failed".format(name)
)
module.exit_json(
changed=changed,
name=name,
diff=diff,
container=container
)
if __name__ == '__main__':
main()
| gpl-3.0 |
google-research/language | language/tek_representations/utils/mrqa_official_eval.py | 1 | 3638 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Official evaluation script for the MRQA Workshop Shared Task.
Adapted from the SQuAD v1.1 official evaluation script.
Usage:
python official_eval.py dataset_file.jsonl.gz prediction_file.json
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gzip
import json
import re
import string
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
"""Computes the token-level F1 score from the ground truth."""
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = collections.Counter(prediction_tokens) & collections.Counter(
ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
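# Worked example (illustrative): prediction "the cat sat" vs. ground truth
# "cat sat down" normalize to ["cat", "sat"] and ["cat", "sat", "down"], so
# num_same = 2, precision = 2/2 = 1.0, recall = 2/3, and
# f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.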
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def read_predictions(prediction_file):
with open(prediction_file) as f:
predictions = json.load(f)
return predictions
def read_answers(gold_file):
answers = {}
with gzip.open(gold_file, 'rb') as f:
for i, line in enumerate(f):
example = json.loads(line)
if i == 0 and 'header' in example:
continue
for qa in example['qas']:
answers[qa['qid']] = qa['answers']
return answers
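# Illustrative gold-file line (only the fields read above are shown; real MRQA
# records carry additional fields such as the passage context):
#   {"qas": [{"qid": "q1", "answers": ["Paris", "paris"]}]}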
def evaluate(answers, predictions, skip_no_answer=False):
"""Evaluates EM/F1 of predictions given answers."""
f1 = exact_match = total = 0
for qid, ground_truths in answers.items():
if qid not in predictions:
if not skip_no_answer:
message = 'Unanswered question %s will receive score 0.' % qid
print(message)
total += 1
continue
total += 1
prediction = predictions[qid]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction,
ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
| apache-2.0 |
MostlyOpen/odoo_addons_jcafb | myo_lab_test_cst/models/lab_test_result_urina.py | 1 | 4485 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
# from datetime import datetime
from openerp import fields, models
class LabTestResultUrina(models.Model):
_name = "myo.lab_test.result.urina"
_log_access = False
person_code = fields.Char(string='Person Code', required=True)
address_code = fields.Char(string='Address Code')
lab_test_code = fields.Char(string='Lab Test Code')
lab_test_type = fields.Char(string='Lab Test Type')
gender = fields.Char(string='Gender')
age = fields.Char(string='Age')
person_category = fields.Char(string='Person Category')
person_status = fields.Char(string='Person Status')
address_city = fields.Char(string='Cidade')
address_category = fields.Char(string='Address Category')
address_ditrict = fields.Char(string='Address District')
EUR_02_01 = fields.Char(
string='EUR-02-01',
help='Volume'
)
EUR_02_02 = fields.Char(
string='EUR-02-02',
help='Densidade'
)
EUR_02_03 = fields.Char(
string='EUR-02-03',
help='Aspecto'
)
EUR_02_04 = fields.Char(
string='EUR-02-04',
help='Cor'
)
EUR_02_05 = fields.Char(
string='EUR-02-05',
help='Odor'
)
EUR_03_01 = fields.Char(
string='EUR-03-01',
help='ph'
)
EUR_03_02 = fields.Char(
string='EUR-03-02',
help='Proteínas'
)
EUR_03_03 = fields.Char(
string='EUR-03-03',
help='Glicose'
)
EUR_03_04 = fields.Char(
string='EUR-03-04',
help='Cetona'
)
EUR_03_05 = fields.Char(
string='EUR-03-05',
help='Pigmentos biliares'
)
EUR_03_06 = fields.Char(
string='EUR-03-06',
help='Sangue'
)
EUR_03_07 = fields.Char(
string='EUR-03-07',
help='Urobilinogênio'
)
EUR_03_08 = fields.Char(
string='EUR-03-08',
help='Nitrito'
)
EUR_04_01 = fields.Char(
string='EUR-04-01',
help='Células Epiteliais'
)
EUR_04_02 = fields.Char(
string='EUR-04-02',
help='Muco'
)
EUR_04_03 = fields.Char(
string='EUR-04-03',
help='Cristais'
)
EUR_04_04 = fields.Char(
string='EUR-04-04',
help='Leucócitos'
)
EUR_04_05 = fields.Char(
string='EUR-04-05',
help='Hemácias'
)
EUR_04_06 = fields.Char(
string='EUR-04-06',
help='Cilindros'
)
EUR_04_07 = fields.Char(
string='EUR-04-07',
help='Cilindros Hialinos'
)
EUR_04_08 = fields.Char(
string='EUR-04-08',
help='Cilindros Granulosos'
)
EUR_04_09 = fields.Char(
string='EUR-04-09',
help='Cilindros Leucocitários'
)
EUR_04_10 = fields.Char(
string='EUR-04-10',
help='Cilindros Hemáticos'
)
EUR_04_11 = fields.Char(
string='EUR-04-11',
help='Cilindros Céreos'
)
EUR_04_12 = fields.Char(
string='EUR-04-12',
help='Outros tipos de Cilindros'
)
EUR_05_01 = fields.Char(
string='EUR-05-01',
help='Observações'
)
notes = fields.Text(string='Notes')
active = fields.Boolean(
'Active',
help="If unchecked, it will allow you to hide the lab test result urina without removing it.",
default=1
)
_sql_constraints = [
(
'person_code_uniq',
'UNIQUE (person_code)',
'Error! The Person Code must be unique!'
),
]
_rec_name = 'person_code'
_order = 'person_code'
| agpl-3.0 |
systemd/casync | doc/conf.py | 1 | 3578 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: LGPL-2.1+
#
# casync documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 20 16:46:39 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'casync'
author = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'casyncdoc'
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('casync', 'casync', 'casync Documentation',
[], 1)
]
| lgpl-2.1 |
oikoumene/wcc.api | wcc/api/api.py | 1 | 2196 | # -*- coding: utf-8 -*-
from plone.jsonapi.routes import add_plone_route
# CRUD
from plone.jsonapi.routes.api import get_items
from plone.jsonapi.routes.api import create_items
from plone.jsonapi.routes.api import update_items
from plone.jsonapi.routes.api import delete_items
from plone.jsonapi.routes.api import url_for
# GET
@add_plone_route("/wccdocument", "wccdocument", methods=["GET"])
@add_plone_route("/wccdocument/<string:uid>", "wccdocument", methods=["GET"])
def get(context, request, uid=None):
""" get wccdocument
"""
items = get_items("wcc.document.document", request, uid=uid, endpoint="wccdocument")
return {
"url": url_for("wccdocument"),
"count": len(items),
"items": items,
}
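# Illustrative requests (the URL prefix is assumed to be the plone.jsonapi.routes
# API root, e.g. http://localhost:8080/Plone/@@API/plone/api/1.0):
#   GET .../wccdocument          -> list all wcc.document.document items
#   GET .../wccdocument/<uid>    -> fetch a single item by its UID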
# CREATE
@add_plone_route("/wccdocument/create", "wccdocument_create", methods=["POST"])
@add_plone_route("/wccdocument/create/<string:uid>", "wccdocument_create", methods=["POST"])
def create(context, request, uid=None):
""" create wccdocument
"""
items = create_items("wcc.document.document", request, uid=uid, endpoint="wccdocument")
return {
"url": url_for("wccdocument_create"),
"count": len(items),
"items": items,
}
# UPDATE
@add_plone_route("/wccdocument/update", "wccdocument_update", methods=["POST"])
@add_plone_route("/wccdocument/update/<string:uid>", "wccdocument_update", methods=["POST"])
def update(context, request, uid=None):
""" update wccdocument
"""
items = update_items("wcc.document.document", request, uid=uid, endpoint="wccdocument")
return {
"url": url_for("wccdocument_update"),
"count": len(items),
"items": items,
}
# DELETE
@add_plone_route("/wccdocument/delete", "wccdocument_delete", methods=["POST"])
@add_plone_route("/wccdocument/delete/<string:uid>", "wccdocument_delete", methods=["POST"])
def delete(context, request, uid=None):
""" delete wccdocument
"""
items = delete_items("wcc.document.document", request, uid=uid, endpoint="wccdocument")
return {
"url": url_for("wccdocument_delete"),
"count": len(items),
"items": items,
}
# vim: set ft=python ts=4 sw=4 expandtab :
| lgpl-3.0 |
TalShafir/ansible | lib/ansible/modules/cloud/docker/docker_image_facts.py | 7 | 7425 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_image_facts
short_description: Inspect docker images
version_added: "2.1.0"
description:
- Provide one or more image names, and the module will inspect each, returning an array of inspection results.
options:
name:
description:
- An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
where C(tag) is optional. If a tag is not provided, C(latest) will be used. Instead of image names, also
image IDs can be used.
required: true
extends_documentation_fragment:
- docker
requirements:
- "python >= 2.6"
- "docker-py >= 1.8.0"
- "Please note that the L(docker-py,https://pypi.org/project/docker-py/) Python
module has been superseded by L(docker,https://pypi.org/project/docker/)
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
install the C(docker) Python module. Note that both modules should I(not)
be installed at the same time. Also note that when both modules are installed
and one of them is uninstalled, the other might no longer function and a
reinstall of it is required."
- "Docker API >= 1.20"
author:
- Chris Houseknecht (@chouseknecht)
'''
EXAMPLES = '''
- name: Inspect a single image
docker_image_facts:
name: pacur/centos-7
- name: Inspect multiple images
docker_image_facts:
name:
- pacur/centos-7
- sinatra
'''
RETURN = '''
images:
description: Facts for the selected images.
returned: always
type: dict
sample: [
{
"Architecture": "amd64",
"Author": "",
"Comment": "",
"Config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/etc/docker/registry/config.yml"
],
"Domainname": "",
"Entrypoint": [
"/bin/registry"
],
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"5000/tcp": {}
},
"Hostname": "e5c68db50333",
"Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
"Labels": {},
"OnBuild": [],
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/var/lib/registry": {}
},
"WorkingDir": ""
},
"Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
"ContainerConfig": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/bin/sh",
"-c",
'#(nop) CMD ["/etc/docker/registry/config.yml"]'
],
"Domainname": "",
"Entrypoint": [
"/bin/registry"
],
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"5000/tcp": {}
},
"Hostname": "e5c68db50333",
"Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
"Labels": {},
"OnBuild": [],
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/var/lib/registry": {}
},
"WorkingDir": ""
},
"Created": "2016-03-08T21:08:15.399680378Z",
"DockerVersion": "1.9.1",
"GraphDriver": {
"Data": null,
"Name": "aufs"
},
"Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
"Name": "registry:2",
"Os": "linux",
"Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
"RepoDigests": [],
"RepoTags": [
"registry:2"
],
"Size": 0,
"VirtualSize": 165808884
}
]
'''
try:
from docker import utils
except ImportError:
# missing docker-py handled in ansible.module_utils.docker_common
pass
from ansible.module_utils.docker_common import AnsibleDockerClient, DockerBaseClass, is_image_name_id
class ImageManager(DockerBaseClass):
def __init__(self, client, results):
super(ImageManager, self).__init__()
self.client = client
self.results = results
self.name = self.client.module.params.get('name')
self.log("Gathering facts for images: %s" % (str(self.name)))
if self.name:
self.results['images'] = self.get_facts()
else:
self.results['images'] = self.get_all_images()
def fail(self, msg):
self.client.fail(msg)
def get_facts(self):
'''
Lookup and inspect each image name found in the names parameter.
:returns array of image dictionaries
'''
results = []
names = self.name
if not isinstance(names, list):
names = [names]
for name in names:
if is_image_name_id(name):
self.log('Fetching image %s (ID)' % (name))
image = self.client.find_image_by_id(name)
else:
repository, tag = utils.parse_repository_tag(name)
if not tag:
tag = 'latest'
self.log('Fetching image %s:%s' % (repository, tag))
image = self.client.find_image(name=repository, tag=tag)
if image:
results.append(image)
return results
def get_all_images(self):
results = []
images = self.client.images()
for image in images:
try:
inspection = self.client.inspect_image(image['Id'])
except Exception as exc:
self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
results.append(inspection)
return results
def main():
argument_spec = dict(
name=dict(type='list'),
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
min_docker_api_version='1.20',
)
results = dict(
changed=False,
images=[]
)
ImageManager(client, results)
client.module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
Menpiko/SnaPKernel-N | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
mahak/neutron | neutron/tests/unit/services/logapi/drivers/test_manager.py | 2 | 7789 | # Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.callbacks import events
from neutron_lib import exceptions
from neutron_lib import fixture
from neutron_lib.services.logapi import constants as log_const
from neutron.services.logapi.common import exceptions as log_exc
from neutron.services.logapi.drivers import base as log_driver_base
from neutron.services.logapi.drivers import manager as driver_mgr
from neutron.tests import tools
from neutron.tests.unit.services.logapi import base
class TestGetParameter(base.BaseLogTestCase):
def test__get_param_missing_parameter(self):
kwargs = {'context': mock.sentinel.context}
self.assertRaises(log_exc.LogapiDriverException,
driver_mgr._get_param,
args=[], kwargs=kwargs,
name='log_obj', index=1)
self.assertRaises(log_exc.LogapiDriverException,
driver_mgr._get_param,
args=[mock.sentinel.context], kwargs={},
name='log_obj', index=1)
self.assertRaises(log_exc.LogapiDriverException,
driver_mgr._get_param,
args=[], kwargs={'log_obj': mock.sentinel.log_obj},
name='context', index=0)
class TestLogDriversManagerBase(base.BaseLogTestCase):
def setUp(self):
super(TestLogDriversManagerBase, self).setUp()
self.config_parse()
self.setup_coreplugin(load_plugins=False)
@staticmethod
def _create_manager_with_drivers(drivers_details):
for name, driver_details in drivers_details.items():
class LogDriver(log_driver_base.DriverBase):
@property
def is_loaded(self):
return driver_details['is_loaded']
LogDriver(name,
driver_details.get('vif_types', []),
driver_details.get('vnic_types', []),
driver_details.get('supported_logging_types', []))
return driver_mgr.LoggingServiceDriverManager()
class TestLogDriversManagerMulti(TestLogDriversManagerBase):
"""Test calls happen to all drivers"""
def test_driver_manager_empty_with_no_drivers(self):
driver_manager = self._create_manager_with_drivers({})
self.assertEqual(0, len(driver_manager.drivers))
def test_driver_manager_empty_with_no_loaded_drivers(self):
driver_manager = self._create_manager_with_drivers(
{'driver-A': {'is_loaded': False}})
self.assertEqual(0, len(driver_manager.drivers))
def test_driver_manager_with_one_loaded_driver(self):
driver_manager = self._create_manager_with_drivers(
{'driver-A': {'is_loaded': True}})
self.assertEqual(1, len(driver_manager.drivers))
def test_driver_manager_with_two_loaded_drivers(self):
driver_manager = self._create_manager_with_drivers(
{'driver-A': {'is_loaded': True},
'driver-B': {'is_loaded': True}})
self.assertEqual(2, len(driver_manager.drivers))
class TestLogDriversManagerLoggingTypes(TestLogDriversManagerBase):
"""Test supported logging types"""
def test_available_logging_types(self):
driver_manager = self._create_manager_with_drivers(
{'driver-A': {'is_loaded': True,
'supported_logging_types': ['security_group']},
'driver-B': {'is_loaded': True,
'supported_logging_types':
['security_group', 'firewall']}
})
self.assertEqual(set(['security_group', 'firewall']),
driver_manager.supported_logging_types)
class TestLogDriversCalls(TestLogDriversManagerBase):
"""Test log driver calls"""
def setUp(self):
super(TestLogDriversCalls, self).setUp()
self.driver_manager = self._create_manager_with_drivers(
{'driver-A': {'is_loaded': True}})
def test_implemented_call_methods(self):
for method in log_const.LOG_CALL_METHODS:
with mock.patch.object(log_driver_base.DriverBase, method) as \
method_fnc:
context = mock.sentinel.context
log_obj = mock.sentinel.log_obj
self.driver_manager.call(
method, context=context, log_objs=[log_obj])
method_fnc.assert_called_once_with(
context=context, log_objs=[log_obj])
def test_not_implemented_call_methods(self):
context = mock.sentinel.context
log_obj = mock.sentinel.log_obj
self.assertRaises(exceptions.DriverCallError, self.driver_manager.call,
'wrong_method', context=context, log_objs=[log_obj])
class TestHandleResourceCallback(TestLogDriversManagerBase):
"""Test handle resource callback"""
def setUp(self):
super(TestHandleResourceCallback, self).setUp()
self._cb_mgr = mock.Mock()
self.useFixture(fixture.CallbackRegistryFixture(
callback_manager=self._cb_mgr))
self.driver_manager = driver_mgr.LoggingServiceDriverManager()
def test_subscribe_resources_cb(self):
class FakeResourceCB1(driver_mgr.ResourceCallBackBase):
def handle_event(self, resource, event, trigger, **kwargs):
pass
class FakeResourceCB2(driver_mgr.ResourceCallBackBase):
def handle_event(self, resource, event, trigger, **kwargs):
pass
driver_mgr.RESOURCE_CB_CLASS_MAP = {'fake_resource1': FakeResourceCB1,
'fake_resource2': FakeResourceCB2}
self.driver_manager._setup_resources_cb_handle()
fake_resource_cb1 = FakeResourceCB1(
'fake_resource1', self.driver_manager.call)
fake_resource_cb2 = FakeResourceCB2(
'fake_resource2', self.driver_manager.call)
assert_calls = [
mock.call(
*tools.get_subscribe_args(
fake_resource_cb1.handle_event,
'fake_resource1', events.AFTER_CREATE)),
mock.call(
*tools.get_subscribe_args(
fake_resource_cb1.handle_event,
'fake_resource1', events.AFTER_UPDATE)),
mock.call(
*tools.get_subscribe_args(
fake_resource_cb1.handle_event,
'fake_resource1', events.AFTER_DELETE)),
mock.call(
*tools.get_subscribe_args(
fake_resource_cb2.handle_event,
'fake_resource2', events.AFTER_CREATE)),
mock.call(
*tools.get_subscribe_args(
fake_resource_cb2.handle_event,
'fake_resource2', events.AFTER_UPDATE)),
mock.call(
*tools.get_subscribe_args(
fake_resource_cb2.handle_event,
'fake_resource2', events.AFTER_DELETE)),
]
self._cb_mgr.subscribe.assert_has_calls(assert_calls)
| apache-2.0 |
wwj718/edx-platform | common/test/acceptance/tests/discussion/helpers.py | 46 | 4061 | """
Helper functions and classes for discussion tests.
"""
from uuid import uuid4
import json
from ...fixtures import LMS_BASE_URL
from ...fixtures.course import CourseFixture
from ...fixtures.discussion import (
SingleThreadViewFixture,
Thread,
Response,
)
from ...pages.lms.discussion import DiscussionTabSingleThreadPage
from ...tests.helpers import UniqueCourseTest
class BaseDiscussionMixin(object):
"""
A mixin containing methods common to discussion tests.
"""
def setup_thread(self, num_responses, **thread_kwargs):
"""
Create a test thread with the given number of responses, passing all
keyword arguments through to the Thread fixture, then invoke
setup_thread_page.
"""
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(id=thread_id, commentable_id=self.discussion_id, **thread_kwargs)
)
for i in range(num_responses):
thread_fixture.addResponse(Response(id=str(i), body=str(i)))
thread_fixture.push()
self.setup_thread_page(thread_id)
return thread_id
class CohortTestMixin(object):
"""
Mixin for tests of cohorted courses
"""
def setup_cohort_config(self, course_fixture, auto_cohort_groups=None):
"""
Sets up the course to use cohorting with the given list of auto_cohort_groups.
If auto_cohort_groups is None, no auto cohorts are set.
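        Illustrative call (the group name is a placeholder)::
            self.setup_cohort_config(course_fixture, auto_cohort_groups=['AutoGroup1'])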
"""
course_fixture._update_xblock(course_fixture._course_location, {
"metadata": {
u"cohort_config": {
"auto_cohort_groups": auto_cohort_groups or [],
"cohorted_discussions": [],
"cohorted": True,
},
},
})
def disable_cohorting(self, course_fixture):
"""
Disables cohorting for the current course fixture.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + '/cohorts/settings' # pylint: disable=protected-access
data = json.dumps({'is_cohorted': False})
response = course_fixture.session.patch(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to disable cohorts")
def add_manual_cohort(self, course_fixture, cohort_name):
"""
Adds a cohort by name, returning its ID.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + '/cohorts/'
data = json.dumps({"name": cohort_name, 'assignment_type': 'manual'})
response = course_fixture.session.post(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to create cohort")
return response.json()['id']
def add_user_to_cohort(self, course_fixture, username, cohort_id):
"""
Adds a user to the specified cohort.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + "/cohorts/{}/add".format(cohort_id)
data = {"users": username}
course_fixture.headers['Content-type'] = 'application/x-www-form-urlencoded'
response = course_fixture.session.post(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to add user to cohort")
class BaseDiscussionTestCase(UniqueCourseTest):
def setUp(self):
super(BaseDiscussionTestCase, self).setUp()
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
self.course_fixture = CourseFixture(**self.course_info)
self.course_fixture.add_advanced_settings(
{'discussion_topics': {'value': {'Test Discussion Topic': {'id': self.discussion_id}}}}
)
self.course_fixture.install()
def create_single_thread_page(self, thread_id):
"""
Sets up a `DiscussionTabSingleThreadPage` for a given
`thread_id`.
"""
return DiscussionTabSingleThreadPage(self.browser, self.course_id, self.discussion_id, thread_id)
| agpl-3.0 |
groschovskiy/keyczar | cpp/src/tools/swtoolkit/site_scons/site_tools/environment_tools.py | 11 | 10726 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Set up tools for environments for for software construction toolkit.
This module is a SCons tool which should be include in all environments. It
will automatically be included by the component_setup tool.
"""
import os
import SCons
#------------------------------------------------------------------------------
def FilterOut(self, **kw):
"""Removes values from existing construction variables in an Environment.
The values to remove should be a list. For example:
self.FilterOut(CPPDEFINES=['REMOVE_ME', 'ME_TOO'])
Args:
self: Environment to alter.
kw: (Any other named arguments are values to remove).
"""
kw = SCons.Environment.copy_non_reserved_keywords(kw)
for key, val in kw.items():
envval = self.get(key, None)
if envval is None:
# No existing variable in the environment, so nothing to delete.
continue
for vremove in val:
# Use while not if, so we can handle duplicates.
while vremove in envval:
envval.remove(vremove)
self[key] = envval
# TODO: SCons.Environment.Append() has much more logic to deal with various
# types of values. We should handle all those cases in here too. (If
# variable is a dict, etc.)
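# Illustrative sketch (added comment, not part of the original tool): given an
# environment where env['CPPDEFINES'] == ['KEEP_ME', 'REMOVE_ME', 'REMOVE_ME'],
# calling env.FilterOut(CPPDEFINES=['REMOVE_ME']) removes every occurrence and
# leaves env['CPPDEFINES'] == ['KEEP_ME']; keys absent from the environment are
# silently skipped.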
#------------------------------------------------------------------------------
def Overlap(self, values1, values2):
"""Checks for overlap between the values.
Args:
self: Environment to use for variable substitution.
values1: First value(s) to compare. May be a string or list of strings.
values2: Second value(s) to compare. May be a string or list of strings.
Returns:
The list of values in common after substitution, or an empty list if
the values do not overlap.
Converts the values to a set of plain strings via self.SubstList2() before
comparison, so SCons $ variables are evaluated.
"""
set1 = set(self.SubstList2(values1))
set2 = set(self.SubstList2(values2))
return list(set1.intersection(set2))
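# Illustrative sketch (added comment, not part of the original tool): with
# env['CC'] == 'gcc', env.Overlap(['$CC', 'cl'], ['gcc', 'icc']) substitutes the
# SCons variables first and returns ['gcc']; inputs with nothing in common
# return an empty list.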
#------------------------------------------------------------------------------
def ApplySConscript(self, sconscript_file):
"""Applies a SConscript to the current environment.
Args:
self: Environment to modify.
sconscript_file: Name of SConscript file to apply.
Returns:
The return value from the call to SConscript().
ApplySConscript() should be used when an existing SConscript which sets up an
environment gets too large, or when there is common setup between multiple
environments which can't be reduced into a parent environment which the
multiple child environments Clone() from. The latter case is necessary
because env.Clone() only enables single inheritance for environments.
ApplySConscript() is NOT intended to replace the Tool() method. If you need
to add methods or builders to one or more environments, do that as a tool
(and write unit tests for them).
ApplySConscript() is equivalent to the following SCons call:
SConscript(sconscript_file, exports={'env':self})
The called SConscript should import the 'env' variable to get access to the
calling environment:
Import('env')
Changes made to env in the called SConscript will be applied to the
environment calling ApplySConscript() - that is, env in the called SConscript
is a reference to the calling environment.
If you need to export multiple variables to the called SConscript, or return
variables from it, use the existing SConscript() function.
"""
return self.SConscript(sconscript_file, exports={'env': self})
#------------------------------------------------------------------------------
def BuildSConscript(self, sconscript_file):
"""Builds a SConscript based on the current environment.
Args:
self: Environment to clone and pass to the called SConscript.
sconscript_file: Name of SConscript file to build. If this is a directory,
this method will look for sconscript_file+'/build.scons', and if that
is not found, sconscript_file+'/SConscript'.
Returns:
The return value from the call to SConscript().
BuildSConscript() should be used when an existing SConscript which builds a
project gets too large, or when a group of SConscripts are logically related
but should not directly affect each others' environments (for example, a
library might want to build a number of unit tests which exist in
subdirectories, but not allow those tests' SConscripts to affect/pollute the
library's environment).
BuildSConscript() is NOT intended to replace the Tool() method. If you need
to add methods or builders to one or more environments, do that as a tool
(and write unit tests for them).
BuildSConscript() is equivalent to the following SCons call:
SConscript(sconscript_file, exports={'env':self.Clone()})
or if sconscript_file is a directory:
SConscript(sconscript_file+'/build.scons', exports={'env':self.Clone()})
The called SConscript should import the 'env' variable to get access to the
calling environment:
Import('env')
Changes made to env in the called SConscript will NOT be applied to the
environment calling BuildSConscript() - that is, env in the called SConscript
is a clone/copy of the calling environment, not a reference to that
environment.
If you need to export multiple variables to the called SConscript, or return
variables from it, use the existing SConscript() function.
"""
# Need to look for the source node, since by default SCons will look for the
# entry in the variant_dir, which won't exist (and thus won't be a directory
# or a file). This isn't a problem in BuildComponents(), since the variant
# dir is only set inside its call to SConscript().
if self.Entry(sconscript_file).srcnode().isdir():
# Building a subdirectory, so look for build.scons or SConscript
script_file = sconscript_file + '/build.scons'
if not self.File(script_file).srcnode().exists():
script_file = sconscript_file + '/SConscript'
else:
script_file = sconscript_file
self.SConscript(script_file, exports={'env': self.Clone()})
#------------------------------------------------------------------------------
def SubstList2(self, *args):
"""Replacement subst_list designed for flags/parameters, not command lines.
Args:
self: Environment context.
args: One or more strings or lists of strings.
Returns:
A flattened, substituted list of strings.
SCons's built-in subst_list evaluates (substitutes) variables in its
arguments, and returns a list of lists (one per positional argument). Since
it is designed for use in command line expansion, the list items are
SCons.Subst.CmdStringHolder instances. These instances can't be passed into
env.File() (or subsequent calls to env.subst(), either). The returned
nested lists also need to be flattened via env.Flatten() before the caller
can iterate over the contents.
SubstList2() does a subst_list, flattens the result, then maps the flattened
list to strings.
It is better to do:
for x in env.SubstList2('$MYPARAMS'):
than to do:
for x in env.get('MYPARAMS', []):
and definitely better than:
for x in env['MYPARAMS']:
which will throw an exception if MYPARAMS isn't defined.
"""
return map(str, self.Flatten(self.subst_list(args)))
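# Illustrative sketch (added comment, not part of the original tool): with
# env['MYPARAMS'] == ['$CC', '-Wall'] and env['CC'] == 'gcc',
# env.SubstList2('$MYPARAMS') evaluates to the plain string list
# ['gcc', '-Wall'], which can safely be passed to env.File() or iterated over.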
#------------------------------------------------------------------------------
def RelativePath(self, source, target, sep=os.sep, source_is_file=False):
"""Calculates the relative path from source to target.
Args:
self: Environment context.
source: Source path or node.
target: Target path or node.
sep: Path separator to use in returned relative path.
source_is_file: If true, calculates the relative path from the directory
containing the source, rather than the source itself. Note that if
source is a node, you can pass in source.dir instead, which is shorter.
Returns:
The relative path from source to target.
"""
# Split source and target into list of directories
source = self.Entry(str(source))
if source_is_file:
source = source.dir
source = source.abspath.split(os.sep)
target = self.Entry(str(target)).abspath.split(os.sep)
# Handle source and target identical
if source == target:
if source_is_file:
return source[-1] # Bare filename
else:
return '.' # Directory pointing to itself
# TODO: Handle UNC paths and drive letters (fine if they're the same, but if
# they're different, there IS no relative path)
# Remove common elements
while source and target and source[0] == target[0]:
source.pop(0)
target.pop(0)
# Join the remaining elements
return sep.join(['..'] * len(source) + target)
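# Worked example (added comment): on a POSIX system,
# env.RelativePath('/a/b/c', '/a/d/e', sep='/') strips the common '/a' prefix
# and returns '../../d/e'; identical directory arguments return '.'.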
#------------------------------------------------------------------------------
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
# Add methods to environment
env.AddMethod(ApplySConscript)
env.AddMethod(BuildSConscript)
env.AddMethod(FilterOut)
env.AddMethod(Overlap)
env.AddMethod(RelativePath)
env.AddMethod(SubstList2)
| apache-2.0 |
smips/Temporary_Insanity | TI/src/TI.py | 1 | 4468 | import libtcodpy as libtcod
import sys
from time import sleep
import os, math, random
sys.path.insert(0, os.path.realpath(__file__).replace("TI.py","World"))
sys.path.insert(0, os.path.realpath(__file__).replace("TI.py","Engine"))
sys.path.insert(0, os.path.realpath(__file__).replace("TI.py","Scripts"))
import GameObject,Tile,DataGrinder,Actor,Prop,Camera,ScriptHandler
import Map
DEBUG = 1
game_iteration = 0
objects = []
#actual size of the window
SCREEN_WIDTH = 80
SCREEN_HEIGHT = 50
DISPLAY_WIDTH = 60
DISPLAY_HEIGHT = 50
FOV_RECOMPUTE = True
LIMIT_FPS = 60 #60 frames-per-second maximum
FPS_CONSOLE = libtcod.console_new(10,1)
def dprint(arg):
global DEBUG
if DEBUG:
print(arg)
def handle_keys():
global player, map, FOV_RECOMPUTE, enemy
key = libtcod.console_check_for_keypress(True)
if key.vk == libtcod.KEY_ENTER and key.lalt:
#Alt+Enter: toggle fullscreen
libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
elif key.vk == libtcod.KEY_ESCAPE:
return True #exit game
#Call a test script
elif key.vk == libtcod.KEY_KP5:
enemy.tick(map)
FOV_RECOMPUTE = True
#movement keys
if key.vk == (libtcod.KEY_KP8):
player.move(0,-1, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP2):
player.move(0,1, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP4):
player.move(-1,0, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP6):
player.move(1,0, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP7):
player.move(-1,-1, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP9):
player.move(1,-1, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP1):
player.move(-1,1, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP3):
player.move(1,1, map)
FOV_RECOMPUTE = True
if key.vk != libtcod.KEY_NONE:
return False
def update():
pass
def render():
global map, camera, player, SCREEN_WIDTH, SCREEN_HEIGHT, FOV_RECOMPUTE
camera.move_camera(player.x, player.y, map.width, map.height)
libtcod.console_set_default_foreground(0, libtcod.white)
temp_player_x, temp_player_y = camera.to_camera_coordinates(player.x, player.y)
if FOV_RECOMPUTE:
libtcod.console_clear(0)
libtcod.map_compute_fov(map.fov_map, player.x, player.y, 7, True, 1)
FOV_RECOMPUTE = False
for x in range(DISPLAY_WIDTH):
for y in range(DISPLAY_HEIGHT):
(map_x, map_y) = (camera.x + x, camera.y + y)
distance = get_distance(player.x, map_x, player.y, map_y)
map.map[map_x][map_y].draw(camera, map, distance)
libtcod.console_print(FPS_CONSOLE, 0, 0, 'FPS: ' + str(libtcod.sys_get_fps()))
libtcod.console_blit(FPS_CONSOLE, 0, 0, 10, 1, 0, 0, 0)
libtcod.console_flush()
def get_distance(x1, x2, y1, y2):
dx = x2 - x1
dy = y2 - y1
return int(math.sqrt(dx ** 2 + dy ** 2))
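# e.g. get_distance(0, 3, 0, 4) == 5 (Euclidean distance truncated to an int)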
def tick():
global map, FOV_RECOMPUTE
for object in map.objects:
if object.name == 'Player':
FOV_RECOMPUTE = object.tick(map)
else:
object.tick(map)
#############################################
# Initialization & Main Loop
#############################################
dprint('Initialization started')
libtcod.console_set_custom_font('assets/arial10x10.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'python/libtcod tutorial', False)
libtcod.sys_set_fps(LIMIT_FPS)
map = Map.Map(1)
player = Actor.Actor(map.rooms[0].center.x, map.rooms[0].center.y, 1, map)
enemy = Actor.Actor(map.rooms[0].center.x + 2, map.rooms[0].center.y + 2, 2, map)
map.objects.append(player)
map.objects.append(enemy)
camera = Camera.Camera(player.x, player.y, DISPLAY_WIDTH, DISPLAY_HEIGHT)
dprint('Initialization complete')
while not libtcod.console_is_window_closed():
#Tick is currently causing the game to run in real time......... FIX ASAP!!!
update()
render()
tick()
exit = False
#exit game if needed
if exit:
break
| mit |
HubLot/PBxplore | pbxplore/tests/test_regression.py | 2 | 21493 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Regression tests for PBxplore.
This test suite run the various PBxplore programs with various argument, and
makes sure the output is the expected one. The aim is to check that the
programs are not broken during development.
Be careful this test suite does not test that the output is right. It just
test that the output is the expected one based on a previous version.
"""
# Use print as a function like in python 3
from __future__ import print_function
from os import path
from uuid import uuid1
from functools import wraps
import os
import subprocess
import shutil
import sys
import pytest
import MDAnalysis
import matplotlib
try:
import weblogo
IS_WEBLOGO = True
except ImportError:
IS_WEBLOGO = False
here = os.path.abspath(os.path.dirname(__file__))
# Resources for the tests are stored in the following directory
REFDIR = os.path.join(here, "test_data/")
class TemplateTestCase(object):
"""
Template TestCase class for the other TestCase class to inherit from.
Children class must overload the `_build_command_line` and the
`_validate_output` methods.
"""
def _run_program_and_validate(self, out_run_dir, reference, **kwargs):
"""
Run the program to test and validate its outputs.
"""
# Build the command line to run. This relies on the _build_command_line
# method that is a virtual method, which must be overloaded by the
# child class.
command = self._build_command_line(str(out_run_dir), **kwargs)
print(command)
# Run the command.
exe = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = exe.communicate()
print(out.decode('utf-8'))
print(err.decode('utf-8'))
# The return code should be 0.
assert exe.returncode == 0, 'Program exited with a {} code.'.format(
exe.returncode)
# Validate the output files. This relies on the _validate_output
# virtual method.
self._validate_output(str(out_run_dir), reference, **kwargs)
def _build_command_line(self, **kwargs):
"""
Build the command line to run.
This is a virtual method. It must be overloaded by the child class.
"""
raise NotImplementedError
def _validate_output(self, reference, **kwargs):
"""
Validate the output files.
This is a virtual method. It must be overloaded by the child class.
"""
raise NotImplementedError
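# A minimal sketch (hypothetical, added comment, not collected by pytest) of how
# a child class is expected to fill in the two virtual methods:
#
#     class MyToolCase(TemplateTestCase):
#         def _build_command_line(self, out_run_dir, input_file, **kwargs):
#             return ['mytool', '-i', input_file, '-o', out_run_dir]
#         def _validate_output(self, out_run_dir, reference, **kwargs):
#             _assert_identical_files(os.path.join(out_run_dir, 'out.txt'),
#                                     os.path.join(REFDIR, reference))
#
# 'mytool' and 'out.txt' are placeholder names used only for illustration.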
class TestPBAssign(TemplateTestCase):
"""
Regression tests for PBAssign.py
"""
references = ["1BTA", "1AY7", "2LFU", "3ICH"]
extensions = [".pdb", ".cif.gz"]
def _run_PBassign(self, out_run_dir, pdbid, extension,
multiple=None, indir=REFDIR):
"""
Run PBassign on one or more PDB entries with the given file extension.
If `multiple` is set, all entries are passed in a single call and
`multiple` is used as the output basename; otherwise the entry name is used.
"""
if multiple is None:
test_input = path.join(REFDIR, pdbid + extension)
out_basename = path.join(out_run_dir, pdbid)
input_args = ['-p', test_input]
else:
input_args = []
for basename in pdbid:
input_args += ['-p', path.join(REFDIR, basename + extension)]
out_basename = path.join(out_run_dir, multiple)
run_list = (['PBassign'] + input_args + ['-o', out_basename + extension])
exe = subprocess.Popen(run_list,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = exe.communicate()
print(out.decode('utf-8'))
print(err.decode('utf-8'))
return exe.returncode, out_run_dir
def _test_PBassign_options(self, out_run_dir, basename, extension, outfiles,
multiple=None, expected_exit=0):
out_run_dir = str(out_run_dir)
if multiple is not None:
out_name = multiple
status, out_run_dir = self._run_PBassign(out_run_dir, basename, extension, multiple)
assert status == expected_exit, \
'PBassign stopped with a {0} exit code'.format(status)
assert len(os.listdir(out_run_dir)) == len(outfiles),\
('PBassign did not produced the right number of files: '
'{0} files produced instead of {1}').format(
len(os.listdir(out_run_dir)), len(outfiles))
out_name = basename if multiple is None else multiple
for outfile in (template.format(out_name + extension)
for template in outfiles):
test_file = path.join(out_run_dir, outfile)
ref_file = path.join(REFDIR, outfile)
_assert_identical_files(test_file, ref_file)
@pytest.mark.parametrize('reference', references)
@pytest.mark.parametrize('extension', extensions)
def test_fasta(self, tmpdir, reference, extension):
"""
Run PBAssign on PDB files, and check the fasta output.
"""
self._test_PBassign_options(tmpdir, reference, extension,
['{0}.PB.fasta'])
@pytest.mark.parametrize('extension', extensions)
def test_multiple_inputs(self, tmpdir, extension):
"""
Run PBassign with multiple inputs.
"""
self._test_PBassign_options(tmpdir, self.references, extension,
['{0}.PB.fasta'], multiple='all')
def test_xtc_input(self, tmpdir):
"""
Run PBassign on a trajectory in the XTC format.
MDAnalysis is required to read the trajectory; the test expects PBassign
to run successfully and produce the right output.
"""
name = 'barstar_md_traj'
out_run_dir = str(tmpdir)
output_fname = name + '.PB.fasta'
call_list = ['PBassign',
'-x', os.path.join(REFDIR, name + '.xtc'),
'-g', os.path.join(REFDIR, name + '.gro'),
'-o', os.path.join(out_run_dir, name)]
exe = subprocess.Popen(call_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = exe.communicate()
status = exe.wait()
print(out.decode('utf-8'))
print(err.decode('utf-8'))
# MDanalysis is available, PBassign should run and produce the
# correct output
assert status == 0, 'PBassign exited with an error'
_assert_identical_files(os.path.join(REFDIR, output_fname),
os.path.join(out_run_dir, output_fname))
@pytest.mark.xfail(strict=True, raises=AssertionError)
def test_different_outputs(self, tmpdir):
"""
Test if the tests properly fail if an output content is different from
expected.
"""
reference = "test_fail"
extension = ".pdb"
self._test_PBassign_options(tmpdir, reference, extension, ['{0}.PB.fasta'])
class TestPBcount(TemplateTestCase):
"""
Test running PBcount.
"""
def _build_command_line(self, out_run_dir, input_files, output, first_residue=None):
output_full_path = os.path.join(out_run_dir, output)
command = ['PBcount', '-o', output_full_path]
for input_file in input_files:
command += ['-f', os.path.join(REFDIR, input_file)]
if first_residue is not None:
command += ['--first-residue', str(first_residue)]
return command
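# For illustration (hypothetical paths, added comment): with output='output', a
# single input file and first_residue=20, the list built above looks like
# ['PBcount', '-o', '<out_run_dir>/output',
#  '-f', '<REFDIR>/count_single1.PB.fasta', '--first-residue', '20'].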
def _validate_output(self, out_run_dir, reference, output, **kwargs):
reference_full_path = os.path.join(REFDIR, reference)
output_full_path = os.path.join(out_run_dir,
output + '.PB.count')
_assert_identical_files(output_full_path, reference_full_path)
def test_single_file_single_model(self, tmpdir):
"""
Run PBcount with a single input file that contains a single model.
"""
input_files = ['count_single1.PB.fasta', ]
output = 'output'
reference = 'count_single1.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output)
def test_single_file_multiple_models(self, tmpdir):
"""
Run PBcount with a single input file that contains multiple models.
"""
input_files = ['count_multi1.PB.fasta', ]
output = 'output'
reference = 'count_multi1.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output)
def test_multiple_files_single_model(self, tmpdir):
"""
Run PBcount with multiple input files that contain a single model.
"""
input_files = ['count_single1.PB.fasta',
'count_single2.PB.fasta',
'count_single3.PB.fasta']
output = 'output'
reference = 'count_single123.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output)
def test_multiple_files_multiple_models(self, tmpdir):
"""
Run PBcount with multiple input files that contain multiple models each.
"""
input_files = ['count_multi1.PB.fasta',
'count_multi2.PB.fasta',
'count_multi3.PB.fasta']
output = 'output'
reference = 'count_multi123.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output)
def test_first_residue_positive(self, tmpdir):
"""
Test PBcount on with the --first-residue option and a positive value.
"""
input_files = ['count_multi1.PB.fasta',
'count_multi2.PB.fasta',
'count_multi3.PB.fasta']
output = 'output'
reference = 'count_multi123_first20.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output,
first_residue=20)
def test_first_residue_negative(self, tmpdir):
"""
Test PBcount on with the --first-residue option and a negative value.
"""
input_files = ['count_multi1.PB.fasta',
'count_multi2.PB.fasta',
'count_multi3.PB.fasta']
output = 'output'
reference = 'count_multi123_first-20.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output,
first_residue=-20)
class TestPBstat(TemplateTestCase):
def _build_command_line(self, out_run_dir, input_file, output,
mapdist=False, neq=False,
logo=False, image_format=None,
residue_min=None, residue_max=None):
input_full_path = os.path.join(REFDIR, input_file)
output_full_path = os.path.join(str(out_run_dir), output)
command = ['PBstat', '-f', input_full_path, '-o', output_full_path]
if mapdist:
command += ['--map']
if neq:
command += ['--neq']
if logo:
command += ['--logo']
if image_format is not None:
command += ['--image-format', image_format]
if residue_min is not None:
command += ['--residue-min', str(residue_min)]
if residue_max is not None:
command += ['--residue-max', str(residue_max)]
return command
def _validate_output(self, out_run_dir, reference, input_file, output,
mapdist=False, neq=False, logo=False, image_format=None,
residue_min=None, residue_max=None, **kwargs):
suffix_residue = ''
if residue_min or residue_max:
suffix_residue = ".{}-{}".format(residue_min, residue_max)
suffix_args = ''
extension = '.png'
if neq:
suffix_args = '.Neq'
if mapdist:
suffix_args = '.map'
if logo:
suffix_args = '.logo'
if image_format is None:
extension = '.png'
else:
extension = '.' + image_format
reference_full_path = os.path.join(REFDIR, reference + '.PB'
+ suffix_args + suffix_residue)
output = os.path.join(str(out_run_dir), output)
output_full_path = output + '.PB' + suffix_args + suffix_residue
if neq:
# Assess the validity of the Neq file
_assert_identical_files(output_full_path, reference_full_path)
# Assess the creation of the graph file (png or pdf)
value, msg = _file_validity(output_full_path + extension)
assert value, msg
def test_neq(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
neq=True)
self._run_program_and_validate(tmpdir,
reference='count_single123',
input_file='count_single123.PB.count',
output='output',
neq=True)
def test_neq_with_range_residues(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
neq=True,
residue_min=10, residue_max=30)
def test_neq_with_first_residue(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123_first20',
input_file='count_multi123_first20.PB.count',
output='output',
neq=True)
def test_neq_with_first_range_residues(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123_first20',
input_file='count_multi123_first20.PB.count',
output='output',
neq=True,
residue_min=25, residue_max=35)
def test_neq_pdf(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
neq=True, image_format='pdf')
def test_mapdist(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
mapdist=True)
def test_mapdist_pdf(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
mapdist=True, image_format='pdf')
def test_mapdist_with_range_residues(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
mapdist=True,
residue_min=10, residue_max=30)
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True)
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo_logo_pdf(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True, image_format='pdf')
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo_logo_png(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True, image_format='png')
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo_logo_jpg(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True, image_format='jpg')
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
@pytest.mark.xfail(strict=True, raises=AssertionError)
def test_weblogo_logo_invalid_format(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True, image_format='invalid')
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo_with_range_residues(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True,
residue_min=10, residue_max=30)
def _file_validity(file_a):
"""
Check whether file_a exists and is not empty.
Return a tuple containing:
- True if all went well, False otherwise
- the error message, empty if True is returned
"""
if os.path.isfile(file_a):
if os.path.getsize(file_a) > 0:
return True, ''
else:
return False, '{0} is empty'.format(file_a)
else:
return False, '{0} does not exist'.format(file_a)
def _same_file_content(file_a, file_b, comment_char=">"):
"""
Return True if two files are identical. Take file paths as arguments.
Ignore the content of lines which start with `comment_char`.
"""
with open(file_a) as f1, open(file_b) as f2:
# Compare content line by line
for f1_line, f2_line in zip(f1, f2):
if (f1_line != f2_line):
# If both lines start with a comment, the difference is accepted no
# matter the content of the comment; any other difference means the
# files are not identical.
f1_firstchar = f1_line.strip().startswith(comment_char)
f2_firstchar = f2_line.strip().startswith(comment_char)
if not (f1_firstchar and f2_firstchar):
print(file_a, file_b)
print(f1_line, f2_line, sep='//')
return False
# Check if one file is longer than the other; that would leave one
# file iterator not completely consumed
for infile in (f1, f2):
try:
next(infile)
except StopIteration:
pass
else:
# The iterator is not consumed, it means that this file is
# longer than the other
print('File too long')
return False
# If we reach this line, it means that we did not find any difference
return True
def _assert_identical_files(file_a, file_b, comment_char=">"):
"""
Raise an AssertionError if the two files are not identical.
Take file paths as arguments.
Ignore the content of lines which start with `comment_char`.
"""
assert _same_file_content(file_a, file_b), '{0} and {1} are not identical'\
.format(file_a, file_b)
| mit |
alexpap/exareme | exareme-tools/madis/src/lib/chardet/mbcharsetprober.py | 215 | 3182 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from constants import eStart, eError, eItsMe
from charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = ['\x00', '\x00']
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = ['\x00', '\x00']
def get_charset_name(self):
pass
def feed(self, aBuf):
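# Descriptive note (added comment): each byte is first run through the coding
# state machine, which rejects illegal multi-byte sequences (eError), accepts
# unambiguous ones (eItsMe), or signals the end of a complete character
# (eStart); completed characters are then fed to the distribution analyzer,
# which accumulates the frequency statistics used for the confidence value.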
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == eError:
if constants._debug:
sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
self._mState = constants.eNotMe
break
elif codingState == eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i-1:i+1], charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if self._mDistributionAnalyzer.got_enough_data() and \
(self.get_confidence() > constants.SHORTCUT_THRESHOLD):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
| mit |
to266/hyperspy | hyperspy/misc/elements.py | 1 | 302319 | # Database
#
# The X-ray line energies are taken from Chantler2005,
# Chantler, C.T., Olsen, K., Dragoset, R.A., Kishore, A.R.,
# Kotochigova, S.A., and Zucker, D.S.
#
# The line weights, more precisely the approximate line weights for the K, L
# and M shells, are taken from the EPQ library
from hyperspy.misc import utils
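# Example access (added comment, for illustration only): the energy of the
# Ru Ka line defined below can be read as
# elements['Ru']['Atomic_properties']['Xray_lines']['Ka']['energy (keV)']  # 19.2793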
elements = {'Ru': {'Physical_properties': {'density (g/cm^3)': 12.37},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.33039,
'energy (keV)': 2.6833},
'Kb': {'weight': 0.15,
'energy (keV)': 21.6566},
'Ka': {'weight': 1.0,
'energy (keV)': 19.2793},
'Lb2': {'weight': 0.07259,
'energy (keV)': 2.8359},
'La': {'weight': 1.0,
'energy (keV)': 2.5585},
'Ln': {'weight': 0.0126,
'energy (keV)': 2.3819},
'Ll': {'weight': 0.0411,
'energy (keV)': 2.2529},
'Lb3': {'weight': 0.0654,
'energy (keV)': 2.7634},
'Lg3': {'weight': 0.0115,
'energy (keV)': 3.1809},
'Lg1': {'weight': 0.02176,
'energy (keV)': 2.9649}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 279.0,
'filename': 'Ru.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 279.0,
'filename': 'Ru.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 461.0,
'filename': 'Ru.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 483.0,
'filename': 'Ru.M3'}}},
'General_properties': {'Z': 44,
'atomic_weight': 101.07,
'name': 'ruthenium'}},
'Re': {'Physical_properties': {'density (g/cm^3)': 21.02},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.4408,
'energy (keV)': 10.0098},
'Kb': {'weight': 0.15,
'energy (keV)': 69.3091},
'Ka': {'weight': 1.0,
'energy (keV)': 61.1411},
'M2N4': {'weight': 0.01,
'energy (keV)': 2.4079},
'Ma': {'weight': 1.0,
'energy (keV)': 1.8423},
'Lb4': {'weight': 0.09869,
'energy (keV)': 9.8451},
'La': {'weight': 1.0,
'energy (keV)': 8.6524},
'Ln': {'weight': 0.0151,
'energy (keV)': 9.027},
'M3O4': {'energy (keV)': 2.36124,
'weight': 0.001},
'Ll': {'weight': 0.05299,
'energy (keV)': 7.6036},
'Mb': {'weight': 0.59443,
'energy (keV)': 1.9083},
'Mg': {'weight': 0.08505,
'energy (keV)': 2.1071},
'Lb2': {'weight': 0.21219,
'energy (keV)': 10.2751},
'Lb3': {'weight': 0.1222,
'energy (keV)': 10.1594},
'M3O5': {'energy (keV)': 2.36209,
'weight': 0.01},
'Lg3': {'weight': 0.0331,
'energy (keV)': 12.0823},
'Lg1': {'weight': 0.08864,
'energy (keV)': 11.685},
'Mz': {'weight': 0.01344,
'energy (keV)': 1.4385}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1883.0,
'filename': 'Re.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1949.0,
'filename': 'Re.M5'}}},
'General_properties': {'Z': 75,
'atomic_weight': 186.207,
'name': 'rhenium'}},
'Ra': {'Physical_properties': {'density (g/cm^3)': 5.0},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.4,
'energy (keV)': 15.2359},
'Kb': {'weight': 0.15,
'energy (keV)': 100.1302},
'Ka': {'weight': 1.0,
'energy (keV)': 88.4776},
'M2N4': {'weight': 0.00674,
'energy (keV)': 3.8536},
'Lb4': {'weight': 0.06209,
'energy (keV)': 14.7472},
'La': {'weight': 1.0,
'energy (keV)': 12.3395},
'Ln': {'weight': 0.0133,
'energy (keV)': 13.6623},
'Ll': {'weight': 0.06429,
'energy (keV)': 10.6224},
'Mb': {'weight': 0.64124,
'energy (keV)': 2.9495},
'Mg': {'weight': 0.33505,
'energy (keV)': 3.1891},
'Lb2': {'weight': 0.23579,
'energy (keV)': 14.8417},
'Lg3': {'weight': 0.017,
'energy (keV)': 18.3576},
'Lg1': {'weight': 0.08,
'energy (keV)': 17.8484},
'Lb3': {'weight': 0.06,
'energy (keV)': 15.4449},
'Mz': {'weight': 0.03512,
'energy (keV)': 2.2258}}},
'General_properties': {'Z': 88,
'atomic_weight': 226,
'name': 'radium'}},
'Rb': {'Physical_properties': {'density (g/cm^3)': 1.532},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.39095,
'energy (keV)': 1.7521},
'Kb': {'weight': 0.1558,
'energy (keV)': 14.9612},
'Ka': {'weight': 1.0,
'energy (keV)': 13.3953},
'La': {'weight': 1.0,
'energy (keV)': 1.6941},
'Ln': {'weight': 0.01709,
'energy (keV)': 1.5418},
'Ll': {'weight': 0.0441,
'energy (keV)': 1.4823},
'Lb3': {'weight': 0.04709,
'energy (keV)': 1.8266},
'Lg3': {'weight': 0.0058,
'energy (keV)': 2.0651}},
'Binding_energies': {'M2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 247.0,
'filename': 'Rb.M3'},
'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 110.0,
'filename': 'Rb.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 110.0,
'filename': 'Rb.M5'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 1864.0,
'filename': 'Rb.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1804.0,
'filename': 'Rb.L3'},
'M3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 238.0,
'filename': 'Rb.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 2065.0,
'filename': 'Rb.L1'}}},
'General_properties': {'Z': 37,
'atomic_weight': 85.4678,
'name': 'rubidium'}},
'Rn': {'Physical_properties': {'density (g/cm^3)': 0.00973},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.38463,
'energy (keV)': 14.3156},
'Kb': {'weight': 0.15,
'energy (keV)': 94.866},
'Ka': {'weight': 1.0,
'energy (keV)': 83.7846},
'M2N4': {'weight': 0.00863,
'energy (keV)': 3.5924},
'Lb4': {'weight': 0.06,
'energy (keV)': 13.89},
'La': {'weight': 1.0,
'energy (keV)': 11.727},
'Ln': {'weight': 0.0134,
'energy (keV)': 12.8551},
'Ll': {'weight': 0.0625,
'energy (keV)': 10.1374},
'Mb': {'weight': 0.64124,
'energy (keV)': 2.80187},
'Mg': {'weight': 0.21845,
'energy (keV)': 3.001},
'Lb2': {'weight': 0.2325,
'energy (keV)': 14.0824},
'Lg3': {'weight': 0.017,
'energy (keV)': 17.281},
'Lg1': {'weight': 0.08,
'energy (keV)': 16.7705},
'Lb3': {'weight': 0.0607,
'energy (keV)': 14.511},
'Mz': {'weight': 0.0058,
'energy (keV)': 2.1244}}},
'General_properties': {'Z': 86,
'atomic_weight': 222,
'name': 'radon'}},
'Rh': {'Physical_properties': {'density (g/cm^3)': 12.45},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.33463,
'energy (keV)': 2.8344},
'Kb': {'weight': 0.15,
'energy (keV)': 22.7237},
'Ka': {'weight': 1.0,
'energy (keV)': 20.2161},
'Lb2': {'weight': 0.08539,
'energy (keV)': 3.0013},
'Lb4': {'weight': 0.0395,
'energy (keV)': 2.8909},
'La': {'weight': 1.0,
'energy (keV)': 2.6968},
'Ln': {'weight': 0.0126,
'energy (keV)': 2.519},
'Ll': {'weight': 0.0411,
'energy (keV)': 2.3767},
'Lb3': {'weight': 0.06669,
'energy (keV)': 2.9157},
'Lg3': {'weight': 0.0121,
'energy (keV)': 3.364},
'Lg1': {'weight': 0.02623,
'energy (keV)': 3.1436}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 308.0,
'filename': 'Rh.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 312.0,
'filename': 'Rh.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 496.0,
'filename': 'Rh.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 521.0,
'filename': 'Rh.M3'}}},
'General_properties': {'Z': 45,
'atomic_weight': 102.9055,
'name': 'rhodium'}},
'H': {'Physical_properties': {'density (g/cm^3)': 8.99e-5},
'Atomic_properties': {'Xray_lines': {'Ka': {'weight': 1.0,
'energy (keV)': 0.0013598}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 13.598,
'filename': ''}}},
'General_properties': {'Z': 1,
'atomic_weight': 1.00794,
'name': 'hydrogen'}},
'He': {'Physical_properties': {'density (g/cm^3)': 1.785e-4},
'Atomic_properties': {'Xray_lines': {'Ka': {'weight': 1.0,
'energy (keV)': 0.0024587}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 24.587,
'filename': ''}}},
'General_properties': {'Z': 2,
'atomic_weight': 4.002602,
'name': 'helium'}},
'Be': {'Physical_properties': {'density (g/cm^3)': 1.848},
'Atomic_properties': {'Xray_lines': {'Ka': {'weight': 1.0,
'energy (keV)': 0.10258}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 111.0,
'filename': 'Be.K1'}}},
'General_properties': {'Z': 4,
'atomic_weight': 9.012182,
'name': 'beryllium'}},
'Ba': {'Physical_properties': {'density (g/cm^3)': 3.51},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.43048,
'energy (keV)': 4.8275},
'Kb': {'weight': 0.15,
'energy (keV)': 36.3784},
'Ka': {'weight': 1.0,
'energy (keV)': 32.1936},
'Lb2': {'weight': 0.1905,
'energy (keV)': 5.1571},
'Lb4': {'weight': 0.08859,
'energy (keV)': 4.8521},
'La': {'weight': 1.0,
'energy (keV)': 4.4663},
'Ln': {'weight': 0.0151,
'energy (keV)': 4.3308},
'Ll': {'weight': 0.04299,
'energy (keV)': 3.9542},
'Lb3': {'weight': 0.13779,
'energy (keV)': 4.9266},
'Lg3': {'weight': 0.0331,
'energy (keV)': 5.8091},
'Lg1': {'weight': 0.07487,
'energy (keV)': 5.5311}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 781.0,
'filename': 'Ba.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 796.0,
'filename': 'Ba.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1062.0,
'filename': 'Ba.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 1137.0,
'filename': 'Ba.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 90.0,
'filename': 'Ba.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 90.0,
'filename': 'Ba.N5'}}},
'General_properties': {'Z': 56,
'atomic_weight': 137.327,
'name': 'barium'}},
'Bi': {'Physical_properties': {'density (g/cm^3)': 9.78},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.4,
'energy (keV)': 13.0235},
'Kb': {'weight': 0.15,
'energy (keV)': 87.349},
'Ka': {'weight': 1.0,
'energy (keV)': 77.1073},
'M2N4': {'weight': 0.00863,
'energy (keV)': 3.2327},
'Ma': {'weight': 1.0,
'energy (keV)': 2.4222},
'Lb4': {'weight': 0.05639,
'energy (keV)': 12.6912},
'La': {'weight': 1.0,
'energy (keV)': 10.839},
'Ln': {'weight': 0.0134,
'energy (keV)': 11.712},
'M3O4': {'energy (keV)': 3.1504,
'weight': 0.01},
'Ll': {'weight': 0.06,
'energy (keV)': 9.4195},
'Mb': {'weight': 0.64124,
'energy (keV)': 2.5257},
'Mg': {'weight': 0.21845,
'energy (keV)': 2.7369},
'Lb2': {'weight': 0.2278,
'energy (keV)': 12.9786},
'Lb3': {'weight': 0.0607,
'energy (keV)': 13.2106},
'M3O5': {'energy (keV)': 3.1525,
'weight': 0.01},
'Lg3': {'weight': 0.017,
'energy (keV)': 15.7086},
'Lg1': {'weight': 0.08,
'energy (keV)': 15.2475},
'Mz': {'weight': 0.0058,
'energy (keV)': 1.9007}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 2580.0,
'filename': 'Bi.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 2688.0,
'filename': 'Bi.M5'}}},
'General_properties': {'Z': 83,
'atomic_weight': 208.9804,
'name': 'bismuth'}},
'Br': {'Physical_properties': {'density (g/cm^3)': 3.12},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.39175,
'energy (keV)': 1.5259},
'Kb': {'weight': 0.15289,
'energy (keV)': 13.2922},
'Ka': {'weight': 1.0,
'energy (keV)': 11.9238},
'La': {'weight': 1.0,
'energy (keV)': 1.4809},
'Ln': {'weight': 0.0182,
'energy (keV)': 1.3395},
'Ll': {'weight': 0.0462,
'energy (keV)': 1.2934},
'Lb3': {'weight': 0.04629,
'energy (keV)': 1.6005}},
'Binding_energies': {'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 1596.0,
'filename': 'Br.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1550.0,
'filename': 'Br.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1782.0,
'filename': 'Br.L1'}}},
'General_properties': {'Z': 35,
'atomic_weight': 79.904,
'name': 'bromine'}},
'P': {'Physical_properties': {'density (g/cm^3)': 1.823},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.0498,
'energy (keV)': 2.13916},
'Ka': {'weight': 1.0,
'energy (keV)': 2.0133}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 2146.0,
'filename': 'P.K1'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 132.0,
'filename': 'P.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 189.0,
'filename': 'P.L1'}}},
'General_properties': {'Z': 15,
'atomic_weight': 30.973762,
'name': 'phosphorus'}},
'Os': {'Physical_properties': {'density (g/cm^3)': 22.59},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.43207,
'energy (keV)': 10.3542},
'Kb': {'weight': 0.15,
'energy (keV)': 71.4136},
'Ka': {'weight': 1.0,
'energy (keV)': 62.9999},
'M2N4': {'weight': 0.02901,
'energy (keV)': 2.5028},
'Ma': {'weight': 1.0,
'energy (keV)': 1.9138},
'Lb4': {'weight': 0.08369,
'energy (keV)': 10.1758},
'La': {'weight': 1.0,
'energy (keV)': 8.9108},
'Ln': {'weight': 0.01479,
'energy (keV)': 9.3365},
'M3O4': {'energy (keV)': 2.45015,
'weight': 0.005},
'Ll': {'weight': 0.05389,
'energy (keV)': 7.8224},
'Mb': {'weight': 0.59443,
'energy (keV)': 1.9845},
'Mg': {'weight': 0.08505,
'energy (keV)': 2.1844},
'Lb2': {'weight': 0.2146,
'energy (keV)': 10.5981},
'Lb3': {'weight': 0.1024,
'energy (keV)': 10.5108},
'M3O5': {'energy (keV)': 2.45117,
'weight': 0.01},
'Lg3': {'weight': 0.028,
'energy (keV)': 12.4998},
'Lg1': {'weight': 0.08768,
'energy (keV)': 12.0956},
'Mz': {'weight': 0.01344,
'energy (keV)': 1.4919}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1960.0,
'filename': 'Os.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 2031.0,
'filename': 'Os.M5'}}},
'General_properties': {'Z': 76,
'atomic_weight': 190.23,
'name': 'osmium'}},
'Ge': {'Physical_properties': {'density (g/cm^3)': 5.323},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.16704,
'energy (keV)': 1.2191},
'Kb': {'weight': 0.1322,
'energy (keV)': 10.9823},
'Ka': {'weight': 1.0,
'energy (keV)': 9.8864},
'La': {'weight': 1.0,
'energy (keV)': 1.188},
'Ln': {'weight': 0.02,
'energy (keV)': 1.0678},
'Ll': {'weight': 0.0511,
'energy (keV)': 1.0367},
'Lb3': {'weight': 0.04429,
'energy (keV)': 1.2935}},
'Binding_energies': {'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 1248.0,
'filename': 'Ge.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1217.0,
'filename': 'Ge.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1414.0,
'filename': 'Ge.L1'}}},
'General_properties': {'Z': 32,
'atomic_weight': 72.64,
'name': 'germanium'}},
'Gd': {'Physical_properties': {'density (g/cm^3)': 7.901},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.44127,
'energy (keV)': 6.7131},
'Kb': {'weight': 0.15,
'energy (keV)': 48.6951},
'Ka': {'weight': 1.0,
'energy (keV)': 42.9963},
'M2N4': {'weight': 0.014,
'energy (keV)': 1.5478},
'Ma': {'weight': 1.0,
'energy (keV)': 1.17668},
'Lb4': {'weight': 0.08789,
'energy (keV)': 6.6873},
'La': {'weight': 1.0,
'energy (keV)': 6.0576},
'Ln': {'weight': 0.01489,
'energy (keV)': 6.0495},
'Ll': {'weight': 0.04629,
'energy (keV)': 5.362},
'Mb': {'weight': 0.88,
'energy (keV)': 1.20792},
'Mg': {'weight': 0.261,
'energy (keV)': 1.4035},
'Lb2': {'weight': 0.2014,
'energy (keV)': 7.1023},
'Lb3': {'weight': 0.1255,
'energy (keV)': 6.8316},
'Lg3': {'weight': 0.032,
'energy (keV)': 8.1047},
'Lg1': {'weight': 0.08207,
'energy (keV)': 7.7898},
'Mz': {'weight': 0.06,
'energy (keV)': 0.9143}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1185.0,
'filename': 'Gd.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1217.0,
'filename': 'Gd.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1544.0,
'filename': 'Gd.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 1688.0,
'filename': 'Gd.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 141.0,
'filename': 'Gd.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 141.0,
'filename': 'Gd.N5'}}},
'General_properties': {'Z': 64,
'atomic_weight': 157.25,
'name': 'gadolinium'}},
'Ga': {'Physical_properties': {'density (g/cm^3)': 5.904},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.16704,
'energy (keV)': 1.1249},
'Kb': {'weight': 0.1287,
'energy (keV)': 10.2642},
'Ka': {'weight': 1.0,
'energy (keV)': 9.2517},
'La': {'weight': 1.0,
'energy (keV)': 1.098},
'Ln': {'weight': 0.02509,
'energy (keV)': 0.9842},
'Ll': {'weight': 0.0544,
'energy (keV)': 0.9573},
'Lb3': {'weight': 0.0461,
'energy (keV)': 1.1948}},
'Binding_energies': {'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 1142.0,
'filename': 'Ga.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1115.0,
'filename': 'Ga.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1298.0,
'filename': 'Ga.L1'}}},
'General_properties': {'Z': 31,
'atomic_weight': 69.723,
'name': 'gallium'}},
'Pr': {'Physical_properties': {'density (g/cm^3)': 6.64},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.42872,
'energy (keV)': 5.4893},
'Kb': {'weight': 0.15,
'energy (keV)': 40.7484},
'Ka': {'weight': 1.0,
'energy (keV)': 36.0263},
'M2N4': {'weight': 0.055,
'energy (keV)': 1.2242},
'Ma': {'weight': 1.0,
'energy (keV)': 0.8936},
'Lb4': {'weight': 0.0864,
'energy (keV)': 5.4974},
'La': {'weight': 1.0,
'energy (keV)': 5.0333},
'Ln': {'weight': 0.01489,
'energy (keV)': 4.9294},
'Ll': {'weight': 0.044,
'energy (keV)': 4.4533},
'Mb': {'weight': 0.85,
'energy (keV)': 0.9476},
'Mg': {'weight': 0.6,
'energy (keV)': 1.129},
'Lb2': {'weight': 0.19519,
'energy (keV)': 5.8511},
'Lb3': {'weight': 0.13089,
'energy (keV)': 5.5926},
'Lg3': {'weight': 0.0321,
'energy (keV)': 6.6172},
'Lg1': {'weight': 0.07687,
'energy (keV)': 6.3272},
'Mz': {'weight': 0.068,
'energy (keV)': 0.7134}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 931.0,
'filename': 'Pr.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 951.0,
'filename': 'Pr.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1242.0,
'filename': 'Pr.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 1337.0,
'filename': 'Pr.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 114.0,
'filename': 'Pr.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 114.0,
'filename': 'Pr.N5'}}},
'General_properties': {'Z': 59,
'atomic_weight': 140.90765,
'name': 'praseodymium'}},
'Pt': {'Physical_properties': {'density (g/cm^3)': 21.09},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.4088,
'energy (keV)': 11.0707},
'Kb': {'weight': 0.15,
'energy (keV)': 75.7494},
'Ka': {'weight': 1.0,
'energy (keV)': 66.8311},
'M2N4': {'weight': 0.02901,
'energy (keV)': 2.6957},
'Ma': {'weight': 1.0,
'energy (keV)': 2.0505},
'Lb4': {'weight': 0.0662,
'energy (keV)': 10.8534},
'La': {'weight': 1.0,
'energy (keV)': 9.4421},
'Ln': {'weight': 0.01399,
'energy (keV)': 9.9766},
'M3O4': {'energy (keV)': 2.63796,
'weight': 0.005},
'Ll': {'weight': 0.0554,
'energy (keV)': 8.2677},
'Mb': {'weight': 0.59443,
'energy (keV)': 2.1276},
'Mg': {'weight': 0.08505,
'energy (keV)': 2.3321},
'Lb2': {'weight': 0.21829,
'energy (keV)': 11.2504},
'Lb3': {'weight': 0.0783,
'energy (keV)': 11.2345},
'M3O5': {'energy (keV)': 2.63927,
'weight': 0.01},
'Lg3': {'weight': 0.0218,
'energy (keV)': 13.3609},
'Lg1': {'weight': 0.08448,
'energy (keV)': 12.9418},
'Mz': {'weight': 0.01344,
'energy (keV)': 1.6026}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 2122.0,
'filename': 'Pt.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 2202.0,
'filename': 'Pt.M5'}}},
'General_properties': {'Z': 78,
'atomic_weight': 195.084,
'name': 'platinum'}},
'C': {'Physical_properties': {'density (g/cm^3)': 2.26},
'Atomic_properties': {'Xray_lines': {'Ka': {'weight': 1.0,
'energy (keV)': 0.2774}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 284.0,
'filename': 'C.K1'},
'K1a': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 284.0,
'filename': 'C.K1a'}}},
'General_properties': {'Z': 6,
'atomic_weight': 12.0107,
'name': 'carbon'}},
'Pb': {'Physical_properties': {'density (g/cm^3)': 11.34},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.3836,
'energy (keV)': 12.6144},
'Kb': {'weight': 0.15,
'energy (keV)': 84.9381},
'Ka': {'weight': 1.0,
'energy (keV)': 74.9693},
'M2N4': {'weight': 0.00863,
'energy (keV)': 3.119},
'Ma': {'weight': 1.0,
'energy (keV)': 2.3459},
'Lb4': {'weight': 0.055,
'energy (keV)': 12.3066},
'La': {'weight': 1.0,
'energy (keV)': 10.5512},
'Ln': {'weight': 0.0132,
'energy (keV)': 11.3493},
'M3O4': {'energy (keV)': 3.0446,
'weight': 0.01},
'Ll': {'weight': 0.0586,
'energy (keV)': 9.1845},
'Mb': {'weight': 0.64124,
'energy (keV)': 2.4427},
'Mg': {'weight': 0.21845,
'energy (keV)': 2.6535},
'Lb2': {'weight': 0.2244,
'energy (keV)': 12.6223},
'Lb3': {'weight': 0.06049,
'energy (keV)': 12.7944},
'M3O5': {'energy (keV)': 3.0472,
'weight': 0.01},
'Lg3': {'weight': 0.017,
'energy (keV)': 15.2163},
'Lg1': {'weight': 0.08256,
'energy (keV)': 14.7648},
'Mz': {'weight': 0.0058,
'energy (keV)': 1.8395}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 2484.0,
'filename': 'Pb.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 2586.0,
'filename': 'Pb.M5'}}},
'General_properties': {'Z': 82,
'atomic_weight': 207.2,
'name': 'lead'}},
'Pa': {'Physical_properties': {'density (g/cm^3)': 15.37},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.4,
'energy (keV)': 16.7025},
'Kb': {'weight': 0.15,
'energy (keV)': 108.4272},
'Ka': {'weight': 1.0,
'energy (keV)': 95.8679},
'M2N4': {'weight': 0.00674,
'energy (keV)': 4.2575},
'Ma': {'weight': 1.0,
'energy (keV)': 3.0823},
'Lb4': {'weight': 0.04,
'energy (keV)': 16.1037},
'La': {'weight': 1.0,
'energy (keV)': 13.2913},
'Ln': {'weight': 0.0126,
'energy (keV)': 14.9468},
'M3O4': {'energy (keV)': 4.07712,
'weight': 0.01},
'Ll': {'weight': 0.0682,
'energy (keV)': 11.3662},
'Mb': {'weight': 0.64124,
'energy (keV)': 3.24},
'Mg': {'weight': 0.33505,
'energy (keV)': 3.4656},
'Lb2': {'weight': 0.236,
'energy (keV)': 16.0249},
'Lb3': {'weight': 0.06,
'energy (keV)': 16.9308},
'M3O5': {'energy (keV)': 4.08456,
'weight': 0.01},
'Lg3': {'weight': 0.017,
'energy (keV)': 20.0979},
'Lg1': {'weight': 0.08,
'energy (keV)': 19.5703},
'Mz': {'weight': 0.03512,
'energy (keV)': 2.4351}}},
'General_properties': {'Z': 91,
'atomic_weight': 231.03586,
'name': 'protactinium'}},
'Pd': {'Physical_properties': {'density (g/cm^3)': 12.023},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.34375,
'energy (keV)': 2.9903},
'Kb': {'weight': 0.15,
'energy (keV)': 23.8188},
'Ka': {'weight': 1.0,
'energy (keV)': 21.177},
'Lb2': {'weight': 0.10349,
'energy (keV)': 3.16828},
'Lb4': {'weight': 0.0407,
'energy (keV)': 3.0452},
'La': {'weight': 1.0,
'energy (keV)': 2.8386},
'Ln': {'weight': 0.0129,
'energy (keV)': 2.6604},
'Ll': {'weight': 0.0412,
'energy (keV)': 2.5034},
'Lb3': {'weight': 0.0682,
'energy (keV)': 3.0728},
'Lg3': {'weight': 0.0125,
'energy (keV)': 3.5532},
'Lg1': {'weight': 0.03256,
'energy (keV)': 3.32485}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 335.0,
'filename': 'Pd.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 340.0,
'filename': 'Pd.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 531.0,
'filename': 'Pd.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 559.0,
'filename': 'Pd.M3'}}},
'General_properties': {'Z': 46,
'atomic_weight': 106.42,
'name': 'palladium'}},
'Cd': {'Physical_properties': {'density (g/cm^3)': 8.65},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.35704,
'energy (keV)': 3.3165},
'Kb': {'weight': 0.15,
'energy (keV)': 26.0947},
'Ka': {'weight': 1.0,
'energy (keV)': 23.1737},
'Lb2': {'weight': 0.1288,
'energy (keV)': 3.5282},
'Lb4': {'weight': 0.0469,
'energy (keV)': 3.3673},
'La': {'weight': 1.0,
'energy (keV)': 3.1338},
'Ln': {'weight': 0.0132,
'energy (keV)': 2.9568},
'Ll': {'weight': 0.04169,
'energy (keV)': 2.7673},
'Lb3': {'weight': 0.07719,
'energy (keV)': 3.4015},
'Lg3': {'weight': 0.0151,
'energy (keV)': 3.9511},
'Lg1': {'weight': 0.0416,
'energy (keV)': 3.7177}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 404.0,
'filename': 'Cd.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 411.0,
'filename': 'Cd.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 616.0,
'filename': 'Cd.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 651.0,
'filename': 'Cd.M3'}}},
'General_properties': {'Z': 48,
'atomic_weight': 112.411,
'name': 'cadmium'}},
'Po': {'Physical_properties': {'density (g/cm^3)': 9.196},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.38536,
'energy (keV)': 13.4463},
'Kb': {'weight': 0.15,
'energy (keV)': 89.8031},
'Ka': {'weight': 1.0,
'energy (keV)': 79.2912},
'M2N4': {'weight': 0.00863,
'energy (keV)': 3.3539},
'Lb4': {'weight': 0.05709,
'energy (keV)': 13.0852},
'La': {'weight': 1.0,
'energy (keV)': 11.1308},
'Ln': {'weight': 0.0133,
'energy (keV)': 12.0949},
'Ll': {'weight': 0.0607,
'energy (keV)': 9.6644},
'Mb': {'weight': 0.64124,
'energy (keV)': 2.62266},
'Mg': {'weight': 0.21845,
'energy (keV)': 2.8285},
'Lb2': {'weight': 0.2289,
'energy (keV)': 13.3404},
'Lg3': {'weight': 0.017,
'energy (keV)': 16.2343},
'Lg1': {'weight': 0.08,
'energy (keV)': 15.7441},
'Lb3': {'weight': 0.0603,
'energy (keV)': 13.6374},
'Mz': {'weight': 0.00354,
'energy (keV)': 1.978}}},
'General_properties': {'Z': 84,
'atomic_weight': 209,
'name': 'polonium'}},
'Pm': {'Physical_properties': {'density (g/cm^3)': 7.264},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.4308,
'energy (keV)': 5.9613},
'Kb': {'weight': 0.15,
'energy (keV)': 43.8271},
'Ka': {'weight': 1.0,
'energy (keV)': 38.7247},
'M2N4': {'weight': 0.028,
'energy (keV)': 1.351},
'Ma': {'weight': 1.0,
'energy (keV)': 0.9894},
'Lb4': {'weight': 0.07799,
'energy (keV)': 5.9565},
'La': {'weight': 1.0,
'energy (keV)': 5.4324},
'Ln': {'weight': 0.01479,
'energy (keV)': 5.3663},
'Ll': {'weight': 0.0448,
'energy (keV)': 4.8128},
'Mb': {'weight': 0.89,
'energy (keV)': 1.0475},
'Mg': {'weight': 0.4,
'energy (keV)': 1.2365},
'Lb2': {'weight': 0.196,
'energy (keV)': 6.3389},
'Lb3': {'weight': 0.1247,
'energy (keV)': 6.071},
'Lg3': {'weight': 0.0311,
'energy (keV)': 7.1919},
'Lg1': {'weight': 0.0784,
'energy (keV)': 6.8924},
'Mz': {'weight': 0.068,
'energy (keV)': 0.7909}}},
'General_properties': {'Z': 61,
'atomic_weight': 145,
'name': 'promethium'}},
'Ho': {'Physical_properties': {'density (g/cm^3)': 8.795},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.45056,
'energy (keV)': 7.5263},
'Kb': {'weight': 0.15,
'energy (keV)': 53.8765},
'Ka': {'weight': 1.0,
'energy (keV)': 47.5466},
'M2N4': {'weight': 0.072,
'energy (keV)': 1.7618},
'Ma': {'weight': 1.0,
'energy (keV)': 1.3477},
'Lb4': {'weight': 0.09039,
'energy (keV)': 7.4714},
'La': {'weight': 1.0,
'energy (keV)': 6.7197},
'Ln': {'weight': 0.0151,
'energy (keV)': 6.7895},
'Ll': {'weight': 0.04759,
'energy (keV)': 5.9428},
'Mb': {'weight': 0.59443,
'energy (keV)': 1.3878},
'Mg': {'weight': 0.1418,
'energy (keV)': 1.5802},
'Lb2': {'weight': 0.23563,
'energy (keV)': 7.9101},
'Lb3': {'weight': 0.06,
'energy (keV)': 7.653},
'Lg3': {'weight': 0.0321,
'energy (keV)': 9.0876},
'Lg1': {'weight': 0.08448,
'energy (keV)': 8.7568},
'Mz': {'weight': 0.6629,
'energy (keV)': 1.0448}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1351.0,
'filename': 'Ho.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1391.0,
'filename': 'Ho.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1741.0,
'filename': 'Ho.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 1923.0,
'filename': 'Ho.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 161.0,
'filename': 'Ho.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 161.0,
'filename': 'Ho.N5'}}},
'General_properties': {'Z': 67,
'atomic_weight': 164.93032,
'name': 'holmium'}},
'Hf': {'Physical_properties': {'density (g/cm^3)': 13.31},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.46231,
'energy (keV)': 9.023},
'Kb': {'weight': 0.15,
'energy (keV)': 63.2432},
'Ka': {'weight': 1.0,
'energy (keV)': 55.7901},
'M2N4': {'weight': 0.01,
'energy (keV)': 2.1416},
'Ma': {'weight': 1.0,
'energy (keV)': 1.6446},
'Lb4': {'weight': 0.10189,
'energy (keV)': 8.9053},
'La': {'weight': 1.0,
'energy (keV)': 7.899},
'Ln': {'weight': 0.0158,
'energy (keV)': 8.1385},
'Ll': {'weight': 0.05089,
'energy (keV)': 6.9598},
'Mb': {'weight': 0.59443,
'energy (keV)': 1.6993},
'Mg': {'weight': 0.08505,
'energy (keV)': 1.8939},
'Lb2': {'weight': 0.2048,
'energy (keV)': 9.347},
'Lb3': {'weight': 0.1316,
'energy (keV)': 9.1631},
'Lg3': {'weight': 0.0347,
'energy (keV)': 10.8903},
'Lg1': {'weight': 0.08968,
'energy (keV)': 10.5156},
'Mz': {'weight': 0.06,
'energy (keV)': 1.2813}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1662.0,
'filename': 'Hf.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1716.0,
'filename': 'Hf.M5'}}},
'General_properties': {'Z': 72,
'atomic_weight': 178.49,
'name': 'hafnium'}},
'Hg': {'Physical_properties': {'density (g/cm^3)': 13.534},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.39504,
'energy (keV)': 11.8238},
'Kb': {'weight': 0.15,
'energy (keV)': 80.2552},
'Ka': {'weight': 1.0,
'energy (keV)': 70.8184},
'M2N4': {'weight': 0.02901,
'energy (keV)': 2.9002},
'Ma': {'weight': 1.0,
'energy (keV)': 2.1964},
'Lb4': {'weight': 0.0566,
'energy (keV)': 11.5608},
'La': {'weight': 1.0,
'energy (keV)': 9.989},
'Ln': {'weight': 0.0136,
'energy (keV)': 10.6471},
'M3O4': {'energy (keV)': 2.8407,
'weight': 0.005},
'Ll': {'weight': 0.05709,
'energy (keV)': 8.7223},
'Mb': {'weight': 0.64124,
'energy (keV)': 2.2827},
'Mg': {'weight': 0.08505,
'energy (keV)': 2.4873},
'Lb2': {'weight': 0.2221,
'energy (keV)': 11.9241},
'Lb3': {'weight': 0.06469,
'energy (keV)': 11.9922},
'M3O5': {'energy (keV)': 2.8407,
'weight': 0.01},
'Lg3': {'weight': 0.0184,
'energy (keV)': 14.2683},
'Lg1': {'weight': 0.0832,
'energy (keV)': 13.8304},
'Mz': {'weight': 0.01344,
'energy (keV)': 1.7239}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 2295.0,
'filename': 'Hg.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 2385.0,
'filename': 'Hg.M5'}}},
'General_properties': {'Z': 80,
'atomic_weight': 200.59,
'name': 'mercury'}},
'Mg': {'Physical_properties': {'density (g/cm^3)': 1.738},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.01,
'energy (keV)': 1.305},
'Ka': {'weight': 1.0,
'energy (keV)': 1.2536}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1305.0,
'filename': 'Mg.K1'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 51.0,
'filename': 'Mg.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 51.0,
'filename': 'Mg.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 89.0,
'filename': 'Mg.L1'}}},
'General_properties': {'Z': 12,
'atomic_weight': 24.305,
'name': 'magnesium'}},
'K': {'Physical_properties': {'density (g/cm^3)': 0.856},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.1039,
'energy (keV)': 3.5896},
'Ka': {'weight': 1.0,
'energy (keV)': 3.3138}},
'Binding_energies': {'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 294.0,
'filename': 'K.L3'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 296.0,
'filename': 'K.L3'},
'L1a': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 377.0,
'filename': 'K.L1a'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 377.0,
'filename': 'K.L1'}}},
'General_properties': {'Z': 19,
'atomic_weight': 39.0983,
'name': 'potassium'}},
'Mn': {'Physical_properties': {'density (g/cm^3)': 7.47},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.1252,
'energy (keV)': 6.4904},
'Ka': {'weight': 1.0,
'energy (keV)': 5.8987},
'La': {'weight': 1.0,
'energy (keV)': 0.63316},
'Ln': {'weight': 0.1898,
'energy (keV)': 0.5675},
'Ll': {'weight': 0.3898,
'energy (keV)': 0.5564},
'Lb3': {'weight': 0.0263,
'energy (keV)': 0.7204}},
'Binding_energies': {'M2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 51.0,
'filename': 'Mn.M3'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 651.0,
'filename': 'Mn.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 640.0,
'filename': 'Mn.L3'},
'M3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 51.0,
'filename': 'Mn.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 769.0,
'filename': 'Mn.L1'}}},
'General_properties': {'Z': 25,
'atomic_weight': 54.938045,
'name': 'manganese'}},
'O': {'Physical_properties': {'density (g/cm^3)': 0.001429},
'Atomic_properties': {'Xray_lines': {'Ka': {'weight': 1.0,
'energy (keV)': 0.5249}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 532.0,
'filename': 'O.K1'}}},
'General_properties': {'Z': 8,
'atomic_weight': 15.9994,
'name': 'oxygen'}},
'S': {'Physical_properties': {'density (g/cm^3)': 1.96},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.06525,
'energy (keV)': 2.46427},
'Ka': {'weight': 1.0,
'energy (keV)': 2.3072}},
'Binding_energies': {'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 165.0,
'filename': 'S.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 229.0,
'filename': 'S.L1'}}},
'General_properties': {'Z': 16,
'atomic_weight': 32.065,
'name': 'sulfur'}},
'W': {'Physical_properties': {'density (g/cm^3)': 19.25},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.3679,
'energy (keV)': 9.6724},
'Kb': {'weight': 0.15,
'energy (keV)': 67.244},
'Ka': {'weight': 1.0,
'energy (keV)': 59.3182},
'M2N4': {'weight': 0.01,
'energy (keV)': 2.3161},
'Ma': {'weight': 1.0,
'energy (keV)': 1.7756},
'Lb4': {'weight': 0.05649,
'energy (keV)': 9.5249},
'La': {'weight': 1.0,
'energy (keV)': 8.3976},
'Ln': {'weight': 0.01155,
'energy (keV)': 8.7244},
'M3O4': {'energy (keV)': 2.2749,
'weight': 0.001},
'Ll': {'weight': 0.04169,
'energy (keV)': 7.3872},
'Mb': {'weight': 0.59443,
'energy (keV)': 1.8351},
'Mg': {'weight': 0.08505,
'energy (keV)': 2.0356},
'Lb2': {'weight': 0.21385,
'energy (keV)': 9.9614},
'Lb3': {'weight': 0.07077,
'energy (keV)': 9.8188},
'M3O5': {'energy (keV)': 2.281,
'weight': 0.01},
'Lg3': {'weight': 0.0362,
'energy (keV)': 11.6745},
'Lg1': {'weight': 0.05658,
'energy (keV)': 11.2852},
'Mz': {'weight': 0.01344,
'energy (keV)': 1.3839}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1809.0,
'filename': 'W.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1872.0,
'filename': 'W.M5'}}},
'General_properties': {'Z': 74,
'atomic_weight': 183.84,
'name': 'tungsten'}},
'Zn': {'Physical_properties': {'density (g/cm^3)': 7.14},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.1679,
'energy (keV)': 1.0347},
'Kb': {'weight': 0.12605,
'energy (keV)': 9.572},
'Ka': {'weight': 1.0,
'energy (keV)': 8.6389},
'La': {'weight': 1.0,
'energy (keV)': 1.0116},
'Ln': {'weight': 0.0368,
'energy (keV)': 0.9069},
'Ll': {'weight': 0.0603,
'energy (keV)': 0.8838},
'Lb3': {'weight': 0.002,
'energy (keV)': 1.107}},
'Binding_energies': {'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 87.0,
'filename': 'Zn.M3'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 1043.0,
'filename': 'Zn.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1020.0,
'filename': 'Zn.L3'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 87.0,
'filename': 'Zn.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1194.0,
'filename': 'Zn.L1'}}},
'General_properties': {'Z': 30,
'atomic_weight': 65.38,
'name': 'zinc'}},
'Eu': {'Physical_properties': {'density (g/cm^3)': 5.244},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.43904,
'energy (keV)': 6.4565},
'Kb': {'weight': 0.15,
'energy (keV)': 47.0384},
'Ka': {'weight': 1.0,
'energy (keV)': 41.5421},
'M2N4': {'weight': 0.013,
'energy (keV)': 1.4807},
'Ma': {'weight': 1.0,
'energy (keV)': 1.0991},
'Lb4': {'weight': 0.0874,
'energy (keV)': 6.4381},
'La': {'weight': 1.0,
'energy (keV)': 5.846},
'Ln': {'weight': 0.015,
'energy (keV)': 5.8171},
'Ll': {'weight': 0.04559,
'energy (keV)': 5.1769},
'Mb': {'weight': 0.87,
'energy (keV)': 1.15769},
'Mg': {'weight': 0.26,
'energy (keV)': 1.3474},
'Lb2': {'weight': 0.1985,
'energy (keV)': 6.8437},
'Lb3': {'weight': 0.1265,
'energy (keV)': 6.5714},
'Lg3': {'weight': 0.0318,
'energy (keV)': 7.7954},
'Lg1': {'weight': 0.08064,
'energy (keV)': 7.4839},
'Mz': {'weight': 0.06,
'energy (keV)': 0.8743}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1131.0,
'filename': 'Eu.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1161.0,
'filename': 'Eu.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1481.0,
'filename': 'Eu.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 1614.0,
'filename': 'Eu.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 134.0,
'filename': 'Eu.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 134.0,
'filename': 'Eu.N5'}}},
'General_properties': {'Z': 63,
'atomic_weight': 151.964,
'name': 'europium'}},
'Zr': {'Physical_properties': {'density (g/cm^3)': 6.511},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.37912,
'energy (keV)': 2.1243},
'Kb': {'weight': 0.15,
'energy (keV)': 17.6671},
'Ka': {'weight': 1.0,
'energy (keV)': 15.7753},
'Lb2': {'weight': 0.0177,
'energy (keV)': 2.2223},
'La': {'weight': 1.0,
'energy (keV)': 2.0423},
'Ln': {'weight': 0.0153,
'energy (keV)': 1.8764},
'Ll': {'weight': 0.04209,
'energy (keV)': 1.792},
'Lb3': {'weight': 0.05219,
'energy (keV)': 2.2011},
'Lg3': {'weight': 0.0082,
'energy (keV)': 2.5029},
'Lg1': {'weight': 0.006,
'energy (keV)': 2.30268}},
'Binding_energies': {'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 344.0,
'filename': 'Zr.M3'},
'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 181.0,
'filename': 'Zr.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 181.0,
'filename': 'Zr.M5'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 2307.0,
'filename': 'Zr.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 2222.0,
'filename': 'Zr.L3'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 330.0,
'filename': 'Zr.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 2532.0,
'filename': 'Zr.L1'}}},
'General_properties': {'Z': 40,
'atomic_weight': 91.224,
'name': 'zirconium'}},
'Er': {'Physical_properties': {'density (g/cm^3)': 9.066},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.45263,
'energy (keV)': 7.811},
'Kb': {'weight': 0.15,
'energy (keV)': 55.6737},
'Ka': {'weight': 1.0,
'energy (keV)': 49.1276},
'M2N4': {'weight': 0.0045,
'energy (keV)': 1.8291},
'Ma': {'weight': 1.0,
'energy (keV)': 1.405},
'Lb4': {'weight': 0.0922,
'energy (keV)': 7.7455},
'La': {'weight': 1.0,
'energy (keV)': 6.9486},
'Ln': {'weight': 0.0153,
'energy (keV)': 7.0578},
'Ll': {'weight': 0.0482,
'energy (keV)': 6.1514},
'Mb': {'weight': 0.59443,
'energy (keV)': 1.449},
'Mg': {'weight': 0.08505,
'energy (keV)': 1.6442},
'Lb2': {'weight': 0.2005,
'energy (keV)': 8.1903},
'Lb3': {'weight': 0.1258,
'energy (keV)': 7.9395},
'Lg3': {'weight': 0.0324,
'energy (keV)': 9.4313},
'Lg1': {'weight': 0.08487,
'energy (keV)': 9.0876},
'Mz': {'weight': 0.06,
'energy (keV)': 1.0893}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1409.0,
'filename': 'Er.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1453.0,
'filename': 'Er.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1812.0,
'filename': 'Er.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 2006.0,
'filename': 'Er.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 168.0,
'filename': 'Er.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 168.0,
'filename': 'Er.N5'}}},
'General_properties': {'Z': 68,
'atomic_weight': 167.259,
'name': 'erbium'}},
'Ni': {'Physical_properties': {'density (g/cm^3)': 8.908},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.1677,
'energy (keV)': 0.8683},
'Kb': {'weight': 0.1277,
'energy (keV)': 8.2647},
'Ka': {'weight': 1.0,
'energy (keV)': 7.4781},
'La': {'weight': 1.0,
'energy (keV)': 0.8511},
'Ln': {'weight': 0.09693,
'energy (keV)': 0.7601},
'Ll': {'weight': 0.14133,
'energy (keV)': 0.7429},
'Lb3': {'weight': 0.00199,
'energy (keV)': 0.94}},
'Binding_energies': {'M2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 68.0,
'filename': 'Ni.M3'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 872.0,
'filename': 'Ni.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 855.0,
'filename': 'Ni.L3'},
'M3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 68.0,
'filename': 'Ni.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1008.0,
'filename': 'Ni.L1'}}},
'General_properties': {'Z': 28,
'atomic_weight': 58.6934,
'name': 'nickel'}},
'Na': {'Physical_properties': {'density (g/cm^3)': 0.968},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.01,
'energy (keV)': 1.0721},
'Ka': {'weight': 1.0,
'energy (keV)': 1.041}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1072.0,
'filename': 'Na.K1'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 31.0,
'filename': 'Na.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 63.0,
'filename': 'Na.L1'}}},
'General_properties': {'Z': 11,
'atomic_weight': 22.98976928,
'name': 'sodium'}},
'Nb': {'Physical_properties': {'density (g/cm^3)': 8.57},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.32519,
'energy (keV)': 2.2573},
'Kb': {'weight': 0.15,
'energy (keV)': 18.6226},
'Ka': {'weight': 1.0,
'energy (keV)': 16.6151},
'Lb2': {'weight': 0.03299,
'energy (keV)': 2.3705},
'La': {'weight': 1.0,
'energy (keV)': 2.1659},
'Ln': {'weight': 0.0129,
'energy (keV)': 1.9963},
'Ll': {'weight': 0.04169,
'energy (keV)': 1.9021},
'Lb3': {'weight': 0.06429,
'energy (keV)': 2.3347},
'Lg3': {'weight': 0.0103,
'energy (keV)': 2.6638},
'Lg1': {'weight': 0.00975,
'energy (keV)': 2.4615}},
'Binding_energies': {'M2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 378.0,
'filename': 'Nb.M3'},
'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 202.3,
'filename': 'Nb.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 205.0,
'filename': 'Nb.M5'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 2465.0,
'filename': 'Nb.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 2371.0,
'filename': 'Nb.L3'},
'M3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 363.0,
'filename': 'Nb.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 2698.0,
'filename': 'Nb.L1'}}},
'General_properties': {'Z': 41,
'atomic_weight': 92.90638,
'name': 'niobium'}},
'Nd': {'Physical_properties': {'density (g/cm^3)': 7.01},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.42672,
'energy (keV)': 5.722},
'Kb': {'weight': 0.15,
'energy (keV)': 42.2715},
'Ka': {'weight': 1.0,
'energy (keV)': 37.361},
'M2N4': {'weight': 0.052,
'energy (keV)': 1.2853},
'Ma': {'weight': 1.0,
'energy (keV)': 0.9402},
'Lb4': {'weight': 0.0858,
'energy (keV)': 5.7232},
'La': {'weight': 1.0,
'energy (keV)': 5.2302},
'Ln': {'weight': 0.01469,
'energy (keV)': 5.1462},
'Ll': {'weight': 0.04429,
'energy (keV)': 4.6326},
'Mb': {'weight': 0.99,
'energy (keV)': 0.9965},
'Mg': {'weight': 0.625,
'energy (keV)': 1.1799},
'Lb2': {'weight': 0.1957,
'energy (keV)': 6.0904},
'Lb3': {'weight': 0.12869,
'energy (keV)': 5.8286},
'Lg3': {'weight': 0.0318,
'energy (keV)': 6.9014},
'Lg1': {'weight': 0.07712,
'energy (keV)': 6.604},
'Mz': {'weight': 0.069,
'energy (keV)': 0.7531}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 978.0,
'filename': 'Nd.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1000.0,
'filename': 'Nd.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1297.0,
'filename': 'Nd.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 1403.0,
'filename': 'Nd.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 118.0,
'filename': 'Nd.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 118.0,
'filename': 'Nd.N5'}}},
'General_properties': {'Z': 60,
'atomic_weight': 144.242,
'name': 'neodymium'}},
'Ne': {'Physical_properties': {'density (g/cm^3)': 0.0009},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.01,
'energy (keV)': 0.8669},
'Ka': {'weight': 1.0,
'energy (keV)': 0.8486}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 867.0,
'filename': 'Ne.K1'}}},
'General_properties': {'Z': 10,
'atomic_weight': 20.1791,
'name': 'neon'}},
'Fr': {'Physical_properties': {'density (g/cm^3)': 'NaN'},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.38327,
'energy (keV)': 14.7703},
'Kb': {'weight': 0.15,
'energy (keV)': 97.474},
'Ka': {'weight': 1.0,
'energy (keV)': 86.1058},
'M2N4': {'weight': 0.00674,
'energy (keV)': 3.7237},
'Lb4': {'weight': 0.0603,
'energy (keV)': 14.312},
'La': {'weight': 1.0,
'energy (keV)': 12.0315},
'Ln': {'weight': 0.0134,
'energy (keV)': 13.2545},
'Ll': {'weight': 0.06339,
'energy (keV)': 10.3792},
'Mb': {'weight': 0.64124,
'energy (keV)': 2.88971},
'Mg': {'weight': 0.21845,
'energy (keV)': 3.086},
'Lb2': {'weight': 0.2337,
'energy (keV)': 14.4542},
'Lg3': {'weight': 0.017,
'energy (keV)': 17.829},
'Lg1': {'weight': 0.08,
'energy (keV)': 17.3032},
'Lb3': {'weight': 0.05969,
'energy (keV)': 14.976},
'Mz': {'weight': 0.0058,
'energy (keV)': 2.1897}}},
'General_properties': {'Z': 87,
'atomic_weight': 223,
'name': 'francium'}},
'Fe': {'Physical_properties': {'density (g/cm^3)': 7.874},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.1272,
'energy (keV)': 7.058},
'Ka': {'weight': 1.0,
'energy (keV)': 6.4039},
'La': {'weight': 1.0,
'energy (keV)': 0.7045},
'Ln': {'weight': 0.12525,
'energy (keV)': 0.6282},
'Ll': {'weight': 0.3086,
'energy (keV)': 0.6152},
'Lb3': {'weight': 0.02448,
'energy (keV)': 0.7921}},
'Binding_energies': {'K': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 7113.0,
'filename': 'Fe.K1'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 846.0,
'filename': 'Fe.L1'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 721.0,
'filename': 'Fe.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 708.0,
'filename': 'Fe.L3'},
'M3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 57.0,
'filename': 'Fe.M3'},
'M2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 57.0,
'filename': 'Fe.M3'}}},
'General_properties': {'Z': 26,
'atomic_weight': 55.845,
'name': 'iron'}},
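    # --- Usage note (written as comments so this dict literal stays syntactically valid) ---
    # A minimal sketch of how the nested mapping above can be queried, assuming the whole
    # literal is bound to a name such as `elements_db` (that name is an assumption, not
    # something confirmed by this file):
    #
    #     fe = elements_db['Fe']
    #     ka_energy = fe['Atomic_properties']['Xray_lines']['Ka']['energy (keV)']            # 6.4039 keV
    #     l3_onset  = fe['Atomic_properties']['Binding_energies']['L3']['onset_energy (eV)']  # 708.0 eV
    #
    # Each element entry follows the same layout: 'Physical_properties',
    # 'Atomic_properties' (holding one or both of 'Xray_lines' and 'Binding_energies';
    # e.g. 'Li' lists only binding energies, 'Tc' only X-ray lines), and
    # 'General_properties' ('Z', 'atomic_weight', 'name').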
'B': {'Physical_properties': {'density (g/cm^3)': 2.46},
'Atomic_properties': {'Xray_lines': {'Ka': {'weight': 1.0,
'energy (keV)': 0.1833}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 188.0,
'filename': 'B.K1'}}},
'General_properties': {'Z': 5,
'atomic_weight': 10.811,
'name': 'boron'}},
'F': {'Physical_properties': {'density (g/cm^3)': 0.001696},
'Atomic_properties': {'Xray_lines': {'Ka': {'weight': 1.0,
'energy (keV)': 0.6768}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 685.0,
'filename': 'F.K1'}}},
'General_properties': {'Z': 9,
'atomic_weight': 18.9984032,
'name': 'fluorine'}},
'Sr': {'Physical_properties': {'density (g/cm^3)': 2.63},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.37975,
'energy (keV)': 1.8718},
'Kb': {'weight': 0.15,
'energy (keV)': 15.8355},
'Ka': {'weight': 1.0,
'energy (keV)': 14.165},
'La': {'weight': 1.0,
'energy (keV)': 1.8065},
'Ln': {'weight': 0.01669,
'energy (keV)': 1.6493},
'Ll': {'weight': 0.04309,
'energy (keV)': 1.5821},
'Lb3': {'weight': 0.047,
'energy (keV)': 1.9472},
'Lg3': {'weight': 0.0065,
'energy (keV)': 2.1964}},
'Binding_energies': {'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 280.0,
'filename': 'Sr.M3'},
'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 134.0,
'filename': 'Sr.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 134.0,
'filename': 'Sr.M5'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 2007.0,
'filename': 'Sr.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1940.0,
'filename': 'Sr.L3'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 269.0,
'filename': 'Sr.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 2216.0,
'filename': 'Sr.L1'}}},
'General_properties': {'Z': 38,
'atomic_weight': 87.62,
'name': 'strontium'}},
'N': {'Physical_properties': {'density (g/cm^3)': 0.001251},
'Atomic_properties': {'Xray_lines': {'Ka': {'weight': 1.0,
'energy (keV)': 0.3924}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 401.0,
'filename': 'N.K1'}}},
'General_properties': {'Z': 7,
'atomic_weight': 14.0067,
'name': 'nitrogen'}},
'Kr': {'Physical_properties': {'density (g/cm^3)': 0.00375},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.39031,
'energy (keV)': 1.6383},
'Kb': {'weight': 0.1538,
'energy (keV)': 14.1118},
'Ka': {'weight': 1.0,
'energy (keV)': 12.6507},
'La': {'weight': 1.0,
'energy (keV)': 1.586},
'Ln': {'weight': 0.0175,
'energy (keV)': 1.43887},
'Ll': {'weight': 0.04509,
'energy (keV)': 1.38657},
'Lb3': {'weight': 0.0465,
'energy (keV)': 1.7072},
'Lg3': {'weight': 0.005,
'energy (keV)': 1.921}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 89.0,
'filename': 'Kr.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 89.0,
'filename': 'Kr.M5'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 1727.0,
'filename': 'Kr.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1675.0,
'filename': 'Kr.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1921.0,
'filename': 'Kr.L1'}}},
'General_properties': {'Z': 36,
'atomic_weight': 83.798,
'name': 'krypton'}},
'Si': {'Physical_properties': {'density (g/cm^3)': 2.33},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.02779,
'energy (keV)': 1.8389},
'Ka': {'weight': 1.0,
'energy (keV)': 1.7397}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1839.0,
'filename': 'Si.K1'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 99.8,
'filename': 'Si.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 99.2,
'filename': 'Si.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 149.7,
'filename': 'Si.L1'}}},
'General_properties': {'Z': 14,
'atomic_weight': 28.0855,
'name': 'silicon'}},
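    # A second hedged sketch, again assuming the literal is bound to `elements_db`:
    # collecting the EELS edges listed under 'Binding_energies' for one element and
    # ordering them by onset energy. Each edge record carries 'relevance', 'factor',
    # 'onset_energy (eV)', and 'filename'.
    #
    #     edges = elements_db['Si']['Atomic_properties']['Binding_energies']
    #     ordered = sorted(edges.items(), key=lambda kv: kv[1]['onset_energy (eV)'])
    #     # -> [('L3', 99.2), ('L2', 99.8), ('L1', 149.7), ('K', 1839.0)]  (values in eV)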
'Sn': {'Physical_properties': {'density (g/cm^3)': 7.31},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.43456,
'energy (keV)': 3.6628},
'Kb': {'weight': 0.15,
'energy (keV)': 28.4857},
'Ka': {'weight': 1.0,
'energy (keV)': 25.2713},
'Lb2': {'weight': 0.14689,
'energy (keV)': 3.9049},
'Lb4': {'weight': 0.0948,
'energy (keV)': 3.7083},
'La': {'weight': 1.0,
'energy (keV)': 3.444},
'Ln': {'weight': 0.0158,
'energy (keV)': 3.2723},
'Ll': {'weight': 0.0416,
'energy (keV)': 3.045},
'Lb3': {'weight': 0.1547,
'energy (keV)': 3.7503},
'Lg3': {'weight': 0.0321,
'energy (keV)': 4.3761},
'Lg1': {'weight': 0.058,
'energy (keV)': 4.1322}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 485.0,
'filename': 'Sn.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 494.0,
'filename': 'Sn.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 714.0,
'filename': 'Sn.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 756.0,
'filename': 'Sn.M3'}}},
'General_properties': {'Z': 50,
'atomic_weight': 118.71,
'name': 'tin'}},
'Sm': {'Physical_properties': {'density (g/cm^3)': 7.353},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.43512,
'energy (keV)': 6.2058},
'Kb': {'weight': 0.15,
'energy (keV)': 45.4144},
'Ka': {'weight': 1.0,
'energy (keV)': 40.118},
'M2N4': {'weight': 0.012,
'energy (keV)': 1.4117},
'Ma': {'weight': 1.0,
'energy (keV)': 1.0428},
'Lb4': {'weight': 0.08689,
'energy (keV)': 6.1961},
'La': {'weight': 1.0,
'energy (keV)': 5.636},
'Ln': {'weight': 0.01489,
'energy (keV)': 5.589},
'Ll': {'weight': 0.04519,
'energy (keV)': 4.9934},
'Mb': {'weight': 0.88,
'energy (keV)': 1.1005},
'Mg': {'weight': 0.26,
'energy (keV)': 1.2908},
'Lb2': {'weight': 0.19769,
'energy (keV)': 6.5872},
'Lb3': {'weight': 0.12669,
'energy (keV)': 6.317},
'Lg3': {'weight': 0.0318,
'energy (keV)': 7.4894},
'Lg1': {'weight': 0.07951,
'energy (keV)': 7.1828},
'Mz': {'weight': 0.06,
'energy (keV)': 0.8328}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1080.0,
'filename': 'Sm.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1106.0,
'filename': 'Sm.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1420.0,
'filename': 'Sm.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 1541.0,
'filename': 'Sm.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 130.0,
'filename': 'Sm.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 130.0,
'filename': 'Sm.N5'}}},
'General_properties': {'Z': 62,
'atomic_weight': 150.36,
'name': 'samarium'}},
'V': {'Physical_properties': {'density (g/cm^3)': 6.11},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.1225,
'energy (keV)': 5.4273},
'Ka': {'weight': 1.0,
'energy (keV)': 4.9522},
'La': {'weight': 1.0,
'energy (keV)': 0.5129},
'Ln': {'weight': 0.2805,
'energy (keV)': 0.454},
'Ll': {'weight': 0.5745,
'energy (keV)': 0.4464},
'Lb3': {'weight': 0.0154,
'energy (keV)': 0.5904}},
'Binding_energies': {'M2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 47.0,
'filename': 'V.M3'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 521.0,
'filename': 'V.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 513.0,
'filename': 'V.L3'},
'M3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 47.0,
'filename': 'V.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 628.0,
'filename': 'V.L1'}}},
'General_properties': {'Z': 23,
'atomic_weight': 50.9415,
'name': 'vanadium'}},
'Sc': {'Physical_properties': {'density (g/cm^3)': 2.985},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.12839,
'energy (keV)': 4.4605},
'Ka': {'weight': 1.0,
'energy (keV)': 4.0906},
'La': {'weight': 0.308,
'energy (keV)': 0.4022},
'Ln': {'weight': 0.488,
'energy (keV)': 0.3529},
'Ll': {'weight': 1.0,
'energy (keV)': 0.3484},
'Lb3': {'weight': 0.037,
'energy (keV)': 0.4681}},
'Binding_energies': {'M2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 32.0,
'filename': 'Sc.M3'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 407.0,
'filename': 'Sc.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 402.0,
'filename': 'Sc.L3'},
'M3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 32.0,
'filename': 'Sc.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 500.0,
'filename': 'Sc.L1'}}},
'General_properties': {'Z': 21,
'atomic_weight': 44.955912,
'name': 'scandium'}},
'Sb': {'Physical_properties': {'density (g/cm^3)': 6.697},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.4276,
'energy (keV)': 3.8435},
'Kb': {'weight': 0.15,
'energy (keV)': 29.7256},
'Ka': {'weight': 1.0,
'energy (keV)': 26.359},
'Lb2': {'weight': 0.1556,
'energy (keV)': 4.1008},
'Lb4': {'weight': 0.0932,
'energy (keV)': 3.8864},
'La': {'weight': 1.0,
'energy (keV)': 3.6047},
'Ln': {'weight': 0.0155,
'energy (keV)': 3.4367},
'Ll': {'weight': 0.0419,
'energy (keV)': 3.1885},
'Lb3': {'weight': 0.15099,
'energy (keV)': 3.9327},
'Lg3': {'weight': 0.0321,
'energy (keV)': 4.5999},
'Lg1': {'weight': 0.06064,
'energy (keV)': 4.349}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 528.0,
'filename': 'Sb.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 537.0,
'filename': 'Sb.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 766.0,
'filename': 'Sb.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 812.0,
'filename': 'Sb.M3'}}},
'General_properties': {'Z': 51,
'atomic_weight': 121.76,
'name': 'antimony'}},
'Se': {'Physical_properties': {'density (g/cm^3)': 4.819},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.38848,
'energy (keV)': 1.4195},
'Kb': {'weight': 0.1505,
'energy (keV)': 12.4959},
'Ka': {'weight': 1.0,
'energy (keV)': 11.222},
'La': {'weight': 1.0,
'energy (keV)': 1.3791},
'Ln': {'weight': 0.0187,
'energy (keV)': 1.2447},
'Ll': {'weight': 0.04759,
'energy (keV)': 1.2043},
'Lb3': {'weight': 0.047,
'energy (keV)': 1.492}},
'Binding_energies': {'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 1476.0,
'filename': 'Se.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1436.0,
'filename': 'Se.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1654.0,
'filename': 'Se.L1'}}},
'General_properties': {'Z': 34,
'atomic_weight': 78.96,
'name': 'selenium'}},
'Co': {'Physical_properties': {'density (g/cm^3)': 8.9},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.1277,
'energy (keV)': 7.6494},
'Ka': {'weight': 1.0,
'energy (keV)': 6.9303},
'La': {'weight': 1.0,
'energy (keV)': 0.7757},
'Ln': {'weight': 0.0833,
'energy (keV)': 0.6929},
'Ll': {'weight': 0.2157,
'energy (keV)': 0.6779},
'Lb3': {'weight': 0.0238,
'energy (keV)': 0.8661}},
'Binding_energies': {'M2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 62.0,
'filename': 'Co.M3'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 794.0,
'filename': 'Co.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 779.0,
'filename': 'Co.L3'},
'M3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 62.0,
'filename': 'Co.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 926.0,
'filename': 'Co.L1'}}},
'General_properties': {'Z': 27,
'atomic_weight': 58.933195,
'name': 'cobalt'}},
'Cl': {'Physical_properties': {'density (g/cm^3)': 0.003214},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.0838,
'energy (keV)': 2.8156},
'Ka': {'weight': 1.0,
'energy (keV)': 2.6224}},
'Binding_energies': {'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 200.0,
'filename': 'Cl.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 270.0,
'filename': 'Cl.L1'}}},
'General_properties': {'Z': 17,
'atomic_weight': 35.453,
'name': 'chlorine'}},
'Ca': {'Physical_properties': {'density (g/cm^3)': 1.55},
'Atomic_properties': {'Xray_lines': {'Ln': {'weight': 0.23,
'energy (keV)': 0.3063},
'Kb': {'weight': 0.112,
'energy (keV)': 4.0127},
'Ka': {'weight': 1.0,
'energy (keV)': 3.6917},
'Ll': {'weight': 1.0,
'energy (keV)': 0.3027},
'La': {'weight': 0.0,
'energy (keV)': 0.3464}},
'Binding_energies': {'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 350.0,
'filename': 'Ca.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 346.0,
'filename': 'Ca.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 438.0,
'filename': 'Ca.L1'}}},
'General_properties': {'Z': 20,
'atomic_weight': 40.078,
'name': 'calcium'}},
'Ce': {'Physical_properties': {'density (g/cm^3)': 6.689},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.43,
'energy (keV)': 5.2629},
'Kb': {'weight': 0.15,
'energy (keV)': 39.2576},
'Ka': {'weight': 1.0,
'energy (keV)': 34.7196},
'M2N4': {'weight': 0.08,
'energy (keV)': 1.1628},
'Ma': {'weight': 1.0,
'energy (keV)': 0.8455},
'Lb4': {'weight': 0.08699,
'energy (keV)': 5.276},
'La': {'weight': 1.0,
'energy (keV)': 4.8401},
'Ln': {'weight': 0.015,
'energy (keV)': 4.7296},
'Ll': {'weight': 0.0436,
'energy (keV)': 4.2888},
'Mb': {'weight': 0.91,
'energy (keV)': 0.8154},
'Mg': {'weight': 0.5,
'energy (keV)': 1.0754},
'Lb2': {'weight': 0.19399,
'energy (keV)': 5.6134},
'Lb3': {'weight': 0.1325,
'energy (keV)': 5.3634},
'Lg3': {'weight': 0.0324,
'energy (keV)': 6.3416},
'Lg1': {'weight': 0.0764,
'energy (keV)': 6.0542},
'Mz': {'weight': 0.07,
'energy (keV)': 0.6761}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 883.0,
'filename': 'Ce.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 901.0,
'filename': 'Ce.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1185.0,
'filename': 'Ce.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 1273.0,
'filename': 'Ce.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 110.0,
'filename': 'Ce.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 110.0,
'filename': 'Ce.N5'}}},
'General_properties': {'Z': 58,
'atomic_weight': 140.116,
'name': 'cerium'}},
'Xe': {'Physical_properties': {'density (g/cm^3)': 0.0059},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.42248,
'energy (keV)': 4.4183},
'Kb': {'weight': 0.15,
'energy (keV)': 33.6244},
'Ka': {'weight': 1.0,
'energy (keV)': 29.7792},
'Lb2': {'weight': 0.17699,
'energy (keV)': 4.7182},
'Lb4': {'weight': 0.08929,
'energy (keV)': 4.4538},
'La': {'weight': 1.0,
'energy (keV)': 4.1099},
'Ln': {'weight': 0.015,
'energy (keV)': 3.9591},
'Ll': {'weight': 0.0424,
'energy (keV)': 3.6376},
'Lb3': {'weight': 0.14119,
'energy (keV)': 4.5158},
'Lg3': {'weight': 0.0323,
'energy (keV)': 5.3061},
'Lg1': {'weight': 0.06848,
'energy (keV)': 5.0397}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 672.0,
'filename': 'Xe.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 685.0,
'filename': 'Xe.M5'}}},
'General_properties': {'Z': 54,
'atomic_weight': 131.293,
'name': 'xenon'}},
'Tm': {'Physical_properties': {'density (g/cm^3)': 9.321},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.45831,
'energy (keV)': 8.1023},
'Kb': {'weight': 0.15,
'energy (keV)': 57.5051},
'Ka': {'weight': 1.0,
'energy (keV)': 50.7416},
'M2N4': {'weight': 0.01,
'energy (keV)': 1.9102},
'Ma': {'weight': 1.0,
'energy (keV)': 1.4624},
'Lb4': {'weight': 0.09449,
'energy (keV)': 8.0259},
'La': {'weight': 1.0,
'energy (keV)': 7.1803},
'Ln': {'weight': 0.0156,
'energy (keV)': 7.3101},
'Ll': {'weight': 0.04889,
'energy (keV)': 6.3412},
'Mb': {'weight': 0.59443,
'energy (keV)': 1.5093},
'Mg': {'weight': 0.08505,
'energy (keV)': 1.7049},
'Lb2': {'weight': 0.20059,
'energy (keV)': 8.4684},
'Lb3': {'weight': 0.1273,
'energy (keV)': 8.2312},
'Lg3': {'weight': 0.0329,
'energy (keV)': 9.7791},
'Lg1': {'weight': 0.08615,
'energy (keV)': 9.4373},
'Mz': {'weight': 0.06,
'energy (keV)': 1.1311}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1468.0,
'filename': 'Tm.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1515.0,
'filename': 'Tm.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1884.0,
'filename': 'Tm.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 2090.0,
'filename': 'Tm.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 177.0,
'filename': 'Tm.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 177.0,
'filename': 'Tm.N5'}}},
'General_properties': {'Z': 69,
'atomic_weight': 168.93421,
'name': 'thulium'}},
'Cs': {'Physical_properties': {'density (g/cm^3)': 1.879},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.42983,
'energy (keV)': 4.6199},
'Kb': {'weight': 0.15,
'energy (keV)': 34.987},
'Ka': {'weight': 1.0,
'energy (keV)': 30.9727},
'Lb2': {'weight': 0.19589,
'energy (keV)': 4.9354},
'Lb4': {'weight': 0.08869,
'energy (keV)': 4.6493},
'La': {'weight': 1.0,
'energy (keV)': 4.2864},
'Ln': {'weight': 0.0152,
'energy (keV)': 4.1423},
'Ll': {'weight': 0.04269,
'energy (keV)': 3.7948},
'Lb3': {'weight': 0.1399,
'energy (keV)': 4.7167},
'Lg3': {'weight': 0.0325,
'energy (keV)': 5.5527},
'Lg1': {'weight': 0.07215,
'energy (keV)': 5.2806}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 726.0,
'filename': 'Cs.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 740.0,
'filename': 'Cs.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 998.0,
'filename': 'Cs.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 1065.0,
'filename': 'Cs.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 78.0,
'filename': 'Cs.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 78.0,
'filename': 'Cs.N5'}}},
'General_properties': {'Z': 55,
'atomic_weight': 132.9054519,
'name': 'cesium'}},
'Cr': {'Physical_properties': {'density (g/cm^3)': 7.14},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.134,
'energy (keV)': 5.9467},
'Ka': {'weight': 1.0,
'energy (keV)': 5.4147},
'La': {'weight': 1.0,
'energy (keV)': 0.5722},
'Ln': {'weight': 0.2353,
'energy (keV)': 0.5096},
'Ll': {'weight': 0.6903,
'energy (keV)': 0.5004},
'Lb3': {'weight': 0.0309,
'energy (keV)': 0.6521}},
'Binding_energies': {'M2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 48.0,
'filename': 'Cr.M3'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 584.0,
'filename': 'Cr.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 575.0,
'filename': 'Cr.L3'},
'M3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 48.0,
'filename': 'Cr.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 695.0,
'filename': 'Cr.L1'}}},
'General_properties': {'Z': 24,
'atomic_weight': 51.9961,
'name': 'chromium'}},
'Cu': {'Physical_properties': {'density (g/cm^3)': 8.92},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.03197,
'energy (keV)': 0.9494},
'Kb': {'weight': 0.13157,
'energy (keV)': 8.9053},
'Ka': {'weight': 1.0,
'energy (keV)': 8.0478},
'La': {'weight': 1.0,
'energy (keV)': 0.9295},
'Ln': {'weight': 0.01984,
'energy (keV)': 0.8312},
'Ll': {'weight': 0.08401,
'energy (keV)': 0.8113},
'Lb3': {'weight': 0.00114,
'energy (keV)': 1.0225}},
'Binding_energies': {'M2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 74.0,
'filename': 'Cu.M3'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 951.0,
'filename': 'Cu.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 931.0,
'filename': 'Cu.L3'},
'M3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 74.0,
'filename': 'Cu.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1096.0,
'filename': 'Cu.L1'}}},
'General_properties': {'Z': 29,
'atomic_weight': 63.546,
'name': 'copper'}},
'La': {'Physical_properties': {'density (g/cm^3)': 6.146},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.42631,
'energy (keV)': 5.0421},
'Kb': {'weight': 0.15,
'energy (keV)': 37.8012},
'Ka': {'weight': 1.0,
'energy (keV)': 33.4419},
'M2N4': {'weight': 0.022,
'energy (keV)': 1.1055},
'Ma': {'weight': 1.0,
'energy (keV)': 0.8173},
'Lb4': {'weight': 0.0872,
'energy (keV)': 5.0619},
'La': {'weight': 1.0,
'energy (keV)': 4.651},
'Ln': {'weight': 0.015,
'energy (keV)': 4.5293},
'Ll': {'weight': 0.0432,
'energy (keV)': 4.1214},
'Mb': {'weight': 0.9,
'energy (keV)': 0.8162},
'Mg': {'weight': 0.4,
'energy (keV)': 1.0245},
'Lb2': {'weight': 0.19579,
'energy (keV)': 5.3838},
'Lb3': {'weight': 0.1341,
'energy (keV)': 5.1429},
'Lg3': {'weight': 0.0329,
'energy (keV)': 6.0749},
'Lg1': {'weight': 0.07656,
'energy (keV)': 5.7917},
'Mz': {'weight': 0.06,
'energy (keV)': 0.6403}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 832.0,
'filename': 'La.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 849.0,
'filename': 'La.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1123.0,
'filename': 'La.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 1204.0,
'filename': 'La.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 99.0,
'filename': 'La.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 99.0,
'filename': 'La.N5'}}},
'General_properties': {'Z': 57,
'atomic_weight': 138.90547,
'name': 'lanthanum'}},
'Li': {'Physical_properties': {'density (g/cm^3)': 0.534},
'Atomic_properties': {'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 55.0,
'filename': 'Li.K1'}}},
'General_properties': {'atomic_weight': 6.939,
'Z': 3,
'name': 'lithium'}},
'Tl': {'Physical_properties': {'density (g/cm^3)': 11.85},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.39112,
'energy (keV)': 12.2128},
'Kb': {'weight': 0.15,
'energy (keV)': 82.5738},
'Ka': {'weight': 1.0,
'energy (keV)': 72.8729},
'M2N4': {'weight': 0.00863,
'energy (keV)': 3.0091},
'Ma': {'weight': 1.0,
'energy (keV)': 2.2708},
'Lb4': {'weight': 0.05419,
'energy (keV)': 11.931},
'La': {'weight': 1.0,
'energy (keV)': 10.2682},
'Ln': {'weight': 0.0134,
'energy (keV)': 10.9938},
'M3O4': {'energy (keV)': 2.9413,
'weight': 0.005},
'Ll': {'weight': 0.0578,
'energy (keV)': 8.9534},
'Mb': {'weight': 0.64124,
'energy (keV)': 2.3623},
'Mg': {'weight': 0.21845,
'energy (keV)': 2.5704},
'Lb2': {'weight': 0.22289,
'energy (keV)': 12.2713},
'Lb3': {'weight': 0.0607,
'energy (keV)': 12.3901},
'M3O5': {'energy (keV)': 2.9435,
'weight': 0.01},
'Lg3': {'weight': 0.0175,
'energy (keV)': 14.7377},
'Lg1': {'weight': 0.08304,
'energy (keV)': 14.2913},
'Mz': {'weight': 0.0058,
'energy (keV)': 1.7803}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 2389.0,
'filename': 'Tl.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 2485.0,
'filename': 'Tl.M5'}}},
'General_properties': {'Z': 81,
'atomic_weight': 204.3833,
'name': 'thallium'}},
'Lu': {'Physical_properties': {'density (g/cm^3)': 9.841},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.46975,
'energy (keV)': 8.7092},
'Kb': {'weight': 0.15,
'energy (keV)': 61.2902},
'Ka': {'weight': 1.0,
'energy (keV)': 54.0697},
'M2N4': {'weight': 0.01,
'energy (keV)': 2.0587},
'Ma': {'weight': 1.0,
'energy (keV)': 1.5816},
'Lb4': {'weight': 0.0996,
'energy (keV)': 8.6069},
'La': {'weight': 1.0,
'energy (keV)': 7.6556},
'Ln': {'weight': 0.016,
'energy (keV)': 7.8574},
'Ll': {'weight': 0.05009,
'energy (keV)': 6.7529},
'Mb': {'weight': 0.59443,
'energy (keV)': 1.6325},
'Mg': {'weight': 0.08505,
'energy (keV)': 1.8286},
'Lb2': {'weight': 0.20359,
'energy (keV)': 9.0491},
'Lb3': {'weight': 0.13099,
'energy (keV)': 8.8468},
'Lg3': {'weight': 0.0342,
'energy (keV)': 10.5111},
'Lg1': {'weight': 0.08968,
'energy (keV)': 10.1438},
'Mz': {'weight': 0.06,
'energy (keV)': 1.2292}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1588.0,
'filename': 'Lu.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1639.0,
'filename': 'Lu.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 2024.0,
'filename': 'Lu.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 2263.0,
'filename': 'Lu.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 195.0,
'filename': 'Lu.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 195.0,
'filename': 'Lu.N5'}}},
'General_properties': {'Z': 71,
'atomic_weight': 174.9668,
'name': 'lutetium'}},
'Th': {'Physical_properties': {'density (g/cm^3)': 11.724},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.4,
'energy (keV)': 16.2024},
'Kb': {'weight': 0.15,
'energy (keV)': 105.6049},
'Ka': {'weight': 1.0,
'energy (keV)': 93.3507},
'M2N4': {'weight': 0.00674,
'energy (keV)': 4.1163},
'Ma': {'weight': 1.0,
'energy (keV)': 2.9968},
'Lb4': {'weight': 0.05,
'energy (keV)': 15.6417},
'La': {'weight': 1.0,
'energy (keV)': 12.9683},
'Ln': {'weight': 0.0134,
'energy (keV)': 14.5109},
'M3O4': {'energy (keV)': 3.9518,
'weight': 0.01},
'Ll': {'weight': 0.06709,
'energy (keV)': 11.118},
'Mb': {'weight': 0.64124,
'energy (keV)': 3.1464},
'Mg': {'weight': 0.33505,
'energy (keV)': 3.3697},
'Lb2': {'weight': 0.236,
'energy (keV)': 15.6239},
'Lb3': {'weight': 0.06,
'energy (keV)': 16.426},
'M3O5': {'energy (keV)': 3.9582,
'weight': 0.01},
'Lg3': {'weight': 0.017,
'energy (keV)': 19.5048},
'Lg1': {'weight': 0.08,
'energy (keV)': 18.9791},
'Mz': {'weight': 0.03512,
'energy (keV)': 2.3647}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 3332.0,
'filename': 'Th.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 3491.0,
'filename': 'Th.M5'},
'O5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 83.0,
'filename': 'Th.O5'},
'O4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 83.0,
'filename': 'Th.O5'}}},
'General_properties': {'Z': 90,
'atomic_weight': 232.03806,
'name': 'thorium'}},
'Ti': {'Physical_properties': {'density (g/cm^3)': 4.507},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.11673,
'energy (keV)': 4.9318},
'Ka': {'weight': 1.0,
'energy (keV)': 4.5109},
'La': {'weight': 0.694,
'energy (keV)': 0.4555},
'Ln': {'weight': 0.491,
'energy (keV)': 0.4012},
'Ll': {'weight': 1.0,
'energy (keV)': 0.3952},
'Lb3': {'weight': 0.166,
'energy (keV)': 0.5291}},
'Binding_energies': {'M2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 47.0,
'filename': 'Ti.M3'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 462.0,
'filename': 'Ti.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 456.0,
'filename': 'Ti.L3'},
'M3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 47.0,
'filename': 'Ti.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 564.0,
'filename': 'Ti.L1'}}},
'General_properties': {'Z': 22,
'atomic_weight': 47.867,
'name': 'titanium'}},
'Te': {'Physical_properties': {'density (g/cm^3)': 6.24},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.43183,
'energy (keV)': 4.0295},
'Kb': {'weight': 0.15,
'energy (keV)': 30.9951},
'Ka': {'weight': 1.0,
'energy (keV)': 27.4724},
'Lb2': {'weight': 0.16269,
'energy (keV)': 4.3016},
'Lb4': {'weight': 0.0906,
'energy (keV)': 4.0695},
'La': {'weight': 1.0,
'energy (keV)': 3.7693},
'Ln': {'weight': 0.0154,
'energy (keV)': 3.606},
'Ll': {'weight': 0.0419,
'energy (keV)': 3.3354},
'Lb3': {'weight': 0.1458,
'energy (keV)': 4.1205},
'Lg3': {'weight': 0.0317,
'energy (keV)': 4.829},
'Lg1': {'weight': 0.06375,
'energy (keV)': 4.5722}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 572.0,
'filename': 'Te.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 582.0,
'filename': 'Te.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 819.0,
'filename': 'Te.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 870.0,
'filename': 'Te.M3'}}},
'General_properties': {'Z': 52,
'atomic_weight': 127.6,
'name': 'tellurium'}},
'Tb': {'Physical_properties': {'density (g/cm^3)': 8.219},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.44104,
'energy (keV)': 6.9766},
'Kb': {'weight': 0.15,
'energy (keV)': 50.3844},
'Ka': {'weight': 1.0,
'energy (keV)': 44.4817},
'M2N4': {'weight': 0.014,
'energy (keV)': 1.6207},
'Ma': {'weight': 1.0,
'energy (keV)': 1.2326},
'Lb4': {'weight': 0.0874,
'energy (keV)': 6.9403},
'La': {'weight': 1.0,
'energy (keV)': 6.2728},
'Ln': {'weight': 0.01479,
'energy (keV)': 6.2841},
'Ll': {'weight': 0.0465,
'energy (keV)': 5.5465},
'Mb': {'weight': 0.78,
'energy (keV)': 1.2656},
'Mg': {'weight': 0.2615,
'energy (keV)': 1.4643},
'Lb2': {'weight': 0.19929,
'energy (keV)': 7.367},
'Lb3': {'weight': 0.124,
'energy (keV)': 7.0967},
'Lg3': {'weight': 0.0315,
'energy (keV)': 8.423},
'Lg1': {'weight': 0.08168,
'energy (keV)': 8.1046},
'Mz': {'weight': 0.06,
'energy (keV)': 0.9562}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1241.0,
'filename': 'Tb.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1275.0,
'filename': 'Tb.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1611.0,
'filename': 'Tb.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 1768.0,
'filename': 'Tb.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 148.0,
'filename': 'Tb.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 148.0,
'filename': 'Tb.N5'}}},
'General_properties': {'Z': 65,
'atomic_weight': 158.92535,
'name': 'terbium'}},
'Tc': {'Physical_properties': {'density (g/cm^3)': 11.5},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.32951,
'energy (keV)': 2.5368},
'Kb': {'weight': 0.15,
'energy (keV)': 20.619},
'Ka': {'weight': 1.0,
'energy (keV)': 18.3671},
'Lb2': {'weight': 0.05839,
'energy (keV)': 2.67017},
'La': {'weight': 1.0,
'energy (keV)': 2.424},
'Ln': {'weight': 0.0127,
'energy (keV)': 2.2456},
'Ll': {'weight': 0.0412,
'energy (keV)': 2.1293},
'Lb3': {'weight': 0.0644,
'energy (keV)': 2.6175},
'Lg3': {'weight': 0.0111,
'energy (keV)': 3.0036},
'Lg1': {'weight': 0.01744,
'energy (keV)': 2.78619}}},
'General_properties': {'Z': 43,
'atomic_weight': 98,
'name': 'technetium'}},
'Ta': {'Physical_properties': {'density (g/cm^3)': 16.65},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.46248,
'energy (keV)': 9.3429},
'Kb': {'weight': 0.15,
'energy (keV)': 65.2224},
'Ka': {'weight': 1.0,
'energy (keV)': 57.5353},
'M2N4': {'weight': 0.01,
'energy (keV)': 2.2274},
'Ma': {'weight': 1.0,
'energy (keV)': 1.7101},
'Lb4': {'weight': 0.10449,
'energy (keV)': 9.2128},
'La': {'weight': 1.0,
'energy (keV)': 8.146},
'Ln': {'weight': 0.0158,
'energy (keV)': 8.4281},
'M3O4': {'energy (keV)': 2.1883,
'weight': 0.0001},
'Ll': {'weight': 0.0515,
'energy (keV)': 7.1731},
'Mb': {'weight': 0.59443,
'energy (keV)': 1.7682},
'Mg': {'weight': 0.08505,
'energy (keV)': 1.9647},
'Lb2': {'weight': 0.2076,
'energy (keV)': 9.6518},
'Lb3': {'weight': 0.1333,
'energy (keV)': 9.4875},
'M3O5': {'energy (keV)': 2.194,
'weight': 0.01},
'Lg3': {'weight': 0.0354,
'energy (keV)': 11.277},
'Lg1': {'weight': 0.09071,
'energy (keV)': 10.8948},
'Mz': {'weight': 0.01344,
'energy (keV)': 1.3306}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1735.0,
'filename': 'Ta.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1793.0,
'filename': 'Ta.M5'}}},
'General_properties': {'Z': 73,
'atomic_weight': 180.94788,
'name': 'tantalum'}},
'Yb': {'Physical_properties': {'density (g/cm^3)': 6.57},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.46224,
'energy (keV)': 8.4019},
'Kb': {'weight': 0.15,
'energy (keV)': 59.3825},
'Ka': {'weight': 1.0,
'energy (keV)': 52.3887},
'M2N4': {'weight': 0.01,
'energy (keV)': 1.9749},
'Ma': {'weight': 1.0,
'energy (keV)': 1.5215},
'Lb4': {'weight': 0.09589,
'energy (keV)': 8.3134},
'La': {'weight': 1.0,
'energy (keV)': 7.4158},
'Ln': {'weight': 0.0157,
'energy (keV)': 7.5801},
'Ll': {'weight': 0.0494,
'energy (keV)': 6.5455},
'Mb': {'weight': 0.59443,
'energy (keV)': 1.57},
'Mg': {'weight': 0.08505,
'energy (keV)': 1.7649},
'Lb2': {'weight': 0.2017,
'energy (keV)': 8.7587},
'Lb3': {'weight': 0.12789,
'energy (keV)': 8.5366},
'Lg3': {'weight': 0.0331,
'energy (keV)': 10.1429},
'Lg1': {'weight': 0.08728,
'energy (keV)': 9.7801},
'Mz': {'weight': 0.06,
'energy (keV)': 1.1843}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1528.0,
'filename': 'Yb.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1576.0,
'filename': 'Yb.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1950.0,
'filename': 'Yb.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 2173.0,
'filename': 'Yb.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 184.0,
'filename': 'Yb.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 184.0,
'filename': 'Yb.N5'}}},
'General_properties': {'Z': 70,
'atomic_weight': 173.054,
'name': 'ytterbium'}},
'Dy': {'Physical_properties': {'density (g/cm^3)': 8.551},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.444,
'energy (keV)': 7.2481},
'Kb': {'weight': 0.15,
'energy (keV)': 52.1129},
'Ka': {'weight': 1.0,
'energy (keV)': 45.9984},
'M2N4': {'weight': 0.008,
'energy (keV)': 1.6876},
'Ma': {'weight': 1.0,
'energy (keV)': 1.2907},
'Lb4': {'weight': 0.0891,
'energy (keV)': 7.204},
'La': {'weight': 1.0,
'energy (keV)': 6.4952},
'Ln': {'weight': 0.01489,
'energy (keV)': 6.5338},
'Ll': {'weight': 0.0473,
'energy (keV)': 5.7433},
'Mb': {'weight': 0.76,
'energy (keV)': 1.3283},
'Mg': {'weight': 0.08505,
'energy (keV)': 1.5214},
'Lb2': {'weight': 0.2,
'energy (keV)': 7.6359},
'Lb3': {'weight': 0.12529,
'energy (keV)': 7.3702},
'Lg3': {'weight': 0.0319,
'energy (keV)': 8.7529},
'Lg1': {'weight': 0.08295,
'energy (keV)': 8.4264},
'Mz': {'weight': 0.06,
'energy (keV)': 1.002}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1295.0,
'filename': 'Dy.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 1332.0,
'filename': 'Dy.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1676.0,
'filename': 'Dy.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 1842.0,
'filename': 'Dy.M3'},
'N4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 154.0,
'filename': 'Dy.N5'},
'N5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 154.0,
'filename': 'Dy.N5'}}},
'General_properties': {'Z': 66,
'atomic_weight': 162.5,
'name': 'dysprosium'}},
'I': {'Physical_properties': {'density (g/cm^3)': 4.94},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.43087,
'energy (keV)': 4.2208},
'Kb': {'weight': 0.15,
'energy (keV)': 32.2948},
'Ka': {'weight': 1.0,
'energy (keV)': 28.6123},
'Lb2': {'weight': 0.17059,
'energy (keV)': 4.5075},
'Lb4': {'weight': 0.09189,
'energy (keV)': 4.2576},
'La': {'weight': 1.0,
'energy (keV)': 3.9377},
'Ln': {'weight': 0.0154,
'energy (keV)': 3.78},
'Ll': {'weight': 0.0423,
'energy (keV)': 3.485},
'Lb3': {'weight': 0.1464,
'energy (keV)': 4.3135},
'Lg3': {'weight': 0.0327,
'energy (keV)': 5.0654},
'Lg1': {'weight': 0.06704,
'energy (keV)': 4.8025}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 620.0,
'filename': 'I.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 631.0,
'filename': 'I.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 875.0,
'filename': 'I.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 930.0,
'filename': 'I.M3'}}},
'General_properties': {'Z': 53,
'atomic_weight': 126.90447,
'name': 'iodine'}},
'U': {'Physical_properties': {'density (g/cm^3)': 19.05},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.4,
'energy (keV)': 17.22},
'Kb': {'weight': 0.15,
'energy (keV)': 111.3026},
'Ka': {'weight': 1.0,
'energy (keV)': 98.4397},
'M2N4': {'weight': 0.00674,
'energy (keV)': 4.4018},
'Ma': {'weight': 1.0,
'energy (keV)': 3.1708},
'Lb4': {'weight': 0.04,
'energy (keV)': 16.5752},
'La': {'weight': 1.0,
'energy (keV)': 13.6146},
'Ln': {'weight': 0.01199,
'energy (keV)': 15.3996},
'M3O4': {'energy (keV)': 4.1984,
'weight': 0.01},
'Ll': {'weight': 0.069,
'energy (keV)': 11.6183},
'Mb': {'weight': 0.6086,
'energy (keV)': 3.3363},
'Mg': {'weight': 0.33505,
'energy (keV)': 3.5657},
'Lb2': {'weight': 0.236,
'energy (keV)': 16.4286},
'Lb3': {'weight': 0.06,
'energy (keV)': 17.454},
'M3O5': {'energy (keV)': 4.2071,
'weight': 0.01},
'Lg3': {'weight': 0.017,
'energy (keV)': 20.7125},
'Lg1': {'weight': 0.08,
'energy (keV)': 20.1672},
'Mz': {'weight': 0.03512,
'energy (keV)': 2.5068}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 3552.0,
'filename': 'U.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 3728.0,
'filename': 'U.M5'},
'O5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 96.0,
'filename': 'U.O5'},
'O4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 96.0,
'filename': 'U.O5'}}},
'General_properties': {'Z': 92,
'atomic_weight': 238.02891,
'name': 'uranium'}},
'Y': {'Physical_properties': {'density (g/cm^3)': 4.472},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.39127,
'energy (keV)': 1.9959},
'Kb': {'weight': 0.15,
'energy (keV)': 16.7381},
'Ka': {'weight': 1.0,
'energy (keV)': 14.9584},
'Lb2': {'weight': 0.00739,
'energy (keV)': 2.08},
'La': {'weight': 1.0,
'energy (keV)': 1.9226},
'Ln': {'weight': 0.0162,
'energy (keV)': 1.7619},
'Ll': {'weight': 0.0428,
'energy (keV)': 1.6864},
'Lb3': {'weight': 0.05059,
'energy (keV)': 2.0722},
'Lg3': {'weight': 0.0075,
'energy (keV)': 2.3469},
'Lg1': {'weight': 0.00264,
'energy (keV)': 2.1555}},
'Binding_energies': {'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 312.0,
'filename': 'Y.M3'},
'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 160.0,
'filename': 'Y.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 160.0,
'filename': 'Y.M5'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 2155.0,
'filename': 'Y.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 2080.0,
'filename': 'Y.L3'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 300.0,
'filename': 'Y.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 2372.0,
'filename': 'Y.L1'}}},
'General_properties': {'Z': 39,
'atomic_weight': 88.90585,
'name': 'yttrium'}},
'Ac': {'Physical_properties': {'density (g/cm^3)': 10.07},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.4,
'energy (keV)': 15.713},
'Kb': {'weight': 0.15,
'energy (keV)': 102.846},
'Ka': {'weight': 1.0,
'energy (keV)': 90.884},
'M2N4': {'weight': 0.00674,
'energy (keV)': 3.9811},
'Ma': {'energy (keV)': 2.9239330000000003,
'weight': 1.0},
'La': {'weight': 1.0,
'energy (keV)': 12.652},
'Ln': {'weight': 0.0133,
'energy (keV)': 14.0812},
'M3O4': {'energy (keV)': 3.82586,
'weight': 0.01},
'Ll': {'weight': 0.06549,
'energy (keV)': 10.869},
'Mb': {'weight': 0.64124,
'energy (keV)': 3.06626},
'Mg': {'weight': 0.33505,
'energy (keV)': 3.272},
'M3O5': {'energy (keV)': 3.83206,
'weight': 0.01},
'Lb2': {'weight': 0.236,
'energy (keV)': 15.234},
'Lg3': {'weight': 0.017,
'energy (keV)': 18.95},
'Lg1': {'weight': 0.08,
'energy (keV)': 18.4083},
'Lb3': {'weight': 0.06,
'energy (keV)': 15.931},
'Mz': {'weight': 0.03512,
'energy (keV)': 2.329}}},
'General_properties': {'Z': 89,
'atomic_weight': 227,
'name': 'actinium'}},
'Ag': {'Physical_properties': {'density (g/cm^3)': 10.49},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.35175,
'energy (keV)': 3.1509},
'Kb': {'weight': 0.15,
'energy (keV)': 24.9426},
'Ka': {'weight': 1.0,
'energy (keV)': 22.1629},
'Lb2': {'weight': 0.1165,
'energy (keV)': 3.3478},
'Lb4': {'weight': 0.0444,
'energy (keV)': 3.2034},
'La': {'weight': 1.0,
'energy (keV)': 2.9844},
'Ln': {'weight': 0.0131,
'energy (keV)': 2.8062},
'Ll': {'weight': 0.04129,
'energy (keV)': 2.6336},
'Lb3': {'weight': 0.0737,
'energy (keV)': 3.2344},
'Lg3': {'weight': 0.014,
'energy (keV)': 3.7499},
'Lg1': {'weight': 0.03735,
'energy (keV)': 3.5204}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 367.0,
'filename': 'Ag.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 373.0,
'filename': 'Ag.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 571.0,
'filename': 'Ag.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 602.0,
'filename': 'Ag.M3'}}},
'General_properties': {'Z': 47,
'atomic_weight': 107.8682,
'name': 'silver'}},
'Ir': {'Physical_properties': {'density (g/cm^3)': 22.56},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.4168,
'energy (keV)': 10.708},
'Kb': {'weight': 0.15,
'energy (keV)': 73.5603},
'Ka': {'weight': 1.0,
'energy (keV)': 64.8958},
'M2N4': {'weight': 0.02901,
'energy (keV)': 2.5973},
'Ma': {'weight': 1.0,
'energy (keV)': 1.9799},
'Lb4': {'weight': 0.07269,
'energy (keV)': 10.5098},
'La': {'weight': 1.0,
'energy (keV)': 9.1748},
'Ln': {'weight': 0.01429,
'energy (keV)': 9.6504},
'M3O4': {'energy (keV)': 2.54264,
'weight': 0.005},
'Ll': {'weight': 0.05429,
'energy (keV)': 8.0415},
'Mb': {'weight': 0.59443,
'energy (keV)': 2.0527},
'Mg': {'weight': 0.08505,
'energy (keV)': 2.2558},
'Lb2': {'weight': 0.216,
'energy (keV)': 10.9203},
'Lb3': {'weight': 0.0874,
'energy (keV)': 10.8678},
'M3O5': {'energy (keV)': 2.54385,
'weight': 0.01},
'Lg3': {'weight': 0.024,
'energy (keV)': 12.9242},
'Lg1': {'weight': 0.08543,
'energy (keV)': 12.5127},
'Mz': {'weight': 0.01344,
'energy (keV)': 1.5461}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 2040.0,
'filename': 'Ir.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 2116.0,
'filename': 'Ir.M5'}}},
'General_properties': {'Z': 77,
'atomic_weight': 192.217,
'name': 'iridium'}},
'Al': {'Physical_properties': {'density (g/cm^3)': 2.7},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.0132,
'energy (keV)': 1.5596},
'Ka': {'weight': 1.0,
'energy (keV)': 1.4865}},
'Binding_energies': {'K': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1560.0,
'filename': 'Al.K1'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 73.0,
'filename': 'Al.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 118.0,
'filename': 'Al.L1'}}},
'General_properties': {'Z': 13,
'atomic_weight': 26.9815386,
'name': 'aluminum'}},
'As': {'Physical_properties': {'density (g/cm^3)': 5.727},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.16704,
'energy (keV)': 1.3174},
'Kb': {'weight': 0.14589,
'energy (keV)': 11.7262},
'Ka': {'weight': 1.0,
'energy (keV)': 10.5436},
'La': {'weight': 1.0,
'energy (keV)': 1.2819},
'Ln': {'weight': 0.01929,
'energy (keV)': 1.1551},
'Ll': {'weight': 0.04929,
'energy (keV)': 1.1196},
'Lb3': {'weight': 0.04769,
'energy (keV)': 1.386}},
'Binding_energies': {'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 1359.0,
'filename': 'As.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 1323.0,
'filename': 'As.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 1526.0,
'filename': 'As.L1'}}},
'General_properties': {'Z': 33,
'atomic_weight': 74.9216,
'name': 'arsenic'}},
'Ar': {'Physical_properties': {'density (g/cm^3)': 0.001784},
'Atomic_properties': {'Xray_lines': {'Kb': {'weight': 0.10169,
'energy (keV)': 3.1905},
'Ka': {'weight': 1.0,
'energy (keV)': 2.9577}},
'Binding_energies': {'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 245.0,
'filename': 'Ar.L3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 320.0,
'filename': 'Ar.L1'}}},
'General_properties': {'Z': 18,
'atomic_weight': 39.948,
'name': 'argon'}},
'Au': {'Physical_properties': {'density (g/cm^3)': 19.3},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.40151,
'energy (keV)': 11.4425},
'Kb': {'weight': 0.15,
'energy (keV)': 77.9819},
'Ka': {'weight': 1.0,
'energy (keV)': 68.8062},
'M2N4': {'weight': 0.02901,
'energy (keV)': 2.7958},
'Ma': {'weight': 1.0,
'energy (keV)': 2.1229},
'Lb4': {'weight': 0.0594,
'energy (keV)': 11.205},
'La': {'weight': 1.0,
'energy (keV)': 9.713},
'Ln': {'weight': 0.01379,
'energy (keV)': 10.3087},
'M3O4': {'energy (keV)': 2.73469,
'weight': 0.005},
'Ll': {'weight': 0.0562,
'energy (keV)': 8.4938},
'Mb': {'weight': 0.59443,
'energy (keV)': 2.2047},
'Mg': {'weight': 0.08505,
'energy (keV)': 2.4091},
'Lb2': {'weight': 0.21949,
'energy (keV)': 11.5848},
'Lb3': {'weight': 0.069,
'energy (keV)': 11.6098},
'M3O5': {'energy (keV)': 2.73621,
'weight': 0.01},
'Lg3': {'weight': 0.0194,
'energy (keV)': 13.8074},
'Lg1': {'weight': 0.08407,
'energy (keV)': 13.3816},
'Mz': {'weight': 0.01344,
'energy (keV)': 1.6603}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 2206.0,
'filename': 'Au.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 2291.0,
'filename': 'Au.M5'}}},
'General_properties': {'Z': 79,
'atomic_weight': 196.966569,
'name': 'gold'}},
'At': {'Physical_properties': {'density (g/cm^3)': 'NaN'},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.38048,
'energy (keV)': 13.876},
'Kb': {'weight': 0.15,
'energy (keV)': 92.3039},
'Ka': {'weight': 1.0,
'energy (keV)': 81.5164},
'M2N4': {'weight': 0.00863,
'energy (keV)': 3.4748},
'Lb4': {'weight': 0.05809,
'energy (keV)': 13.485},
'La': {'weight': 1.0,
'energy (keV)': 11.4268},
'Ln': {'weight': 0.0132,
'energy (keV)': 12.4677},
'Ll': {'weight': 0.06179,
'energy (keV)': 9.8965},
'Mb': {'weight': 0.64124,
'energy (keV)': 2.71162},
'Mg': {'weight': 0.21845,
'energy (keV)': 2.95061},
'Lb2': {'weight': 0.2305,
'energy (keV)': 13.73812},
'Lg3': {'weight': 0.017,
'energy (keV)': 16.753},
'Lg1': {'weight': 0.08,
'energy (keV)': 16.2515},
'Lb3': {'weight': 0.06,
'energy (keV)': 14.067},
'Mz': {'weight': 0.00354,
'energy (keV)': 2.0467}}},
'General_properties': {'Z': 85,
'atomic_weight': 210,
'name': 'astatine'}},
'In': {'Physical_properties': {'density (g/cm^3)': 7.31},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.3616,
'energy (keV)': 3.4872},
'Kb': {'weight': 0.15,
'energy (keV)': 27.2756},
'Ka': {'weight': 1.0,
'energy (keV)': 24.2098},
'Lb2': {'weight': 0.1371,
'energy (keV)': 3.7139},
'Lb4': {'weight': 0.05349,
'energy (keV)': 3.5353},
'La': {'weight': 1.0,
'energy (keV)': 3.287},
'Ln': {'weight': 0.0132,
'energy (keV)': 3.1124},
'Ll': {'weight': 0.0415,
'energy (keV)': 2.9045},
'Lb3': {'weight': 0.08779,
'energy (keV)': 3.5732},
'Lg3': {'weight': 0.0177,
'energy (keV)': 4.1601},
'Lg1': {'weight': 0.04535,
'energy (keV)': 3.9218}},
'Binding_energies': {'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 443.0,
'filename': 'In.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 451.0,
'filename': 'In.M5'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 664.0,
'filename': 'In.M3'},
'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 702.0,
'filename': 'In.M3'}}},
'General_properties': {'Z': 49,
'atomic_weight': 114.818,
'name': 'indium'}},
'Mo': {'Physical_properties': {'density (g/cm^3)': 10.28},
'Atomic_properties': {'Xray_lines': {'Lb1': {'weight': 0.32736,
'energy (keV)': 2.3948},
'Kb': {'weight': 0.15,
'energy (keV)': 19.6072},
'Ka': {'weight': 1.0,
'energy (keV)': 17.4793},
'Lb2': {'weight': 0.04509,
'energy (keV)': 2.5184},
'La': {'weight': 1.0,
'energy (keV)': 2.2932},
'Ln': {'weight': 0.0128,
'energy (keV)': 2.1205},
'Ll': {'weight': 0.0415,
'energy (keV)': 2.0156},
'Lb3': {'weight': 0.06299,
'energy (keV)': 2.4732},
'Lg3': {'weight': 0.0105,
'energy (keV)': 2.8307},
'Lg1': {'weight': 0.01335,
'energy (keV)': 2.6233}},
'Binding_energies': {'M2': {'relevance': 'Minor',
'factor': 0.5,
'onset_energy (eV)': 410.0,
'filename': 'Mo.M3'},
'M5': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 227.0,
'filename': 'Mo.M5'},
'M4': {'relevance': 'Major',
'factor': 0.6666666666666666,
'onset_energy (eV)': 228.0,
'filename': 'Mo.M5'},
'L2': {'relevance': 'Major',
'factor': 0.5,
'onset_energy (eV)': 2625.0,
'filename': 'Mo.L3'},
'L3': {'relevance': 'Major',
'factor': 1,
'onset_energy (eV)': 2520.0,
'filename': 'Mo.L3'},
'M3': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 392.0,
'filename': 'Mo.M3'},
'L1': {'relevance': 'Minor',
'factor': 1,
'onset_energy (eV)': 2866.0,
'filename': 'Mo.L1'}}},
'General_properties': {'Z': 42,
'atomic_weight': 95.96,
'name': 'molybdenum'}}}
elements_db = utils.DictionaryTreeBrowser(elements)
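# A usage sketch (hedged): DictionaryTreeBrowser normally allows both dict-style
# and dotted attribute access, so lookups such as
#     elements['Al']['General_properties']['atomic_weight']   # 26.9815386
#     elements_db.Al.Atomic_properties.Xray_lines.Ka          # Ka line data
# would retrieve entries from the database above; keys containing spaces or
# parentheses (e.g. 'energy (keV)') still require dict-style indexing.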
| gpl-3.0 |
rjschof/gem5 | src/dev/BadDevice.py | 69 | 1789 | # Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.params import *
from Device import BasicPioDevice
class BadDevice(BasicPioDevice):
type = 'BadDevice'
cxx_header = "dev/baddev.hh"
devicename = Param.String("Name of device to error on")
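# Usage sketch (parameter values are hypothetical): in a configuration script a
# BadDevice is typically placed over an address range that should fault when the
# guest touches it, e.g.
#     fake_uart1 = BadDevice(devicename='uart1', pio_addr=0x3f8)
# pio_addr is inherited from BasicPioDevice; only devicename is declared here.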
| bsd-3-clause |
haya14busa/alc-etm-searcher | nltk-3.0a3/build/lib/nltk/classify/tadm.py | 2 | 3527 | # Natural Language Toolkit: Interface to TADM Classifier
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Joseph Frazee <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
import sys
import subprocess
from nltk import compat
from nltk.internals import find_binary
try:
import numpy
except ImportError:
numpy = None
_tadm_bin = None
def config_tadm(bin=None):
global _tadm_bin
_tadm_bin = find_binary(
'tadm', bin,
env_vars=['TADM_DIR'],
binary_names=['tadm'],
url='http://tadm.sf.net')
def write_tadm_file(train_toks, encoding, stream):
"""
Generate an input file for ``tadm`` based on the given corpus of
classified tokens.
:type train_toks: list(tuple(dict, str))
:param train_toks: Training data, represented as a list of
pairs, the first member of which is a feature dictionary,
and the second of which is a classification label.
:type encoding: TadmEventMaxentFeatureEncoding
:param encoding: A feature encoding, used to convert featuresets
into feature vectors.
:type stream: stream
:param stream: The stream to which the ``tadm`` input file should be
written.
"""
# See the following for a file format description:
#
# http://sf.net/forum/forum.php?thread_id=1391502&forum_id=473054
# http://sf.net/forum/forum.php?thread_id=1675097&forum_id=473054
labels = encoding.labels()
for featureset, label in train_toks:
length_line = '%d\n' % len(labels)
stream.write(length_line)
for known_label in labels:
v = encoding.encode(featureset, known_label)
line = '%d %d %s\n' % (
int(label == known_label),
len(v),
' '.join('%d %d' % u for u in v)
)
stream.write(line)
def parse_tadm_weights(paramfile):
"""
Given the stdout output generated by ``tadm`` when training a
model, return a ``numpy`` array containing the corresponding weight
vector.
"""
weights = []
for line in paramfile:
weights.append(float(line.strip()))
return numpy.array(weights, 'd')
def call_tadm(args):
"""
Call the ``tadm`` binary with the given arguments.
"""
if isinstance(args, compat.string_types):
raise TypeError('args should be a list of strings')
if _tadm_bin is None:
config_tadm()
# Call tadm via a subprocess
cmd = [_tadm_bin] + args
p = subprocess.Popen(cmd, stdout=sys.stdout)
(stdout, stderr) = p.communicate()
# Check the return code.
if p.returncode != 0:
print()
print(stderr)
raise OSError('tadm command failed!')
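# Rough sketch of how the helpers above fit together; the tadm command-line
# flags used below ('-events_in', '-params_out', '-method') are assumptions and
# should be checked against the tadm documentation:
#
#     with open('events.txt', 'w') as stream:
#         write_tadm_file(train_toks, encoding, stream)
#     call_tadm(['-events_in', 'events.txt', '-params_out', 'params.txt',
#                '-method', 'gis'])
#     with open('params.txt') as paramfile:
#         weights = parse_tadm_weights(paramfile)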
def names_demo():
from nltk.classify.util import names_demo
from nltk.classify.maxent import TadmMaxentClassifier
classifier = names_demo(TadmMaxentClassifier.train)
def encoding_demo():
import sys
from nltk.classify.maxent import TadmEventMaxentFeatureEncoding
tokens = [({'f0':1, 'f1':1, 'f3':1}, 'A'),
({'f0':1, 'f2':1, 'f4':1}, 'B'),
({'f0':2, 'f2':1, 'f3':1, 'f4':1}, 'A')]
encoding = TadmEventMaxentFeatureEncoding.train(tokens)
write_tadm_file(tokens, encoding, sys.stdout)
print()
for i in range(encoding.length()):
print('%s --> %d' % (encoding.describe(i), i))
print()
if __name__ == '__main__':
encoding_demo()
names_demo()
| mit |
gavin-feng/odoo | addons/document/odt2txt.py | 435 | 2110 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys, zipfile, xml.dom.minidom
import StringIO
class OpenDocumentTextFile :
def __init__ (self, filepath):
zip = zipfile.ZipFile(filepath)
self.content = xml.dom.minidom.parseString(zip.read("content.xml"))
def toString (self):
""" Converts the document to a string. """
buffer = u""
for val in ["text:p", "text:h", "text:list"]:
for paragraph in self.content.getElementsByTagName(val) :
buffer += self.textToString(paragraph) + "\n"
return buffer
def textToString(self, element):
buffer = u""
for node in element.childNodes :
if node.nodeType == xml.dom.Node.TEXT_NODE :
buffer += node.nodeValue
elif node.nodeType == xml.dom.Node.ELEMENT_NODE :
buffer += self.textToString(node)
return buffer
if __name__ == "__main__" :
s =StringIO.StringIO(file(sys.argv[1]).read())
odt = OpenDocumentTextFile(s)
print odt.toString().encode('ascii','replace')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Venturi/oldcms | env/lib/python2.7/site-packages/django/db/models/fields/related.py | 10 | 114783 | from __future__ import unicode_literals
import warnings
from operator import attrgetter
from django import forms
from django.apps import apps
from django.core import checks, exceptions
from django.core.exceptions import FieldDoesNotExist
from django.db import connection, connections, router, transaction
from django.db.backends import utils
from django.db.models import Q, signals
from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL
from django.db.models.fields import (
BLANK_CHOICE_DASH, AutoField, Field, IntegerField, PositiveIntegerField,
PositiveSmallIntegerField,
)
from django.db.models.lookups import IsNull
from django.db.models.query import QuerySet
from django.db.models.query_utils import PathInfo
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property, curry
from django.utils.translation import ugettext_lazy as _
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
if isinstance(relation, six.string_types):
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
else:
# it's actually a model class
app_label = relation._meta.app_label
model_name = relation._meta.object_name
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_registered_model raises a LookupError, it means
# that the related model isn't loaded yet, so we need to pend the relation
# until the class is prepared.
try:
model = cls._meta.apps.get_registered_model(app_label, model_name)
except LookupError:
key = (app_label, model_name)
value = (cls, field, operation)
cls._meta.apps._pending_lookups.setdefault(key, []).append(value)
else:
operation(field, model, cls)
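# Illustrative sketch (model names are hypothetical) of the three string forms
# accepted above:
#
#     class Entry(models.Model):
#         parent = models.ForeignKey('self')        # recursive relation
#         blog = models.ForeignKey('Blog')          # model in the same app
#         author = models.ForeignKey('auth.User')   # "app_label.ModelName"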
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in sender._meta.apps._pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
class RelatedField(Field):
# Field flags
one_to_many = False
one_to_one = False
many_to_many = False
many_to_one = False
@cached_property
def related_model(self):
# Can't cache this property until all the models are loaded.
apps.check_models_ready()
return self.rel.to
def check(self, **kwargs):
errors = super(RelatedField, self).check(**kwargs)
errors.extend(self._check_related_name_is_valid())
errors.extend(self._check_relation_model_exists())
errors.extend(self._check_referencing_to_swapped_model())
errors.extend(self._check_clashes())
return errors
def _check_related_name_is_valid(self):
import re
import keyword
related_name = self.rel.related_name
if not related_name:
return []
is_valid_id = True
if keyword.iskeyword(related_name):
is_valid_id = False
if six.PY3:
if not related_name.isidentifier():
is_valid_id = False
else:
if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*\Z', related_name):
is_valid_id = False
if not (is_valid_id or related_name.endswith('+')):
return [
checks.Error(
"The name '%s' is invalid related_name for field %s.%s" %
(self.rel.related_name, self.model._meta.object_name,
self.name),
hint="Related name must be a valid Python identifier or end with a '+'",
obj=self,
id='fields.E306',
)
]
return []
def _check_relation_model_exists(self):
rel_is_missing = self.rel.to not in apps.get_models()
rel_is_string = isinstance(self.rel.to, six.string_types)
model_name = self.rel.to if rel_is_string else self.rel.to._meta.object_name
if rel_is_missing and (rel_is_string or not self.rel.to._meta.swapped):
return [
checks.Error(
("Field defines a relation with model '%s', which "
"is either not installed, or is abstract.") % model_name,
hint=None,
obj=self,
id='fields.E300',
)
]
return []
def _check_referencing_to_swapped_model(self):
if (self.rel.to not in apps.get_models() and
not isinstance(self.rel.to, six.string_types) and
self.rel.to._meta.swapped):
model = "%s.%s" % (
self.rel.to._meta.app_label,
self.rel.to._meta.object_name
)
return [
checks.Error(
("Field defines a relation with the model '%s', "
"which has been swapped out.") % model,
hint="Update the relation to point at 'settings.%s'." % self.rel.to._meta.swappable,
obj=self,
id='fields.E301',
)
]
return []
def _check_clashes(self):
""" Check accessor and reverse query name clashes. """
from django.db.models.base import ModelBase
errors = []
opts = self.model._meta
# `f.rel.to` may be a string instead of a model. Skip if model name is
# not resolved.
if not isinstance(self.rel.to, ModelBase):
return []
# If the field doesn't install backward relation on the target model (so
# `is_hidden` returns True), then there are no clashes to check and we
# can skip these fields.
if self.rel.is_hidden():
return []
try:
self.rel
except AttributeError:
return []
# Consider that we are checking field `Model.foreign` and the models
# are:
#
# class Target(models.Model):
# model = models.IntegerField()
# model_set = models.IntegerField()
#
# class Model(models.Model):
# foreign = models.ForeignKey(Target)
# m2m = models.ManyToManyField(Target)
rel_opts = self.rel.to._meta
# rel_opts.object_name == "Target"
rel_name = self.rel.get_accessor_name() # i. e. "model_set"
rel_query_name = self.related_query_name() # i. e. "model"
field_name = "%s.%s" % (opts.object_name,
self.name) # i. e. "Model.field"
# Check clashes between accessor or reverse query name of `field`
# and any other field name -- i.e. accessor for Model.foreign is
# model_set and it clashes with Target.model_set.
potential_clashes = rel_opts.fields + rel_opts.many_to_many
for clash_field in potential_clashes:
clash_name = "%s.%s" % (rel_opts.object_name,
clash_field.name) # i. e. "Target.model_set"
if clash_field.name == rel_name:
errors.append(
checks.Error(
"Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name),
hint=("Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'.") % (clash_name, field_name),
obj=self,
id='fields.E302',
)
)
if clash_field.name == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name),
hint=("Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'.") % (clash_name, field_name),
obj=self,
id='fields.E303',
)
)
# Check clashes between accessors/reverse query names of `field` and
# any other field accessor -- i. e. Model.foreign accessor clashes with
# Model.m2m accessor.
potential_clashes = (r for r in rel_opts.related_objects if r.field is not self)
for clash_field in potential_clashes:
clash_name = "%s.%s" % ( # i. e. "Model.m2m"
clash_field.related_model._meta.object_name,
clash_field.field.name)
if clash_field.get_accessor_name() == rel_name:
errors.append(
checks.Error(
"Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name),
hint=("Add or change a related_name argument "
"to the definition for '%s' or '%s'.") % (field_name, clash_name),
obj=self,
id='fields.E304',
)
)
if clash_field.get_accessor_name() == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with reverse query name for '%s'."
% (field_name, clash_name),
hint=("Add or change a related_name argument "
"to the definition for '%s' or '%s'.") % (field_name, clash_name),
obj=self,
id='fields.E305',
)
)
return errors
def db_type(self, connection):
        '''By default a related field will not have a column,
           as it relates to columns in another table.'''
return None
def contribute_to_class(self, cls, name, virtual_only=False):
sup = super(RelatedField, self)
# Store the opts for related_query_name()
self.opts = cls._meta
if hasattr(sup, 'contribute_to_class'):
sup.contribute_to_class(cls, name, virtual_only=virtual_only)
if not cls._meta.abstract and self.rel.related_name:
related_name = force_text(self.rel.related_name) % {
'class': cls.__name__.lower(),
'app_label': cls._meta.app_label.lower()
}
self.rel.related_name = related_name
other = self.rel.to
if isinstance(other, six.string_types) or other._meta.pk is None:
def resolve_related_class(field, model, cls):
field.rel.to = model
field.do_related_class(model, cls)
add_lazy_relation(cls, self, other, resolve_related_class)
else:
self.do_related_class(other, cls)
@property
def swappable_setting(self):
"""
        Gets the setting that controls the swappable target of this relation, or
        None if the target is not swapped in / is marked with swappable=False.
"""
if self.swappable:
# Work out string form of "to"
if isinstance(self.rel.to, six.string_types):
to_string = self.rel.to
else:
to_string = "%s.%s" % (
self.rel.to._meta.app_label,
self.rel.to._meta.object_name,
)
# See if anything swapped/swappable matches
for model in apps.get_models(include_swapped=True):
if model._meta.swapped:
if model._meta.swapped == to_string:
return model._meta.swappable
if ("%s.%s" % (model._meta.app_label, model._meta.object_name)) == to_string and model._meta.swappable:
return model._meta.swappable
return None
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.model_name + '_' + self.rel.to._meta.pk.name)
if self.verbose_name is None:
self.verbose_name = self.rel.to._meta.verbose_name
self.rel.set_field_name()
@property
def related(self):
warnings.warn(
"Usage of field.related has been deprecated. Use field.rel instead.",
RemovedInDjango110Warning, 2)
return self.rel
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
if not cls._meta.abstract:
self.contribute_to_related_class(other, self.rel)
def get_limit_choices_to(self):
"""Returns 'limit_choices_to' for this model field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.rel.limit_choices_to):
return self.rel.limit_choices_to()
return self.rel.limit_choices_to
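    # Usage sketch (model/field names are hypothetical): limit_choices_to may be
    # a dict or Q object, or a callable returning one, e.g.
    #     approver = models.ForeignKey(User, limit_choices_to={'is_staff': True})
    #     approver = models.ForeignKey(User, limit_choices_to=lambda: {'is_staff': True})
    # get_limit_choices_to() resolves the callable form before it is applied.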
def formfield(self, **kwargs):
"""Passes ``limit_choices_to`` to field being constructed.
Only passes it if there is a type that supports related fields.
This is a similar strategy used to pass the ``queryset`` to the field
being constructed.
"""
defaults = {}
if hasattr(self.rel, 'get_related_field'):
# If this is a callable, do not invoke it here. Just pass
# it in the defaults for when the form class will later be
# instantiated.
limit_choices_to = self.rel.limit_choices_to
defaults.update({
'limit_choices_to': limit_choices_to,
})
defaults.update(kwargs)
return super(RelatedField, self).formfield(**defaults)
def related_query_name(self):
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_query_name or self.rel.related_name or self.opts.model_name
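    # Usage sketch (model names are hypothetical): given
    #     class Entry(models.Model):
    #         blog = models.ForeignKey(Blog, related_name='entries',
    #                                  related_query_name='entry')
    # the reverse accessor is ``blog.entries`` while table-spanning lookups use
    # the query name, e.g. ``Blog.objects.filter(entry__pk=1)``.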
class SingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class pointed to by a related field.
# In the example "place.restaurant", the restaurant attribute is a
# SingleRelatedObjectDescriptor instance.
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ReverseSingleRelatedObjectDescriptor`.
return type(
str('RelatedObjectDoesNotExist'),
(self.related.related_model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
manager = self.related.related_model._default_manager
# If the related manager indicates that it should be used for
# related fields, respect that.
if not getattr(manager, 'use_for_related_fields', False):
manager = self.related.related_model._base_manager
return manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = attrgetter(self.related.field.attname)
instance_attr = lambda obj: obj._get_pk_val()
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
rel_obj_cache_name = self.related.field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
related_pk = instance._get_pk_val()
if related_pk is None:
rel_obj = None
else:
params = {}
for lh_field, rh_field in self.related.field.related_fields:
params['%s__%s' % (self.related.field.name, rh_field.name)] = getattr(instance, rh_field.attname)
try:
rel_obj = self.get_queryset(instance=instance).get(**params)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
setattr(rel_obj, self.related.field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj
def __set__(self, instance, value):
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null is False:
raise ValueError(
'Cannot assign None: "%s.%s" does not allow null values.' % (
instance._meta.object_name,
self.related.get_accessor_name(),
)
)
elif value is not None and not isinstance(value, self.related.related_model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.related_model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
def __init__(self, field_with_rel):
self.field = field_with_rel
self.cache_name = self.field.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `rel.to` might still be
# a string model reference.
return type(
str('RelatedObjectDoesNotExist'),
(self.field.rel.to.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
manager = self.field.rel.to._default_manager
# If the related manager indicates that it should be used for
# related fields, respect that.
if not getattr(manager, 'use_for_related_fields', False):
manager = self.field.rel.to._base_manager
return manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
related_field = self.field.foreign_related_fields[0]
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager is hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if self.field.rel.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.rel.multiple:
rel_obj_cache_name = self.field.rel.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
val = self.field.get_local_related_value(instance)
if None in val:
rel_obj = None
else:
params = {
rh_field.attname: getattr(instance, lh_field.attname)
for lh_field, rh_field in self.field.related_fields}
qs = self.get_queryset(instance=instance)
extra_filter = self.field.get_extra_descriptor_filter(instance)
if isinstance(extra_filter, dict):
params.update(extra_filter)
qs = qs.filter(**params)
else:
qs = qs.filter(extra_filter, **params)
# Assuming the database enforces foreign keys, this won't fail.
rel_obj = qs.get()
if not self.field.rel.multiple:
setattr(rel_obj, self.field.rel.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
def __set__(self, instance, value):
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null is False:
raise ValueError(
'Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name)
)
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.rel.to._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.cache_name, None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
setattr(related, self.field.rel.get_cache_name(), None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
if value is not None and not self.field.rel.multiple:
setattr(value, self.field.rel.get_cache_name(), instance)
def create_foreign_related_manager(superclass, rel_field, rel_model):
class RelatedManager(superclass):
def __init__(self, instance):
super(RelatedManager, self).__init__()
self.instance = instance
self.core_filters = {rel_field.name: instance}
self.model = rel_model
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
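            # (Usage sketch, names hypothetical: ``blog.entry_set(manager='entries').all()``
            # selects the related model's ``entries`` manager instead of the default one.)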
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_foreign_related_manager(manager.__class__, rel_field, rel_model)
return manager_class(self.instance)
do_not_call_in_templates = True
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[rel_field.related_query_name()]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
qs = super(RelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
qs = qs.filter(**self.core_filters)
for field in rel_field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return qs.none()
qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}}
return qs
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(RelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = rel_field.get_local_related_value
instance_attr = rel_field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % rel_field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_field.name, instance)
cache_name = rel_field.related_query_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name
def add(self, *objs):
objs = list(objs)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" %
(self.model._meta.object_name, obj))
setattr(obj, rel_field.name, self.instance)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel_field.null:
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
val = rel_field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
# Is obj actually part of this descriptor set?
if rel_field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, self.instance))
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{rel_field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, rel_field.name, None)
obj.save(update_fields=[rel_field.name])
_clear.alters_data = True
return RelatedManager
class ForeignRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ForeignKey pointed at them by
# some other model. In the example "poll.choice_set", the choice_set
# attribute is a ForeignRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
return self
return self.related_manager_cls(instance)
def __set__(self, instance, value):
# Force evaluation of `value` in case it's a queryset whose
# value could be affected by `manager.clear()`. Refs #19816.
value = tuple(value)
manager = self.__get__(instance)
db = router.db_for_write(manager.model, instance=manager.instance)
with transaction.atomic(using=db, savepoint=False):
# If the foreign key can support nulls, then completely clear the related set.
# Otherwise, just move the named objects into the set.
if self.related.field.null:
manager.clear()
manager.add(*value)
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related model's default
# manager.
return create_foreign_related_manager(
self.related.related_model._default_manager.__class__,
self.related.field,
self.related.related_model,
)
def create_many_related_manager(superclass, rel):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
class ManyRelatedManager(superclass):
def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None,
source_field_name=None, target_field_name=None, reverse=False,
through=None, prefetch_cache_name=None):
super(ManyRelatedManager, self).__init__()
self.model = model
self.query_field_name = query_field_name
source_field = through._meta.get_field(source_field_name)
source_related_fields = source_field.related_fields
self.core_filters = {}
for lh_field, rh_field in source_related_fields:
self.core_filters['%s__%s' % (query_field_name, rh_field.name)] = getattr(instance, rh_field.attname)
self.instance = instance
self.symmetrical = symmetrical
self.source_field = source_field
self.target_field = through._meta.get_field(target_field_name)
self.source_field_name = source_field_name
self.target_field_name = target_field_name
self.reverse = reverse
self.through = through
self.prefetch_cache_name = prefetch_cache_name
self.related_val = source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, source_field_name))
            # Even if this relation is not to the pk, we still require a pk value.
            # Ideally the instance has already been saved to the DB,
            # although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_many_related_manager(manager.__class__, rel)
return manager_class(
model=self.model,
query_field_name=self.query_field_name,
instance=self.instance,
symmetrical=self.symmetrical,
source_field_name=self.source_field_name,
target_field_name=self.target_field_name,
reverse=self.reverse,
through=self.through,
prefetch_cache_name=self.prefetch_cache_name,
)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
qs = super(ManyRelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
return qs._next_is_sticky().filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(ManyRelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = self.through._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
)
def add(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use add() on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use remove() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
clear.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use create() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
update_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = (self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
}))
new_ids = new_ids - set(vals)
with transaction.atomic(using=db, savepoint=False):
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
target_model_qs = super(ManyRelatedManager, self).get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.related_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField pointed at them by
# some other model (rather than having a ManyToManyField themselves).
# In the example "publication.article_set", the article_set attribute is a
# ManyRelatedObjectsDescriptor instance.
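    # Hedged usage sketch (model names are assumptions, not from this module):
    #     class Article(models.Model):
    #         publications = models.ManyToManyField(Publication)
    # Publication instances then expose an "article_set" attribute, and
    # pub.article_set.all() / pub.article_set.add(article) go through the
    # manager class built below.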
def __init__(self, related):
self.related = related # RelatedObject instance
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related
# model's default manager.
return create_many_related_manager(
self.related.related_model._default_manager.__class__,
self.related.field.rel
)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
rel_model = self.related.related_model
manager = self.related_manager_cls(
model=rel_model,
query_field_name=self.related.field.name,
prefetch_cache_name=self.related.field.related_query_name(),
instance=instance,
symmetrical=False,
source_field_name=self.related.field.m2m_reverse_field_name(),
target_field_name=self.related.field.m2m_field_name(),
reverse=True,
through=self.related.field.rel.through,
)
return manager
def __set__(self, instance, value):
if not self.related.field.rel.through._meta.auto_created:
opts = self.related.field.rel.through._meta
raise AttributeError(
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name)
)
# Force evaluation of `value` in case it's a queryset whose
# value could be affected by `manager.clear()`. Refs #19816.
value = tuple(value)
manager = self.__get__(instance)
db = router.db_for_write(manager.through, instance=manager.instance)
with transaction.atomic(using=db, savepoint=False):
manager.clear()
manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField defined in their
# model (rather than having another model pointed *at* them).
# In the example "article.publications", the publications attribute is a
# ReverseManyRelatedObjectsDescriptor instance.
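    # Hedged usage sketch (same assumed Article/Publication models as above):
    #     article.publications.all()        # manager returned by __get__ below
    #     article.publications = [p1, p2]   # __set__ clears, then add()s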
def __init__(self, m2m_field):
self.field = m2m_field
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.field.rel.through
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related model's
# default manager.
return create_many_related_manager(
self.field.rel.to._default_manager.__class__,
self.field.rel
)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
manager = self.related_manager_cls(
model=self.field.rel.to,
query_field_name=self.field.related_query_name(),
prefetch_cache_name=self.field.name,
instance=instance,
symmetrical=self.field.rel.symmetrical,
source_field_name=self.field.m2m_field_name(),
target_field_name=self.field.m2m_reverse_field_name(),
reverse=False,
through=self.field.rel.through,
)
return manager
def __set__(self, instance, value):
if not self.field.rel.through._meta.auto_created:
opts = self.field.rel.through._meta
raise AttributeError(
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name)
)
# Force evaluation of `value` in case it's a queryset whose
# value could be affected by `manager.clear()`. Refs #19816.
value = tuple(value)
manager = self.__get__(instance)
db = router.db_for_write(manager.through, instance=manager.instance)
with transaction.atomic(using=db, savepoint=False):
manager.clear()
manager.add(*value)
class ForeignObjectRel(object):
# Field flags
auto_created = True
concrete = False
editable = False
is_relation = True
def __init__(self, field, to, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
self.field = field
self.to = to
self.related_name = related_name
self.related_query_name = related_query_name
self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
self.multiple = True
self.parent_link = parent_link
self.on_delete = on_delete
self.symmetrical = False
# Some of the following cached_properties can't be initialized in
# __init__ as the field doesn't have its model yet. Calling these methods
# before field.contribute_to_class() has been called will result in
# AttributeError
@cached_property
def model(self):
return self.to
@cached_property
def hidden(self):
return self.is_hidden()
@cached_property
def name(self):
return self.field.related_query_name()
@cached_property
def related_model(self):
if not self.field.model:
raise AttributeError(
"This property can't be accessed before self.field.contribute_to_class has been called.")
return self.field.model
@cached_property
def many_to_many(self):
return self.field.many_to_many
@cached_property
def many_to_one(self):
return self.field.one_to_many
@cached_property
def one_to_many(self):
return self.field.many_to_one
@cached_property
def one_to_one(self):
return self.field.one_to_one
def __repr__(self):
return '<%s: %s.%s>' % (
type(self).__name__,
self.related_model._meta.app_label,
self.related_model._meta.model_name,
)
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH,
limit_to_currently_related=False):
"""
        Returns choices with a default blank choice included, for use as
SelectField choices for this field.
Analog of django.db.models.fields.Field.get_choices(), provided
initially for utilization by RelatedFieldListFilter.
"""
first_choice = blank_choice if include_blank else []
queryset = self.related_model._default_manager.all()
if limit_to_currently_related:
queryset = queryset.complex_filter(
{'%s__isnull' % self.related_model._meta.model_name: False}
)
lst = [(x._get_pk_val(), smart_text(x)) for x in queryset]
return first_choice + lst
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
# Defer to the actual field definition for db prep
return self.field.get_db_prep_lookup(lookup_type, value, connection=connection, prepared=prepared)
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name is not None and self.related_name[-1] == '+'
def get_joining_columns(self):
return self.field.get_reverse_joining_columns()
def get_extra_restriction(self, where_class, alias, related_alias):
return self.field.get_extra_restriction(where_class, related_alias, alias)
def set_field_name(self):
"""
        Sets the related field's name. This is not available until later stages
        of app loading, so set_field_name is called from
        set_attributes_from_rel().
"""
# By default foreign object doesn't relate to any remote field (for
# example custom multicolumn joins currently have no remote field).
self.field_name = None
def get_accessor_name(self, model=None):
# This method encapsulates the logic that decides what name to give an
# accessor descriptor that retrieves related many-to-one or
# many-to-many objects. It uses the lower-cased object_name + "_set",
# but this can be overridden with the "related_name" option.
# Due to backwards compatibility ModelForms need to be able to provide
# an alternate model. See BaseInlineFormSet.get_default_prefix().
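        # Illustrative (assumed models): a ForeignKey from Comment to Post gives
        # Post instances a "comment_set" accessor by default, or "comments" if
        # the field sets related_name='comments'.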
opts = model._meta if model else self.related_model._meta
model = model or self.related_model
if self.multiple:
# If this is a symmetrical m2m relation on self, there is no reverse accessor.
if self.symmetrical and model == self.to:
return None
if self.related_name:
return self.related_name
if opts.default_related_name:
return opts.default_related_name % {
'model_name': opts.model_name.lower(),
'app_label': opts.app_label.lower(),
}
return opts.model_name + ('_set' if self.multiple else '')
def get_cache_name(self):
return "_%s_cache" % self.get_accessor_name()
def get_path_info(self):
return self.field.get_reverse_path_info()
class ManyToOneRel(ForeignObjectRel):
def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
super(ManyToOneRel, self).__init__(
field, to, related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name)
self.field_name = field_name
def __getstate__(self):
state = self.__dict__.copy()
state.pop('related_model', None)
return state
def get_related_field(self):
"""
Returns the Field in the 'to' object to which this relationship is
tied.
"""
field = self.to._meta.get_field(self.field_name)
if not field.concrete:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return field
def set_field_name(self):
self.field_name = self.field_name or self.to._meta.pk.name
class OneToOneRel(ManyToOneRel):
def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
super(OneToOneRel, self).__init__(field, to, field_name,
related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name)
self.multiple = False
class ManyToManyRel(ForeignObjectRel):
def __init__(self, field, to, related_name=None, limit_choices_to=None,
symmetrical=True, through=None, through_fields=None,
db_constraint=True, related_query_name=None):
if through and not db_constraint:
raise ValueError("Can't supply a through model and db_constraint=False")
if through_fields and not through:
raise ValueError("Cannot specify through_fields without a through model")
super(ManyToManyRel, self).__init__(
field, to, related_name=related_name,
limit_choices_to=limit_choices_to, related_query_name=related_query_name)
self.symmetrical = symmetrical
self.multiple = True
self.through = through
self.through_fields = through_fields
self.db_constraint = db_constraint
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name is not None and self.related_name[-1] == '+'
def get_related_field(self):
"""
Returns the field in the 'to' object to which this relationship is tied.
Provided for symmetry with ManyToOneRel.
"""
opts = self.through._meta
if self.through_fields:
field = opts.get_field(self.through_fields[0])
else:
for field in opts.fields:
rel = getattr(field, 'rel', None)
if rel and rel.to == self.to:
break
return field.foreign_related_fields[0]
class ForeignObject(RelatedField):
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
# For backwards compatibility; ignored as of Django 1.8.4.
allow_unsaved_instance_assignment = False
requires_unique_target = True
related_accessor_class = ForeignRelatedObjectsDescriptor
def __init__(self, to, from_fields, to_fields, swappable=True, **kwargs):
self.from_fields = from_fields
self.to_fields = to_fields
self.swappable = swappable
if 'rel' not in kwargs:
kwargs['rel'] = ForeignObjectRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
super(ForeignObject, self).__init__(**kwargs)
def check(self, **kwargs):
errors = super(ForeignObject, self).check(**kwargs)
errors.extend(self._check_unique_target())
return errors
def _check_unique_target(self):
rel_is_string = isinstance(self.rel.to, six.string_types)
if rel_is_string or not self.requires_unique_target:
return []
        # Skip if the related fields can't be resolved yet.
try:
self.foreign_related_fields
except FieldDoesNotExist:
return []
try:
self.rel
except AttributeError:
return []
if not self.foreign_related_fields:
return []
has_unique_field = any(rel_field.unique
for rel_field in self.foreign_related_fields)
if not has_unique_field and len(self.foreign_related_fields) > 1:
field_combination = ', '.join("'%s'" % rel_field.name
for rel_field in self.foreign_related_fields)
model_name = self.rel.to.__name__
return [
checks.Error(
"None of the fields %s on model '%s' have a unique=True constraint."
% (field_combination, model_name),
hint=None,
obj=self,
id='fields.E310',
)
]
elif not has_unique_field:
field_name = self.foreign_related_fields[0].name
model_name = self.rel.to.__name__
return [
checks.Error(
("'%s.%s' must set unique=True "
"because it is referenced by a foreign key.") % (model_name, field_name),
hint=None,
obj=self,
id='fields.E311',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ForeignObject, self).deconstruct()
kwargs['from_fields'] = self.from_fields
kwargs['to_fields'] = self.to_fields
if self.rel.related_name is not None:
kwargs['related_name'] = self.rel.related_name
if self.rel.related_query_name is not None:
kwargs['related_query_name'] = self.rel.related_query_name
if self.rel.on_delete != CASCADE:
kwargs['on_delete'] = self.rel.on_delete
if self.rel.parent_link:
kwargs['parent_link'] = self.rel.parent_link
# Work out string form of "to"
if isinstance(self.rel.to, six.string_types):
kwargs['to'] = self.rel.to
else:
kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ForeignKey pointing to a model "
"that is swapped in place of more than one model (%s and %s)"
% (kwargs['to'].setting_name, swappable_setting)
)
# Set it
from django.db.migrations.writer import SettingsReference
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def resolve_related_fields(self):
if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
raise ValueError('Foreign Object from and to fields must be the same non-zero length')
if isinstance(self.rel.to, six.string_types):
raise ValueError('Related model %r cannot be resolved' % self.rel.to)
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (self if from_field_name == 'self'
else self.opts.get_field(from_field_name))
to_field = (self.rel.to._meta.pk if to_field_name is None
else self.rel.to._meta.get_field(to_field_name))
related_fields.append((from_field, to_field))
return related_fields
@property
def related_fields(self):
if not hasattr(self, '_related_fields'):
self._related_fields = self.resolve_related_fields()
return self._related_fields
@property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@property
def foreign_related_fields(self):
return tuple(rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field)
def get_local_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.local_related_fields)
def get_foreign_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
opts = instance._meta
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
if field.primary_key:
possible_parent_link = opts.get_ancestor_link(field.model)
if (not possible_parent_link or
possible_parent_link.primary_key or
possible_parent_link.model._meta.abstract):
ret.append(instance.pk)
continue
ret.append(getattr(instance, field.attname))
return tuple(ret)
def get_attname_column(self):
attname, column = super(ForeignObject, self).get_attname_column()
return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source)
def get_reverse_joining_columns(self):
return self.get_joining_columns(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
"""
Returns an extra filter condition for related object fetching when
user does 'instance.fieldname', that is the extra filter is used in
the descriptor of the field.
The filter should be either a dict usable in .filter(**kwargs) call or
a Q-object. The condition will be ANDed together with the relation's
joining columns.
A parallel method is get_extra_restriction() which is used in
JOIN and subquery conditions.
"""
return {}
def get_extra_restriction(self, where_class, alias, related_alias):
"""
Returns a pair condition used for joining and subquery pushdown. The
condition is something that responds to as_sql(compiler, connection)
method.
Note that currently referring both the 'alias' and 'related_alias'
will not work in some conditions, like subquery pushdown.
A parallel method is get_extra_descriptor_filter() which is used in
instance.fieldname related object fetching.
"""
return None
def get_path_info(self):
"""
Get path from this field to the related model.
"""
opts = self.rel.to._meta
from_opts = self.model._meta
return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.rel.to._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
return pathinfos
def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookups,
raw_value):
from django.db.models.sql.where import SubqueryConstraint, AND, OR
root_constraint = constraint_class()
assert len(targets) == len(sources)
if len(lookups) > 1:
raise exceptions.FieldError('Relation fields do not support nested lookups')
lookup_type = lookups[0]
def get_normalized_value(value):
from django.db.models import Model
if isinstance(value, Model):
value_list = []
for source in sources:
# Account for one-to-one relations when sent a different model
while not isinstance(value, source.model) and source.rel:
source = source.rel.to._meta.get_field(source.rel.field_name)
value_list.append(getattr(value, source.attname))
return tuple(value_list)
elif not isinstance(value, tuple):
return (value,)
return value
is_multicolumn = len(self.related_fields) > 1
if (hasattr(raw_value, '_as_sql') or
hasattr(raw_value, 'get_compiler')):
root_constraint.add(SubqueryConstraint(alias, [target.column for target in targets],
[source.name for source in sources], raw_value),
AND)
elif lookup_type == 'isnull':
root_constraint.add(IsNull(targets[0].get_col(alias, sources[0]), raw_value), AND)
elif (lookup_type == 'exact' or (lookup_type in ['gt', 'lt', 'gte', 'lte']
and not is_multicolumn)):
value = get_normalized_value(raw_value)
for target, source, val in zip(targets, sources, value):
lookup_class = target.get_lookup(lookup_type)
root_constraint.add(
lookup_class(target.get_col(alias, source), val), AND)
elif lookup_type in ['range', 'in'] and not is_multicolumn:
values = [get_normalized_value(value) for value in raw_value]
value = [val[0] for val in values]
lookup_class = targets[0].get_lookup(lookup_type)
root_constraint.add(lookup_class(targets[0].get_col(alias, sources[0]), value), AND)
elif lookup_type == 'in':
values = [get_normalized_value(value) for value in raw_value]
for value in values:
value_constraint = constraint_class()
for source, target, val in zip(sources, targets, value):
lookup_class = target.get_lookup('exact')
lookup = lookup_class(target.get_col(alias, source), val)
value_constraint.add(lookup, AND)
root_constraint.add(value_constraint, OR)
else:
raise TypeError('Related Field got invalid lookup: %s' % lookup_type)
return root_constraint
@property
def attnames(self):
return tuple(field.attname for field in self.local_related_fields)
def get_defaults(self):
return tuple(field.get_default() for field in self.local_related_fields)
def contribute_to_class(self, cls, name, virtual_only=False):
super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# and swapped models don't get a related descriptor.
if not self.rel.is_hidden() and not related.related_model._meta.swapped:
setattr(cls, related.get_accessor_name(), self.related_accessor_class(related))
# While 'limit_choices_to' might be a callable, simply pass
# it along for later - this is too early because it's still
# model load time.
if self.rel.limit_choices_to:
cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
class ForeignKey(ForeignObject):
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
empty_strings_allowed = False
default_error_messages = {
'invalid': _('%(model)s instance with %(field)s %(value)r does not exist.')
}
description = _("Foreign Key (type determined by related field)")
def __init__(self, to, to_field=None, rel_class=ManyToOneRel,
db_constraint=True, **kwargs):
try:
to._meta.model_name
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), (
"%s(%r) is invalid. First parameter to ForeignKey must be "
"either a model, a model name, or the string %r" % (
self.__class__.__name__, to,
RECURSIVE_RELATIONSHIP_CONSTANT,
)
)
else:
# For backwards compatibility purposes, we need to *try* and set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
if 'db_index' not in kwargs:
kwargs['db_index'] = True
self.db_constraint = db_constraint
kwargs['rel'] = rel_class(
self, to, to_field,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
super(ForeignKey, self).__init__(to, ['self'], [to_field], **kwargs)
def check(self, **kwargs):
errors = super(ForeignKey, self).check(**kwargs)
errors.extend(self._check_on_delete())
errors.extend(self._check_unique())
return errors
def _check_on_delete(self):
on_delete = getattr(self.rel, 'on_delete', None)
if on_delete == SET_NULL and not self.null:
return [
checks.Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=self,
id='fields.E320',
)
]
elif on_delete == SET_DEFAULT and not self.has_default():
return [
checks.Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=self,
id='fields.E321',
)
]
else:
return []
def _check_unique(self, **kwargs):
return [
checks.Warning(
'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
obj=self,
id='fields.W342',
)
] if self.unique else []
def deconstruct(self):
name, path, args, kwargs = super(ForeignKey, self).deconstruct()
del kwargs['to_fields']
del kwargs['from_fields']
# Handle the simpler arguments
if self.db_index:
del kwargs['db_index']
else:
kwargs['db_index'] = False
if self.db_constraint is not True:
kwargs['db_constraint'] = self.db_constraint
# Rel needs more work.
to_meta = getattr(self.rel.to, "_meta", None)
if self.rel.field_name and (not to_meta or (to_meta.pk and self.rel.field_name != to_meta.pk.name)):
kwargs['to_field'] = self.rel.field_name
return name, path, args, kwargs
@property
def related_field(self):
return self.foreign_related_fields[0]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.rel.to._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
return pathinfos
def validate(self, value, model_instance):
if self.rel.parent_link:
return
super(ForeignKey, self).validate(value, model_instance)
if value is None:
return
using = router.db_for_read(model_instance.__class__, instance=model_instance)
qs = self.rel.to._default_manager.using(using).filter(
**{self.rel.field_name: value}
)
qs = qs.complex_filter(self.get_limit_choices_to())
if not qs.exists():
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={
'model': self.rel.to._meta.verbose_name, 'pk': value,
'field': self.rel.field_name, 'value': value,
}, # 'pk' is included for backwards compatibility
)
def get_attname(self):
return '%s_id' % self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.rel.to):
return getattr(field_default, self.related_field.attname)
return field_default
def get_db_prep_save(self, value, connection):
if value is None or (value == '' and
(not self.related_field.empty_strings_allowed or
connection.features.interprets_empty_strings_as_nulls)):
return None
else:
return self.related_field.get_db_prep_save(value, connection=connection)
def get_db_prep_value(self, value, connection, prepared=False):
return self.related_field.get_db_prep_value(value, connection, prepared)
def value_to_string(self, obj):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# we have to check that the length of choices is *2*, not 1,
# because SelectFields always have an initial "blank" value.
if not self.blank and self.choices:
choice_list = self.get_choices_default()
if len(choice_list) == 2:
return smart_text(choice_list[1][0])
return super(ForeignKey, self).value_to_string(obj)
def contribute_to_related_class(self, cls, related):
super(ForeignKey, self).contribute_to_related_class(cls, related)
if self.rel.field_name is None:
self.rel.field_name = cls._meta.pk.name
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
if isinstance(self.rel.to, six.string_types):
raise ValueError("Cannot create form field for %r yet, because "
"its related model %r has not been loaded yet" %
(self.name, self.rel.to))
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.rel.to._default_manager.using(db),
'to_field_name': self.rel.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
def db_type(self, connection):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields however, the only
        # thing we can do is make AutoField an IntegerField.
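        # Illustrative: a ForeignKey to a model whose pk is an AutoField ends up
        # as a plain integer column, i.e. IntegerField().db_type(connection).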
rel_field = self.related_field
if (isinstance(rel_field, AutoField) or
(not connection.features.related_fields_match_type and
isinstance(rel_field, (PositiveIntegerField,
PositiveSmallIntegerField)))):
return IntegerField().db_type(connection=connection)
return rel_field.db_type(connection=connection)
def db_parameters(self, connection):
return {"type": self.db_type(connection), "check": []}
def convert_empty_strings(self, value, expression, connection, context):
if (not value) and isinstance(value, six.string_types):
return None
return value
def get_db_converters(self, connection):
converters = super(ForeignKey, self).get_db_converters(connection)
if connection.features.interprets_empty_strings_as_nulls:
converters += [self.convert_empty_strings]
return converters
def get_col(self, alias, output_field=None):
return super(ForeignKey, self).get_col(alias, output_field or self.related_field)
class OneToOneField(ForeignKey):
"""
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that it always carries a "unique" constraint with it and the reverse relation
always returns the object pointed to (since there will only ever be one),
rather than returning a list.
"""
# Field flags
many_to_many = False
many_to_one = False
one_to_many = False
one_to_one = True
related_accessor_class = SingleRelatedObjectDescriptor
description = _("One-to-one relationship")
def __init__(self, to, to_field=None, **kwargs):
kwargs['unique'] = True
super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(OneToOneField, self).deconstruct()
if "unique" in kwargs:
del kwargs['unique']
return name, path, args, kwargs
def formfield(self, **kwargs):
if self.rel.parent_link:
return None
return super(OneToOneField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.rel.to):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
def _check_unique(self, **kwargs):
# override ForeignKey since check isn't applicable here
return []
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
managed = True
if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
to_model = field.rel.to
to = to_model.split('.')[-1]
def set_managed(field, model, cls):
field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
add_lazy_relation(klass, field, to_model, set_managed)
elif isinstance(field.rel.to, six.string_types):
to = klass._meta.object_name
to_model = klass
managed = klass._meta.managed
else:
to = field.rel.to._meta.object_name
to_model = field.rel.to
managed = klass._meta.managed or to_model._meta.managed
name = '%s_%s' % (klass._meta.object_name, field.name)
if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
from_ = 'from_%s' % to.lower()
to = 'to_%s' % to.lower()
else:
from_ = klass._meta.model_name
to = to.lower()
meta = type(str('Meta'), (object,), {
'db_table': field._get_m2m_db_table(klass._meta),
'managed': managed,
'auto_created': klass,
'app_label': klass._meta.app_label,
'db_tablespace': klass._meta.db_tablespace,
'unique_together': (from_, to),
'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
'apps': field.model._meta.apps,
})
# Construct and return the new class.
return type(str(name), (models.Model,), {
'Meta': meta,
'__module__': klass.__module__,
from_: models.ForeignKey(
klass,
related_name='%s+' % name,
db_tablespace=field.db_tablespace,
db_constraint=field.rel.db_constraint,
),
to: models.ForeignKey(
to_model,
related_name='%s+' % name,
db_tablespace=field.db_tablespace,
db_constraint=field.rel.db_constraint,
)
})
class ManyToManyField(RelatedField):
# Field flags
many_to_many = True
many_to_one = False
one_to_many = False
one_to_one = False
description = _("Many-to-many relationship")
def __init__(self, to, db_constraint=True, swappable=True, **kwargs):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), (
"%s(%r) is invalid. First parameter to ManyToManyField must be "
"either a model, a model name, or the string %r" %
(self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
)
# Class names must be ASCII in Python 2.x, so we forcibly coerce it
# here to break early if there's a problem.
to = str(to)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = ManyToManyRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
through=kwargs.pop('through', None),
through_fields=kwargs.pop('through_fields', None),
db_constraint=db_constraint,
)
self.swappable = swappable
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
super(ManyToManyField, self).__init__(**kwargs)
def check(self, **kwargs):
errors = super(ManyToManyField, self).check(**kwargs)
errors.extend(self._check_unique(**kwargs))
errors.extend(self._check_relationship_model(**kwargs))
errors.extend(self._check_ignored_options(**kwargs))
return errors
def _check_unique(self, **kwargs):
if self.unique:
return [
checks.Error(
'ManyToManyFields cannot be unique.',
hint=None,
obj=self,
id='fields.E330',
)
]
return []
def _check_ignored_options(self, **kwargs):
warnings = []
if self.null:
warnings.append(
checks.Warning(
'null has no effect on ManyToManyField.',
hint=None,
obj=self,
id='fields.W340',
)
)
if len(self._validators) > 0:
warnings.append(
checks.Warning(
'ManyToManyField does not support validators.',
hint=None,
obj=self,
id='fields.W341',
)
)
return warnings
def _check_relationship_model(self, from_model=None, **kwargs):
if hasattr(self.rel.through, '_meta'):
qualified_model_name = "%s.%s" % (
self.rel.through._meta.app_label, self.rel.through.__name__)
else:
qualified_model_name = self.rel.through
errors = []
if self.rel.through not in apps.get_models(include_auto_created=True):
# The relationship model is not installed.
errors.append(
checks.Error(
("Field specifies a many-to-many relation through model "
"'%s', which has not been installed.") %
qualified_model_name,
hint=None,
obj=self,
id='fields.E331',
)
)
else:
assert from_model is not None, \
"ManyToManyField with intermediate " \
"tables cannot be checked if you don't pass the model " \
"where the field is attached to."
# Set some useful local variables
to_model = self.rel.to
from_model_name = from_model._meta.object_name
if isinstance(to_model, six.string_types):
to_model_name = to_model
else:
to_model_name = to_model._meta.object_name
relationship_model_name = self.rel.through._meta.object_name
self_referential = from_model == to_model
# Check symmetrical attribute.
if (self_referential and self.rel.symmetrical and
not self.rel.through._meta.auto_created):
errors.append(
checks.Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=self,
id='fields.E332',
)
)
# Count foreign keys in intermediate model
if self_referential:
seen_self = sum(from_model == getattr(field.rel, 'to', None)
for field in self.rel.through._meta.fields)
if seen_self > 2 and not self.rel.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than two foreign keys "
"to '%s', which is ambiguous. You must specify "
"which two foreign keys Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
hint=("Use through_fields to specify which two "
"foreign keys Django should use."),
obj=self.rel.through,
id='fields.E333',
)
)
else:
# Count foreign keys in relationship model
seen_from = sum(from_model == getattr(field.rel, 'to', None)
for field in self.rel.through._meta.fields)
seen_to = sum(to_model == getattr(field.rel, 'to', None)
for field in self.rel.through._meta.fields)
if seen_from > 1 and not self.rel.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"from '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
hint=('If you want to create a recursive relationship, '
'use ForeignKey("self", symmetrical=False, '
'through="%s").') % relationship_model_name,
obj=self,
id='fields.E334',
)
)
if seen_to > 1 and not self.rel.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"to '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, to_model_name),
hint=('If you want to create a recursive '
'relationship, use ForeignKey("self", '
'symmetrical=False, through="%s").') % relationship_model_name,
obj=self,
id='fields.E335',
)
)
if seen_from == 0 or seen_to == 0:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it does not have a foreign key to '%s' or '%s'.") % (
self, from_model_name, to_model_name
),
hint=None,
obj=self.rel.through,
id='fields.E336',
)
)
# Validate `through_fields`
if self.rel.through_fields is not None:
# Validate that we're given an iterable of at least two items
# and that none of them is "falsy"
if not (len(self.rel.through_fields) >= 2 and
self.rel.through_fields[0] and self.rel.through_fields[1]):
errors.append(
checks.Error(
("Field specifies 'through_fields' but does not "
"provide the names of the two link fields that should be "
"used for the relation through model "
"'%s'.") % qualified_model_name,
hint=("Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"),
obj=self,
id='fields.E337',
)
)
# Validate the given through fields -- they should be actual
# fields on the through model, and also be foreign keys to the
# expected models
else:
assert from_model is not None, \
"ManyToManyField with intermediate " \
"tables cannot be checked if you don't pass the model " \
"where the field is attached to."
source, through, target = from_model, self.rel.through, self.rel.to
source_field_name, target_field_name = self.rel.through_fields[:2]
for field_name, related_model in ((source_field_name, source),
(target_field_name, target)):
possible_field_names = []
for f in through._meta.fields:
if hasattr(f, 'rel') and getattr(f.rel, 'to', None) == related_model:
possible_field_names.append(f.name)
if possible_field_names:
hint = ("Did you mean one of the following foreign "
"keys to '%s': %s?") % (related_model._meta.object_name,
', '.join(possible_field_names))
else:
hint = None
try:
field = through._meta.get_field(field_name)
except FieldDoesNotExist:
errors.append(
checks.Error(
("The intermediary model '%s' has no field '%s'.") % (
qualified_model_name, field_name),
hint=hint,
obj=self,
id='fields.E338',
)
)
else:
if not (hasattr(field, 'rel') and
getattr(field.rel, 'to', None) == related_model):
errors.append(
checks.Error(
"'%s.%s' is not a foreign key to '%s'." % (
through._meta.object_name, field_name,
related_model._meta.object_name),
hint=hint,
obj=self,
id='fields.E339',
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super(ManyToManyField, self).deconstruct()
# Handle the simpler arguments
if self.db_table is not None:
kwargs['db_table'] = self.db_table
if self.rel.db_constraint is not True:
kwargs['db_constraint'] = self.rel.db_constraint
if self.rel.related_name is not None:
kwargs['related_name'] = self.rel.related_name
if self.rel.related_query_name is not None:
kwargs['related_query_name'] = self.rel.related_query_name
# Rel needs more work.
if isinstance(self.rel.to, six.string_types):
kwargs['to'] = self.rel.to
else:
kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
if getattr(self.rel, 'through', None) is not None:
if isinstance(self.rel.through, six.string_types):
kwargs['through'] = self.rel.through
elif not self.rel.through._meta.auto_created:
kwargs['through'] = "%s.%s" % (self.rel.through._meta.app_label, self.rel.through._meta.object_name)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ManyToManyField pointing to a "
"model that is swapped in place of more than one model "
"(%s and %s)" % (kwargs['to'].setting_name, swappable_setting)
)
# Set it
from django.db.migrations.writer import SettingsReference
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def _get_path_info(self, direct=False):
"""
Called by both direct and indirect m2m traversal.
"""
pathinfos = []
int_model = self.rel.through
linkfield1 = int_model._meta.get_field(self.m2m_field_name())
linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name())
if direct:
join1infos = linkfield1.get_reverse_path_info()
join2infos = linkfield2.get_path_info()
else:
join1infos = linkfield2.get_reverse_path_info()
join2infos = linkfield1.get_path_info()
pathinfos.extend(join1infos)
pathinfos.extend(join2infos)
return pathinfos
def get_path_info(self):
return self._get_path_info(direct=True)
def get_reverse_path_info(self):
return self._get_path_info(direct=False)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"Function that can be curried to provide the m2m table name for this relation"
if self.rel.through is not None:
return self.rel.through._meta.db_table
elif self.db_table:
return self.db_table
else:
return utils.truncate_name('%s_%s' % (opts.db_table, self.name),
connection.ops.max_name_length())
def _get_m2m_attr(self, related, attr):
"Function that can be curried to provide the source accessor or DB column name for the m2m table"
cache_attr = '_m2m_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
if self.rel.through_fields is not None:
link_field_name = self.rel.through_fields[0]
else:
link_field_name = None
for f in self.rel.through._meta.fields:
if (f.is_relation and f.rel.to == related.related_model and
(link_field_name is None or link_field_name == f.name)):
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"Function that can be curried to provide the related accessor or DB column name for the m2m table"
cache_attr = '_m2m_reverse_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
if self.rel.through_fields is not None:
link_field_name = self.rel.through_fields[1]
else:
link_field_name = None
for f in self.rel.through._meta.fields:
# NOTE f.rel.to != f.related_model
if f.is_relation and f.rel.to == related.model:
if link_field_name is None and related.related_model == related.model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
elif link_field_name is None or link_field_name == f.name:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_text(data)
def contribute_to_class(self, cls, name, **kwargs):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
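        # Illustrative: for friends = ManyToManyField("self") the hidden
        # related_name below becomes "friends_rel_+".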
if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
self.rel.related_name = "%s_rel_+" % name
elif self.rel.is_hidden():
# If the backwards relation is disabled, replace the original
# related_name with one generated from the m2m field name. Django
# still uses backwards relations internally and we need to avoid
# clashes between multiple m2m fields with related_name == '+'.
self.rel.related_name = "_%s_%s_+" % (cls.__name__.lower(), name)
super(ManyToManyField, self).contribute_to_class(cls, name, **kwargs)
# The intermediate m2m model is not auto created if:
# 1) There is a manually specified intermediate, or
# 2) The class owning the m2m field is abstract.
# 3) The class owning the m2m field has been swapped out.
if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped:
self.rel.through = create_many_to_many_intermediary_model(self, cls)
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
# Set up the accessor for the m2m table name for the relation
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
# Populate some necessary rel arguments so that cross-app relations
# work correctly.
if isinstance(self.rel.through, six.string_types):
def resolve_through_model(field, model, cls):
field.rel.through = model
add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
def contribute_to_related_class(self, cls, related):
# Internal M2Ms (i.e., those with a related name ending with '+')
# and swapped models don't get a related descriptor.
if not self.rel.is_hidden() and not related.related_model._meta.swapped:
setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
# Set up the accessors for the column names on the m2m table
self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
self.m2m_target_field_name = lambda: get_m2m_rel().field_name
get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelMultipleChoiceField,
'queryset': self.rel.to._default_manager.using(db),
}
defaults.update(kwargs)
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
initial = defaults['initial']
if callable(initial):
initial = initial()
defaults['initial'] = [i._get_pk_val() for i in initial]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
def db_parameters(self, connection):
return {"type": None, "check": None}
| apache-2.0 |
rallylee/gem5 | configs/ruby/GPU_VIPER.py | 12 | 25726 | #
# Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Lisa Hsu
#
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts
from topologies.Cluster import Cluster
from topologies.Crossbar import Crossbar
class CntrlBase:
_seqs = 0
@classmethod
def seqCount(cls):
        # Use CntrlBase._seqs, not the subclass, since we need a global count
CntrlBase._seqs += 1
return CntrlBase._seqs - 1
_cntrls = 0
@classmethod
def cntrlCount(cls):
        # Use CntrlBase._cntrls, not the subclass, since we need a global count
CntrlBase._cntrls += 1
return CntrlBase._cntrls - 1
_version = 0
@classmethod
def versionCount(cls):
cls._version += 1 # Use count for this particular type
return cls._version - 1
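# Illustrative (assumption): after creating two TCPCntrl objects and one CPCntrl,
# versionCount() has returned 0, 1 for the TCPs and 0 for the CP (per subclass),
# while seqCount() and cntrlCount() number globally across all controller types.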
class L1Cache(RubyCache):
resourceStalls = False
dataArrayBanks = 2
tagArrayBanks = 2
dataAccessLatency = 1
tagAccessLatency = 1
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class L2Cache(RubyCache):
resourceStalls = False
assoc = 16
dataArrayBanks = 16
tagArrayBanks = 16
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class CPCntrl(CorePair_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1Icache = L1Cache()
self.L1Icache.create(options.l1i_size, options.l1i_assoc, options)
self.L1D0cache = L1Cache()
self.L1D0cache.create(options.l1d_size, options.l1d_assoc, options)
self.L1D1cache = L1Cache()
self.L1D1cache.create(options.l1d_size, options.l1d_assoc, options)
self.L2cache = L2Cache()
self.L2cache.create(options.l2_size, options.l2_assoc, options)
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1Icache
self.sequencer.dcache = self.L1D0cache
self.sequencer.ruby_system = ruby_system
self.sequencer.coreid = 0
self.sequencer.is_cpu_sequencer = True
self.sequencer1 = RubySequencer()
self.sequencer1.version = self.seqCount()
self.sequencer1.icache = self.L1Icache
self.sequencer1.dcache = self.L1D1cache
self.sequencer1.ruby_system = ruby_system
self.sequencer1.coreid = 1
self.sequencer1.is_cpu_sequencer = True
self.issue_latency = options.cpu_to_dir_latency
self.send_evictions = send_evicts(options)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCPCache(RubyCache):
size = "16kB"
assoc = 16
dataArrayBanks = 16 #number of data banks
tagArrayBanks = 16 #number of tag banks
dataAccessLatency = 4
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.tcp_size)
self.assoc = options.tcp_assoc
self.resourceStalls = options.no_tcc_resource_stalls
self.replacement_policy = PseudoLRUReplacementPolicy()
class TCPCntrl(TCP_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency,
dataAccessLatency = options.TCP_latency)
self.L1cache.resourceStalls = options.no_resource_stalls
self.L1cache.create(options)
self.issue_latency = 1
self.coalescer = VIPERCoalescer()
self.coalescer.version = self.seqCount()
self.coalescer.icache = self.L1cache
self.coalescer.dcache = self.L1cache
self.coalescer.ruby_system = ruby_system
self.coalescer.support_inst_reqs = False
self.coalescer.is_cpu_sequencer = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.is_cpu_sequencer = True
self.use_seq_not_coal = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def createCP(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency,
dataAccessLatency = options.TCP_latency)
self.L1cache.resourceStalls = options.no_resource_stalls
self.L1cache.create(options)
self.issue_latency = 1
self.coalescer = VIPERCoalescer()
self.coalescer.version = self.seqCount()
self.coalescer.icache = self.L1cache
self.coalescer.dcache = self.L1cache
self.coalescer.ruby_system = ruby_system
self.coalescer.support_inst_reqs = False
self.coalescer.is_cpu_sequencer = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.is_cpu_sequencer = True
self.use_seq_not_coal = True
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class SQCCache(RubyCache):
dataArrayBanks = 8
tagArrayBanks = 8
dataAccessLatency = 1
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.sqc_size)
self.assoc = options.sqc_assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class SQCCntrl(SQC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = SQCCache()
self.L1cache.create(options)
self.L1cache.resourceStalls = options.no_resource_stalls
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.support_data_reqs = False
self.sequencer.is_cpu_sequencer = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCC(RubyCache):
size = MemorySize("256kB")
assoc = 16
dataAccessLatency = 8
tagAccessLatency = 2
resourceStalls = True
def create(self, options):
self.assoc = options.tcc_assoc
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
s = options.num_compute_units
tcc_size = s * 128
tcc_size = str(tcc_size)+'kB'
self.size = MemorySize(tcc_size)
self.dataArrayBanks = 64
self.tagArrayBanks = 64
else:
self.size = MemorySize(options.tcc_size)
self.dataArrayBanks = 256 / options.num_tccs #number of data banks
self.tagArrayBanks = 256 / options.num_tccs #number of tag banks
self.size.value = self.size.value / options.num_tccs
if ((self.size.value / long(self.assoc)) < 128):
self.size.value = long(128 * self.assoc)
self.start_index_bit = math.log(options.cacheline_size, 2) + \
math.log(options.num_tccs, 2)
self.replacement_policy = PseudoLRUReplacementPolicy()
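        # Worked example (illustrative numbers): with --bw-scalor > 0 and
        # 8 compute units the TCC becomes 8 * 128 = '1024kB' over 64 banks;
        # otherwise --tcc-size is split evenly across the --num-tccs banks,
        # with the 128-bytes-per-way floor enforced above.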
class TCCCntrl(TCC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L2cache = TCC()
self.L2cache.create(options)
self.L2cache.resourceStalls = options.no_tcc_resource_stalls
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class L3Cache(RubyCache):
dataArrayBanks = 16
tagArrayBanks = 16
def create(self, options, ruby_system, system):
self.size = MemorySize(options.l3_size)
self.size.value /= options.num_dirs
self.assoc = options.l3_assoc
self.dataArrayBanks /= options.num_dirs
self.tagArrayBanks /= options.num_dirs
self.dataArrayBanks /= options.num_dirs
self.tagArrayBanks /= options.num_dirs
self.dataAccessLatency = options.l3_data_latency
self.tagAccessLatency = options.l3_tag_latency
self.resourceStalls = False
self.replacement_policy = PseudoLRUReplacementPolicy()
class L3Cntrl(L3Cache_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L3cache = L3Cache()
self.L3cache.create(options, ruby_system, system)
self.l3_response_latency = max(self.L3cache.dataAccessLatency, self.L3cache.tagAccessLatency)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
class DirMem(RubyDirectoryMemory, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
phys_mem_size = AddrRange(options.mem_size).size()
mem_module_size = phys_mem_size / options.num_dirs
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
self.size = dir_size
class DirCntrl(Directory_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.response_latency = 30
self.directory = DirMem()
self.directory.create(options, ruby_system, system)
self.L3CacheMemory = L3Cache()
self.L3CacheMemory.create(options, ruby_system, system)
self.l3_hit_latency = max(self.L3CacheMemory.dataAccessLatency,
self.L3CacheMemory.tagAccessLatency)
self.number_of_TBEs = options.num_tbes
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
def define_options(parser):
parser.add_option("--num-subcaches", type = "int", default = 4)
parser.add_option("--l3-data-latency", type = "int", default = 20)
parser.add_option("--l3-tag-latency", type = "int", default = 15)
parser.add_option("--cpu-to-dir-latency", type = "int", default = 120)
parser.add_option("--gpu-to-dir-latency", type = "int", default = 120)
parser.add_option("--no-resource-stalls", action = "store_false",
default = True)
parser.add_option("--no-tcc-resource-stalls", action = "store_false",
default = True)
parser.add_option("--use-L3-on-WT", action = "store_true", default = False)
parser.add_option("--num-tbes", type = "int", default = 256)
parser.add_option("--l2-latency", type = "int", default = 50) # load to use
parser.add_option("--num-tccs", type = "int", default = 1,
help = "number of TCC banks in the GPU")
parser.add_option("--sqc-size", type = 'string', default = '32kB',
help = "SQC cache size")
parser.add_option("--sqc-assoc", type = 'int', default = 8,
help = "SQC cache assoc")
parser.add_option("--WB_L1", action = "store_true", default = False,
help = "writeback L1")
parser.add_option("--WB_L2", action = "store_true", default = False,
help = "writeback L2")
parser.add_option("--TCP_latency", type = "int", default = 4,
help = "TCP latency")
parser.add_option("--TCC_latency", type = "int", default = 16,
help = "TCC latency")
parser.add_option("--tcc-size", type = 'string', default = '256kB',
help = "agregate tcc size")
parser.add_option("--tcc-assoc", type = 'int', default = 16,
help = "tcc assoc")
parser.add_option("--tcp-size", type = 'string', default = '16kB',
help = "tcp size")
parser.add_option("--tcp-assoc", type = 'int', default = 16,
help = "tcp assoc")
parser.add_option("--noL1", action = "store_true", default = False,
help = "bypassL1")
def create_system(options, full_system, system, dma_devices, ruby_system):
if buildEnv['PROTOCOL'] != 'GPU_VIPER':
panic("This script requires the GPU_VIPER protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes
# must be listed before the directory nodes and directory nodes before
# dma nodes, etc.
#
cp_cntrl_nodes = []
tcp_cntrl_nodes = []
sqc_cntrl_nodes = []
tcc_cntrl_nodes = []
dir_cntrl_nodes = []
l3_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
# For an odd number of CPUs, still create the right number of controllers
TCC_bits = int(math.log(options.num_tccs, 2))
# This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
# Clusters
crossbar_bw = None
mainCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
#Assuming a 2GHz clock
crossbar_bw = 16 * options.num_compute_units * options.bw_scalor
mainCluster = Cluster(intBW=crossbar_bw)
else:
mainCluster = Cluster(intBW=8) # 16 GB/s
for i in xrange(options.num_dirs):
dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits)
dir_cntrl.create(options, ruby_system, system)
dir_cntrl.number_of_TBEs = options.num_tbes
dir_cntrl.useL3OnWT = options.use_L3_on_WT
# the number_of_TBEs is inclusive of TBEs below
# Connect the Directory controller to the ruby network
dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
dir_cntrl.requestFromCores.slave = ruby_system.network.master
dir_cntrl.responseFromCores = MessageBuffer()
dir_cntrl.responseFromCores.slave = ruby_system.network.master
dir_cntrl.unblockFromCores = MessageBuffer()
dir_cntrl.unblockFromCores.slave = ruby_system.network.master
dir_cntrl.probeToCore = MessageBuffer()
dir_cntrl.probeToCore.master = ruby_system.network.slave
dir_cntrl.responseToCore = MessageBuffer()
dir_cntrl.responseToCore.master = ruby_system.network.slave
dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.responseFromMemory = MessageBuffer()
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
mainCluster.add(dir_cntrl)
cpuCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
else:
cpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
for i in xrange((options.num_cpus + 1) / 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
exec("ruby_system.cp_cntrl%d = cp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
# Connect the CP controllers and the network
cp_cntrl.requestFromCore = MessageBuffer()
cp_cntrl.requestFromCore.master = ruby_system.network.slave
cp_cntrl.responseFromCore = MessageBuffer()
cp_cntrl.responseFromCore.master = ruby_system.network.slave
cp_cntrl.unblockFromCore = MessageBuffer()
cp_cntrl.unblockFromCore.master = ruby_system.network.slave
cp_cntrl.probeToCore = MessageBuffer()
cp_cntrl.probeToCore.slave = ruby_system.network.master
cp_cntrl.responseToCore = MessageBuffer()
cp_cntrl.responseToCore.slave = ruby_system.network.master
cp_cntrl.mandatoryQueue = MessageBuffer()
cp_cntrl.triggerQueue = MessageBuffer(ordered = True)
cpuCluster.add(cp_cntrl)
gpuCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
else:
gpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
for i in xrange(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
number_of_TBEs = 2560)
# TBEs set to max outstanding requests
tcp_cntrl.create(options, ruby_system, system)
tcp_cntrl.WB = options.WB_L1
tcp_cntrl.disableL1 = options.noL1
tcp_cntrl.L1cache.tagAccessLatency = options.TCP_latency
tcp_cntrl.L1cache.dataAccessLatency = options.TCP_latency
exec("ruby_system.tcp_cntrl%d = tcp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(tcp_cntrl.coalescer)
tcp_cntrl_nodes.append(tcp_cntrl)
# Connect the TCP controller to the ruby network
tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
tcp_cntrl.unblockFromCore = MessageBuffer()
tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
tcp_cntrl.probeToTCP.slave = ruby_system.network.master
tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseToTCP.slave = ruby_system.network.master
tcp_cntrl.mandatoryQueue = MessageBuffer()
gpuCluster.add(tcp_cntrl)
for i in xrange(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(sqc_cntrl.sequencer)
# Connect the SQC controller to the ruby network
sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
sqc_cntrl.probeToSQC.slave = ruby_system.network.master
sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
sqc_cntrl.responseToSQC.slave = ruby_system.network.master
sqc_cntrl.mandatoryQueue = MessageBuffer()
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_cp):
tcp_ID = options.num_compute_units + i
sqc_ID = options.num_sqc + i
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
number_of_TBEs = 2560)
# TBEs set to max outstanding requests
tcp_cntrl.createCP(options, ruby_system, system)
tcp_cntrl.WB = options.WB_L1
tcp_cntrl.disableL1 = options.noL1
tcp_cntrl.L1cache.tagAccessLatency = options.TCP_latency
tcp_cntrl.L1cache.dataAccessLatency = options.TCP_latency
exec("ruby_system.tcp_cntrl%d = tcp_cntrl" % tcp_ID)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(tcp_cntrl.sequencer)
tcp_cntrl_nodes.append(tcp_cntrl)
# Connect the CP (TCP) controllers to the ruby network
tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True)
tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
tcp_cntrl.probeToTCP.slave = ruby_system.network.master
tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseToTCP.slave = ruby_system.network.master
tcp_cntrl.mandatoryQueue = MessageBuffer()
gpuCluster.add(tcp_cntrl)
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % sqc_ID)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(sqc_cntrl.sequencer)
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_tccs):
tcc_cntrl = TCCCntrl(l2_response_latency = options.TCC_latency)
tcc_cntrl.create(options, ruby_system, system)
tcc_cntrl.l2_request_latency = options.gpu_to_dir_latency
tcc_cntrl.l2_response_latency = options.TCC_latency
tcc_cntrl_nodes.append(tcc_cntrl)
tcc_cntrl.WB = options.WB_L2
tcc_cntrl.number_of_TBEs = 2560 * options.num_compute_units
# the number_of_TBEs is inclusive of TBEs below
# Connect the TCC controllers to the ruby network
tcc_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcc_cntrl.requestFromTCP.slave = ruby_system.network.master
tcc_cntrl.responseToCore = MessageBuffer(ordered = True)
tcc_cntrl.responseToCore.master = ruby_system.network.slave
tcc_cntrl.probeFromNB = MessageBuffer()
tcc_cntrl.probeFromNB.slave = ruby_system.network.master
tcc_cntrl.responseFromNB = MessageBuffer()
tcc_cntrl.responseFromNB.slave = ruby_system.network.master
tcc_cntrl.requestToNB = MessageBuffer(ordered = True)
tcc_cntrl.requestToNB.master = ruby_system.network.slave
tcc_cntrl.responseToNB = MessageBuffer()
tcc_cntrl.responseToNB.master = ruby_system.network.slave
tcc_cntrl.unblockToNB = MessageBuffer()
tcc_cntrl.unblockToNB.master = ruby_system.network.slave
tcc_cntrl.triggerQueue = MessageBuffer(ordered = True)
exec("ruby_system.tcc_cntrl%d = tcc_cntrl" % i)
# connect all of the wire buffers between L3 and dirs up
# TCC cntrls added to the GPU cluster
gpuCluster.add(tcc_cntrl)
# Assuming no DMA devices
assert(len(dma_devices) == 0)
# Add cpu/gpu clusters to main cluster
mainCluster.add(cpuCluster)
mainCluster.add(gpuCluster)
ruby_system.network.number_of_virtual_networks = 10
return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
| bsd-3-clause |
Suwings/Yeinw | src/Crypto/Random/OSRNG/posix.py | 125 | 2835 | #
# Random/OSRNG/posix.py : OS entropy source for POSIX systems
#
# Written in 2008 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
__all__ = ['DevURandomRNG']
import errno
import os
import stat
from rng_base import BaseRNG
from Crypto.Util.py3compat import b
class DevURandomRNG(BaseRNG):
def __init__(self, devname=None):
if devname is None:
self.name = "/dev/urandom"
else:
self.name = devname
# Test that /dev/urandom is a character special device
f = open(self.name, "rb", 0)
fmode = os.fstat(f.fileno())[stat.ST_MODE]
if not stat.S_ISCHR(fmode):
f.close()
raise TypeError("%r is not a character special device" % (self.name,))
self.__file = f
BaseRNG.__init__(self)
def _close(self):
self.__file.close()
def _read(self, N):
# Starting with Python 3 open with buffering=0 returns a FileIO object.
# FileIO.read behaves like read(2) and not like fread(3) and thus we
# have to handle the case that read returns less data as requested here
# more carefully.
data = b("")
while len(data) < N:
try:
d = self.__file.read(N - len(data))
except IOError, e:
# read(2) has been interrupted by a signal; redo the read
if e.errno == errno.EINTR:
continue
raise
if d is None:
# __file is in non-blocking mode and no data is available
return data
if len(d) == 0:
# __file is in blocking mode and arrived at EOF
return data
data += d
return data
def new(*args, **kwargs):
return DevURandomRNG(*args, **kwargs)
# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-3.0 |
WoLpH/EventGhost | lib27/site-packages/curl/__init__.py | 8 | 7023 | '''A high-level interface to the pycurl extension'''
# ** mfx NOTE: the CGI class uses "black magic" using COOKIEFILE in
# combination with a non-existent file name. See the libcurl docs
# for more info.
import sys, pycurl
py3 = sys.version_info[0] == 3
# python 2/3 compatibility
if py3:
import urllib.parse as urllib_parse
from urllib.parse import urljoin
from io import BytesIO
else:
import urllib as urllib_parse
from urlparse import urljoin
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
try:
import signal
from signal import SIGPIPE, SIG_IGN
signal.signal(SIGPIPE, SIG_IGN)
except ImportError:
pass
class Curl:
"High-level interface to pycurl functions."
def __init__(self, base_url="", fakeheaders=[]):
self.handle = pycurl.Curl()
# These members might be set.
self.set_url(base_url)
self.verbosity = 0
self.fakeheaders = fakeheaders
# Nothing past here should be modified by the caller.
self.payload = None
self.payload_io = BytesIO()
        self.hdr = ""
# Verify that we've got the right site; harmless on a non-SSL connect.
self.set_option(pycurl.SSL_VERIFYHOST, 2)
# Follow redirects in case it wants to take us to a CGI...
self.set_option(pycurl.FOLLOWLOCATION, 1)
self.set_option(pycurl.MAXREDIRS, 5)
self.set_option(pycurl.NOSIGNAL, 1)
# Setting this option with even a nonexistent file makes libcurl
# handle cookie capture and playback automatically.
self.set_option(pycurl.COOKIEFILE, "/dev/null")
# Set timeouts to avoid hanging too long
self.set_timeout(30)
# Use password identification from .netrc automatically
self.set_option(pycurl.NETRC, 1)
self.set_option(pycurl.WRITEFUNCTION, self.payload_io.write)
def header_callback(x):
self.hdr += x.decode('ascii')
self.set_option(pycurl.HEADERFUNCTION, header_callback)
def set_timeout(self, timeout):
"Set timeout for a retrieving an object"
self.set_option(pycurl.TIMEOUT, timeout)
def set_url(self, url):
"Set the base URL to be retrieved."
self.base_url = url
self.set_option(pycurl.URL, self.base_url)
def set_option(self, *args):
"Set an option on the retrieval."
self.handle.setopt(*args)
def set_verbosity(self, level):
"Set verbosity to 1 to see transactions."
self.set_option(pycurl.VERBOSE, level)
def __request(self, relative_url=None):
"Perform the pending request."
if self.fakeheaders:
self.set_option(pycurl.HTTPHEADER, self.fakeheaders)
if relative_url:
self.set_option(pycurl.URL, urljoin(self.base_url, relative_url))
self.payload = None
self.hdr = ""
self.handle.perform()
self.payload = self.payload_io.getvalue()
return self.payload
def get(self, url="", params=None):
"Ship a GET request for a specified URL, capture the response."
if params:
url += "?" + urllib_parse.urlencode(params)
self.set_option(pycurl.HTTPGET, 1)
return self.__request(url)
def post(self, cgi, params):
"Ship a POST request to a specified CGI, capture the response."
self.set_option(pycurl.POST, 1)
self.set_option(pycurl.POSTFIELDS, urllib_parse.urlencode(params))
return self.__request(cgi)
def body(self):
"Return the body from the last response."
return self.payload
def header(self):
"Return the header from the last response."
return self.hdr
def get_info(self, *args):
"Get information about retrieval."
return self.handle.getinfo(*args)
def info(self):
"Return a dictionary with all info on the last response."
m = {}
m['effective-url'] = self.handle.getinfo(pycurl.EFFECTIVE_URL)
m['http-code'] = self.handle.getinfo(pycurl.HTTP_CODE)
m['total-time'] = self.handle.getinfo(pycurl.TOTAL_TIME)
m['namelookup-time'] = self.handle.getinfo(pycurl.NAMELOOKUP_TIME)
m['connect-time'] = self.handle.getinfo(pycurl.CONNECT_TIME)
m['pretransfer-time'] = self.handle.getinfo(pycurl.PRETRANSFER_TIME)
m['redirect-time'] = self.handle.getinfo(pycurl.REDIRECT_TIME)
m['redirect-count'] = self.handle.getinfo(pycurl.REDIRECT_COUNT)
m['size-upload'] = self.handle.getinfo(pycurl.SIZE_UPLOAD)
m['size-download'] = self.handle.getinfo(pycurl.SIZE_DOWNLOAD)
m['speed-upload'] = self.handle.getinfo(pycurl.SPEED_UPLOAD)
m['header-size'] = self.handle.getinfo(pycurl.HEADER_SIZE)
m['request-size'] = self.handle.getinfo(pycurl.REQUEST_SIZE)
m['content-length-download'] = self.handle.getinfo(pycurl.CONTENT_LENGTH_DOWNLOAD)
m['content-length-upload'] = self.handle.getinfo(pycurl.CONTENT_LENGTH_UPLOAD)
m['content-type'] = self.handle.getinfo(pycurl.CONTENT_TYPE)
m['response-code'] = self.handle.getinfo(pycurl.RESPONSE_CODE)
m['speed-download'] = self.handle.getinfo(pycurl.SPEED_DOWNLOAD)
m['ssl-verifyresult'] = self.handle.getinfo(pycurl.SSL_VERIFYRESULT)
m['filetime'] = self.handle.getinfo(pycurl.INFO_FILETIME)
m['starttransfer-time'] = self.handle.getinfo(pycurl.STARTTRANSFER_TIME)
m['redirect-time'] = self.handle.getinfo(pycurl.REDIRECT_TIME)
m['redirect-count'] = self.handle.getinfo(pycurl.REDIRECT_COUNT)
m['http-connectcode'] = self.handle.getinfo(pycurl.HTTP_CONNECTCODE)
m['httpauth-avail'] = self.handle.getinfo(pycurl.HTTPAUTH_AVAIL)
m['proxyauth-avail'] = self.handle.getinfo(pycurl.PROXYAUTH_AVAIL)
m['os-errno'] = self.handle.getinfo(pycurl.OS_ERRNO)
m['num-connects'] = self.handle.getinfo(pycurl.NUM_CONNECTS)
m['ssl-engines'] = self.handle.getinfo(pycurl.SSL_ENGINES)
m['cookielist'] = self.handle.getinfo(pycurl.INFO_COOKIELIST)
m['lastsocket'] = self.handle.getinfo(pycurl.LASTSOCKET)
m['ftp-entry-path'] = self.handle.getinfo(pycurl.FTP_ENTRY_PATH)
return m
def answered(self, check):
"Did a given check string occur in the last payload?"
return self.payload.find(check) >= 0
def close(self):
"Close a session, freeing resources."
if self.handle:
self.handle.close()
self.handle = None
self.hdr = ""
self.payload = ""
def __del__(self):
self.close()
if __name__ == "__main__":
if len(sys.argv) < 2:
url = 'http://curl.haxx.se'
else:
url = sys.argv[1]
c = Curl()
c.get(url)
print(c.body())
print('='*74 + '\n')
import pprint
pprint.pprint(c.info())
print(c.get_info(pycurl.OS_ERRNO))
print(c.info()['os-errno'])
c.close()
| gpl-2.0 |
h3biomed/ansible | lib/ansible/modules/network/cnos/cnos_reload.py | 52 | 3430 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to reload Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_reload
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Perform switch restart on devices running Lenovo CNOS
description:
- This module allows you to restart the switch using the current startup
configuration. The module is usually invoked after the running
configuration has been saved over the startup configuration.
This module uses SSH to manage network device configuration.
The results of the operation can be viewed in results directory.
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_reload. These are
written in the main.yml file of the tasks directory.
---
- name: Test Reload
cnos_reload:
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_reload_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: str
sample: "Device is Reloading. Please wait..."
'''
import sys
import time
import socket
import array
import json
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except Exception:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=False),
username=dict(required=False),
password=dict(required=False, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),),
supports_check_mode=False)
command = 'reload'
outputfile = module.params['outputfile']
output = ''
cmd = [{'command': command, 'prompt': 'reboot system? (y/n): ',
'answer': 'y'}]
output = output + str(cnos.run_cnos_commands(module, cmd))
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
errorMsg = cnos.checkOutputForError(output)
if(errorMsg in "Device Response Timed out"):
module.exit_json(changed=True,
msg="Device is Reloading. Please wait...")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 |
AbsentMoniker/ECE463Honors | web2py/gluon/contrib/fpdf/template.py | 40 | 11941 | # -*- coding: iso-8859-1 -*-
"PDF Template Helper for FPDF.py"
__author__ = "Mariano Reingart <[email protected]>"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
import sys,os,csv
from fpdf import FPDF
def rgb(col):
    return (col // 65536), (col // 256 % 256), (col % 256)
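# Illustrative example (not from the original module):
#   rgb(0xFF8000) -> (255, 128, 0)
# i.e. a packed 24-bit integer colour decodes into a (red, green, blue) tuple.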
class Template:
def __init__(self, infile=None, elements=None, format='A4', orientation='portrait',
title='', author='', subject='', creator='', keywords=''):
if elements:
self.elements = elements
self.keys = [v['name'].lower() for v in self.elements]
self.handlers = {'T': self.text, 'L': self.line, 'I': self.image,
'B': self.rect, 'BC': self.barcode, }
self.pg_no = 0
self.texts = {}
pdf = self.pdf = FPDF(format=format,orientation=orientation, unit="mm")
pdf.set_title(title)
pdf.set_author(author)
pdf.set_creator(creator)
pdf.set_subject(subject)
pdf.set_keywords(keywords)
def parse_csv(self, infile, delimiter=",", decimal_sep="."):
"Parse template format csv file and create elements dict"
keys = ('name','type','x1','y1','x2','y2','font','size',
'bold','italic','underline','foreground','background',
'align','text','priority', 'multiline')
self.elements = []
for row in csv.reader(open(infile, 'rb'), delimiter=delimiter):
kargs = {}
for i,v in enumerate(row):
if not v.startswith("'") and decimal_sep!=".":
v = v.replace(decimal_sep,".")
else:
v = v
if v=='':
v = None
else:
v = eval(v.strip())
kargs[keys[i]] = v
self.elements.append(kargs)
self.keys = [v['name'].lower() for v in self.elements]
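        # Hypothetical row (illustrative only, not taken from the bundled
        # invoice.csv): with delimiter=";" a line such as
        #   'greeting';'T';20.0;20.0;120.0;25.0;'Arial';12.0;0;0;0;0;65535;'L';'Hello';0;0
        # is eval()'d cell by cell into one element dict keyed by the names above.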
def add_page(self):
self.pg_no += 1
self.texts[self.pg_no] = {}
def __setitem__(self, name, value):
if self.has_key(name):
if isinstance(value,unicode):
value = value.encode("latin1","ignore")
elif value is None:
value = ""
else:
value = str(value)
self.texts[self.pg_no][name.lower()] = value
# setitem shortcut (may be further extended)
set = __setitem__
def has_key(self, name):
return name.lower() in self.keys
def __getitem__(self, name):
if self.has_key(name):
key = name.lower()
if key in self.texts:
# text for this page:
return self.texts[self.pg_no][key]
else:
# find first element for default text:
elements = [element for element in self.elements
if element['name'].lower() == key]
if elements:
return elements[0]['text']
def split_multicell(self, text, element_name):
"Divide (\n) a string using a given element width"
pdf = self.pdf
element = [element for element in self.elements
if element['name'].lower() == element_name.lower()][0]
style = ""
if element['bold']: style += "B"
if element['italic']: style += "I"
if element['underline']: style += "U"
pdf.set_font(element['font'],style,element['size'])
align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(element['align']) # D/I in spanish
if isinstance(text, unicode):
text = text.encode("latin1","ignore")
else:
text = str(text)
return pdf.multi_cell(w=element['x2']-element['x1'],
h=element['y2']-element['y1'],
txt=text,align=align,split_only=True)
def render(self, outfile, dest="F"):
pdf = self.pdf
for pg in range(1, self.pg_no+1):
pdf.add_page()
pdf.set_font('Arial','B',16)
pdf.set_auto_page_break(False,margin=0)
for element in sorted(self.elements,key=lambda x: x['priority']):
# make a copy of the element:
element = dict(element)
element['text'] = self.texts[pg].get(element['name'].lower(), element['text'])
if 'rotate' in element:
pdf.rotate(element['rotate'], element['x1'], element['y1'])
self.handlers[element['type'].upper()](pdf, **element)
if 'rotate' in element:
pdf.rotate(0)
return pdf.output(outfile, dest)
def text(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=10,
bold=False, italic=False, underline=False, align="",
foreground=0, backgroud=65535, multiline=None,
*args, **kwargs):
if text:
if pdf.text_color!=rgb(foreground):
pdf.set_text_color(*rgb(foreground))
if pdf.fill_color!=rgb(backgroud):
pdf.set_fill_color(*rgb(backgroud))
font = font.strip().lower()
if font == 'arial black':
font = 'arial'
style = ""
for tag in 'B', 'I', 'U':
if (text.startswith("<%s>" % tag) and text.endswith("</%s>" %tag)):
text = text[3:-4]
style += tag
if bold: style += "B"
if italic: style += "I"
if underline: style += "U"
align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(align) # D/I in spanish
pdf.set_font(font,style,size)
##m_k = 72 / 2.54
##h = (size/m_k)
pdf.set_xy(x1,y1)
if multiline is None:
# multiline==None: write without wrapping/trimming (default)
pdf.cell(w=x2-x1,h=y2-y1,txt=text,border=0,ln=0,align=align)
elif multiline:
# multiline==True: automatic word - warp
pdf.multi_cell(w=x2-x1,h=y2-y1,txt=text,border=0,align=align)
else:
# multiline==False: trim to fit exactly the space defined
text = pdf.multi_cell(w=x2-x1, h=y2-y1,
txt=text, align=align, split_only=True)[0]
print "trimming: *%s*" % text
pdf.cell(w=x2-x1,h=y2-y1,txt=text,border=0,ln=0,align=align)
#pdf.Text(x=x1,y=y1,txt=text)
def line(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0, *args, **kwargs):
if pdf.draw_color!=rgb(foreground):
#print "SetDrawColor", hex(foreground)
pdf.set_draw_color(*rgb(foreground))
#print "SetLineWidth", size
pdf.set_line_width(size)
pdf.line(x1, y1, x2, y2)
def rect(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0, backgroud=65535, *args, **kwargs):
if pdf.draw_color!=rgb(foreground):
pdf.set_draw_color(*rgb(foreground))
if pdf.fill_color!=rgb(backgroud):
pdf.set_fill_color(*rgb(backgroud))
pdf.set_line_width(size)
pdf.rect(x1, y1, x2-x1, y2-y1)
def image(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', *args,**kwargs):
pdf.image(text,x1,y1,w=x2-x1,h=y2-y1,type='',link='')
def barcode(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=1,
foreground=0, *args, **kwargs):
if pdf.draw_color!=rgb(foreground):
pdf.set_draw_color(*rgb(foreground))
font = font.lower().strip()
if font == 'interleaved 2of5 nt':
pdf.interleaved2of5(text,x1,y1,w=size,h=y2-y1)
if __name__ == "__main__":
# generate sample invoice (according Argentina's regulations)
import random
from decimal import Decimal
f = Template(format="A4",
title="Sample Invoice", author="Sample Company",
subject="Sample Customer", keywords="Electronic TAX Invoice")
f.parse_csv(infile="invoice.csv", delimiter=";", decimal_sep=",")
detail = "Lorem ipsum dolor sit amet, consectetur. " * 30
items = []
for i in range(1, 30):
ds = "Sample product %s" % i
qty = random.randint(1,10)
price = round(random.random()*100,3)
code = "%s%s%02d" % (chr(random.randint(65,90)), chr(random.randint(65,90)),i)
items.append(dict(code=code, unit='u',
qty=qty, price=price,
amount=qty*price,
ds="%s: %s" % (i,ds)))
# divide and count lines
lines = 0
li_items = []
for it in items:
qty = it['qty']
code = it['code']
unit = it['unit']
for ds in f.split_multicell(it['ds'], 'item_description01'):
# add item description line (without price nor amount)
li_items.append(dict(code=code, ds=ds, qty=qty, unit=unit, price=None, amount=None))
# clean qty and code (show only at first)
unit = qty = code = None
# set last item line price and amount
li_items[-1].update(amount = it['amount'],
price = it['price'])
obs="\n<U>Detail:</U>\n\n" + detail
for ds in f.split_multicell(obs, 'item_description01'):
li_items.append(dict(code=code, ds=ds, qty=qty, unit=unit, price=None, amount=None))
# calculate pages:
lines = len(li_items)
max_lines_per_page = 24
pages = lines / (max_lines_per_page - 1)
if lines % (max_lines_per_page - 1): pages = pages + 1
    # fill in the fields and pages
for page in range(1, pages+1):
f.add_page()
f['page'] = 'Page %s of %s' % (page, pages)
if pages>1 and page<pages:
s = 'Continues on page %s' % (page+1)
else:
s = ''
f['item_description%02d' % (max_lines_per_page+1)] = s
f["company_name"] = "Sample Company"
f["company_logo"] = "tutorial/logo.png"
f["company_header1"] = "Some Address - somewhere -"
f["company_header2"] = "http://www.example.com"
f["company_footer1"] = "Tax Code ..."
f["company_footer2"] = "Tax/VAT ID ..."
f['number'] = '0001-00001234'
f['issue_date'] = '2010-09-10'
f['due_date'] = '2099-09-10'
f['customer_name'] = "Sample Client"
f['customer_address'] = "Siempreviva 1234"
# print line item...
li = 0
k = 0
total = Decimal("0.00")
for it in li_items:
k = k + 1
if k > page * (max_lines_per_page - 1):
break
if it['amount']:
total += Decimal("%.6f" % it['amount'])
if k > (page - 1) * (max_lines_per_page - 1):
li += 1
if it['qty'] is not None:
f['item_quantity%02d' % li] = it['qty']
if it['code'] is not None:
f['item_code%02d' % li] = it['code']
if it['unit'] is not None:
f['item_unit%02d' % li] = it['unit']
f['item_description%02d' % li] = it['ds']
if it['price'] is not None:
f['item_price%02d' % li] = "%0.3f" % it['price']
if it['amount'] is not None:
f['item_amount%02d' % li] = "%0.2f" % it['amount']
if pages == page:
f['net'] = "%0.2f" % (total/Decimal("1.21"))
f['vat'] = "%0.2f" % (total*(1-1/Decimal("1.21")))
f['total_label'] = 'Total:'
else:
f['total_label'] = 'SubTotal:'
f['total'] = "%0.2f" % total
f.render("./invoice.pdf")
if sys.platform.startswith("linux"):
os.system("evince ./invoice.pdf")
else:
os.system("./invoice.pdf")
| gpl-2.0 |
sadanandb/pmt | src/pyasm/application/common/dependency.py | 6 | 2711 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['Dependency']
import sys, types
from base_app_info import BaseAppInfo
class Dependency(object):
    '''class which handles the texture dependencies in a file or session'''
def __init__(my, node_name, file_type, path=""):
my.file_type = file_type
my.path = path
my.info = BaseAppInfo.get()
my.impl = my.info.get_app_implementation()
my.app = my.info.get_app()
my.node_name = node_name
my.texture_paths = []
my.texture_nodes = []
my.texture_attrs = []
my.dependent_paths = []
def get_texture_info(my):
return my.texture_paths, my.texture_nodes, my.texture_attrs
def execute(my):
assert my.file_type
my.app.message("path [%s] [%s]" % (my.app.APPNAME, my.file_type) )
# find all of the textures in the extracted file
if my.app.APPNAME == "maya":
if my.file_type == "mayaAscii":
# handle the textures
my.texture_nodes, my.texture_paths, my.texture_attrs = \
                    my.impl.get_textures_from_path(my.path)
# remember all of the geo paths
my.geo_paths = my.impl.get_geo_paths()
for geo_path in my.geo_paths:
my.dependent_paths.append(geo_path)
else:
my.texture_nodes, my.texture_paths, my.texture_attrs = \
my.impl.get_textures_from_session(my.node_name)
print my.texture_nodes, my.texture_paths, my.texture_attrs
elif my.app.APPNAME == "houdini":
my.texture_nodes, my.texture_paths, my.texture_attrs = \
my.app.get_file_references(my.node_name)
elif my.app.APPNAME == "xsi":
if my.file_type == "dotXSI":
my.texture_nodes, my.texture_paths, my.texture_attrs = \
my.impl.get_textures_from_path(my.path)
else:
my.texture_nodes, my.texture_paths, my.texture_attrs = \
my.impl.get_textures_from_session(my.node_name)
# add all of the texture paths
for texture_path in my.texture_paths:
# FIXME: all of the texture paths are uploaded!!!, even if
# they are identical
my.dependent_paths.append(texture_path)
return my.dependent_paths
| epl-1.0 |
switchboardOp/ansible | lib/ansible/modules/packaging/os/pkg5.py | 29 | 5253 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Peter Oliver <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkg5
author: "Peter Oliver (@mavit)"
short_description: Manages packages with the Solaris 11 Image Packaging System
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
notes:
- The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
options:
name:
description:
      - An FMRI of the package(s) to be installed/removed/updated.
- Multiple packages may be specified, separated by C(,).
required: true
state:
description:
- Whether to install (I(present), I(latest)), or remove (I(absent)) a
package.
required: false
default: present
choices: [ present, latest, absent ]
accept_licenses:
description:
- Accept any licences.
required: false
default: false
choices: [ true, false ]
aliases: [ accept_licences, accept ]
'''
EXAMPLES = '''
# Install Vim:
- pkg5:
name: editor/vim
# Remove finger daemon:
- pkg5:
name: service/network/finger
state: absent
# Install several packages at once:
- pkg5:
name:
- /file/gnu-findutils
- /text/gnu-grep
'''
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='list'),
state=dict(
default='present',
choices=[
'present',
'installed',
'latest',
'absent',
'uninstalled',
'removed',
]
),
accept_licenses=dict(
type='bool',
default=False,
aliases=['accept_licences', 'accept'],
),
),
supports_check_mode=True,
)
params = module.params
packages = []
# pkg(5) FRMIs include a comma before the release number, but
# AnsibleModule will have split this into multiple items for us.
# Try to spot where this has happened and fix it.
for fragment in params['name']:
if (
re.search('^\d+(?:\.\d+)*', fragment)
and packages and re.search('@[^,]*$', packages[-1])
):
packages[-1] += ',' + fragment
else:
packages.append(fragment)
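    # Illustrative example (version string is made up): a task value such as
    # "editor/[email protected],5.11-0.175.1.0.0.24.0" arrives here as the fragments
    # ["editor/[email protected]", "5.11-0.175.1.0.0.24.0"] and is re-joined into a
    # single FMRI by the loop above.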
if params['state'] in ['present', 'installed']:
ensure(module, 'present', packages, params)
elif params['state'] in ['latest']:
ensure(module, 'latest', packages, params)
elif params['state'] in ['absent', 'uninstalled', 'removed']:
ensure(module, 'absent', packages, params)
def ensure(module, state, packages, params):
response = {
'results': [],
'msg': '',
}
behaviour = {
'present': {
'filter': lambda p: not is_installed(module, p),
'subcommand': 'install',
},
'latest': {
'filter': lambda p: (
not is_installed(module, p) or not is_latest(module, p)
),
'subcommand': 'install',
},
'absent': {
'filter': lambda p: is_installed(module, p),
'subcommand': 'uninstall',
},
}
if module.check_mode:
dry_run = ['-n']
else:
dry_run = []
if params['accept_licenses']:
accept_licenses = ['--accept']
else:
accept_licenses = []
to_modify = filter(behaviour[state]['filter'], packages)
if to_modify:
rc, out, err = module.run_command(
[
'pkg', behaviour[state]['subcommand']
]
+ dry_run
+ accept_licenses
+ [
'-q', '--'
] + to_modify
)
response['rc'] = rc
response['results'].append(out)
response['msg'] += err
response['changed'] = True
if rc != 0:
module.fail_json(**response)
module.exit_json(**response)
def is_installed(module, package):
rc, out, err = module.run_command(['pkg', 'list', '--', package])
return not bool(int(rc))
def is_latest(module, package):
rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
return bool(int(rc))
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
40123148/w17b | static/Brython3.1.1-20150328-091302/Lib/opcode.py | 714 | 5442 |
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
try:
from _opcode import stack_effect
__all__.append('stack_effect')
except ImportError:
pass
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
'is not', 'exception match', 'BAD')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = []
opmap = {}
opname = [''] * 256
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP', 81)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('POP_EXCEPT', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries (upto 255)
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8)
hasnargs.append(131)
def_op('MAKE_FUNCTION', 132) # Number of args with default values
def_op('BUILD_SLICE', 133) # Number of items
def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('DELETE_DEREF', 138)
hasfree.append(138)
def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
hasnargs.append(140)
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
hasnargs.append(141)
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
hasnargs.append(142)
jrel_op('SETUP_WITH', 143)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
hasfree.append(148)
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
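# Illustrative sanity check (not part of the original module): after the
# definitions above, opmap['LOAD_CONST'] == 100, opname[100] == 'LOAD_CONST'
# and 100 is listed in hasconst, so the tables map between opcode numbers and
# names in both directions.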
del def_op, name_op, jrel_op, jabs_op
| agpl-3.0 |
mars-knowsnothing/amos-bot | src/Lib/site-packages/pip/download.py | 334 | 32171 | from __future__ import absolute_import
import cgi
import email.utils
import getpass
import json
import logging
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
try:
import ssl # noqa
HAS_TLS = True
except ImportError:
HAS_TLS = False
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
import pip
from pip.exceptions import InstallationError, HashMismatch
from pip.models import PyPI
from pip.utils import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
ARCHIVE_EXTENSIONS, consume, call_subprocess)
from pip.utils.encoding import auto_decode
from pip.utils.filesystem import check_path_owner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip.utils.glibc import libc_ver
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
from pip.locations import write_delete_marker_file
from pip.vcs import vcs
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._vendor.requests.utils import get_netrc_auth
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor.requests.packages import urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.lockfile import LockError
from pip._vendor.six.moves import xmlrpc_client
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url',
'unpack_http_url', 'unpack_url']
logger = logging.getLogger(__name__)
def user_agent():
"""
Return a string representing the user agent.
"""
data = {
"installer": {"name": "pip", "version": pip.__version__},
"python": platform.python_version(),
"implementation": {
"name": platform.python_implementation(),
},
}
if data["implementation"]["name"] == 'CPython':
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'PyPy':
if sys.pypy_version_info.releaselevel == 'final':
pypy_version_info = sys.pypy_version_info[:3]
else:
pypy_version_info = sys.pypy_version_info
data["implementation"]["version"] = ".".join(
[str(x) for x in pypy_version_info]
)
elif data["implementation"]["name"] == 'Jython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'IronPython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
if sys.platform.startswith("linux"):
from pip._vendor import distro
distro_infos = dict(filter(
lambda x: x[1],
zip(["name", "version", "id"], distro.linux_distribution()),
))
libc = dict(filter(
lambda x: x[1],
zip(["lib", "version"], libc_ver()),
))
if libc:
distro_infos["libc"] = libc
if distro_infos:
data["distro"] = distro_infos
if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}
if platform.system():
data.setdefault("system", {})["name"] = platform.system()
if platform.release():
data.setdefault("system", {})["release"] = platform.release()
if platform.machine():
data["cpu"] = platform.machine()
# Python 2.6 doesn't have ssl.OPENSSL_VERSION.
if HAS_TLS and sys.version_info[:2] > (2, 6):
data["openssl_version"] = ssl.OPENSSL_VERSION
return "{data[installer][name]}/{data[installer][version]} {json}".format(
data=data,
json=json.dumps(data, separators=(",", ":"), sort_keys=True),
)
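# Shape of the result (illustrative values only):
#   pip/9.0.1 {"cpu":"x86_64","implementation":{"name":"CPython",...},...}
# i.e. "pip/<version>" followed by a compact, key-sorted JSON dump of the
# environment details gathered above.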
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
self.prompting = prompting
self.passwords = {}
def __call__(self, req):
parsed = urllib_parse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.rsplit("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Extract credentials embedded in the url if we have none stored
if username is None:
username, password = self.parse_credentials(parsed.netloc)
# Get creds from netrc if we still don't have them
if username is None and password is None:
netrc_auth = get_netrc_auth(req.url)
username, password = netrc_auth if netrc_auth else (None, None)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
# We are not able to prompt the user so simply return the response
if not self.prompting:
return resp
parsed = urllib_parse.urlparse(resp.url)
# Prompt the user for a new username and password
username = six.moves.input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
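# For example (hypothetical netloc values):
#   parse_credentials("user:s3cret@pypi.example.org") splits the userinfo into
#   username "user" and password "s3cret";
#   parse_credentials("user@pypi.example.org") yields ("user", None);
#   a netloc without an "@" yields (None, None).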
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
pathname = url_to_path(request.url)
resp = Response()
resp.status_code = 200
resp.url = request.url
try:
stats = os.stat(pathname)
except OSError as exc:
resp.status_code = 404
resp.raw = exc
else:
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
resp.headers = CaseInsensitiveDict({
"Content-Type": content_type,
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = open(pathname, "rb")
resp.close = resp.raw.close
return resp
def close(self):
pass
class SafeFileCache(FileCache):
"""
A file based cache which is safe to use even when the target directory may
not be accessible or writable.
"""
def __init__(self, *args, **kwargs):
super(SafeFileCache, self).__init__(*args, **kwargs)
# Check to ensure that the directory containing our cache directory
        # is owned by the user currently executing pip. If it does not exist
# we will check the parent directory until we find one that does exist.
# If it is not owned by the user executing pip then we will disable
# the cache and log a warning.
if not check_path_owner(self.directory):
logger.warning(
"The directory '%s' or its parent directory is not owned by "
"the current user and the cache has been disabled. Please "
"check the permissions and owner of that directory. If "
"executing pip with sudo, you may want sudo's -H flag.",
self.directory,
)
# Set our directory to None to disable the Cache
self.directory = None
def get(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).get(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def set(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).set(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def delete(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).delete(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
class InsecureHTTPAdapter(HTTPAdapter):
def cert_verify(self, conn, url, verify, cert):
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
class PipSession(requests.Session):
timeout = None
def __init__(self, *args, **kwargs):
retries = kwargs.pop("retries", 0)
cache = kwargs.pop("cache", None)
insecure_hosts = kwargs.pop("insecure_hosts", [])
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Create our urllib3.Retry instance which will allow us to customize
# how we handle retries.
retries = urllib3.Retry(
# Set the total number of retries that a particular request can
# have.
total=retries,
# A 503 error from PyPI typically means that the Fastly -> Origin
# connection got interrupted in some way. A 503 error in general
# is typically considered a transient error so we'll go ahead and
# retry it.
status_forcelist=[503],
# Add a small amount of back off between failed requests in
# order to prevent hammering the service.
backoff_factor=0.25,
)
# We want to _only_ cache responses on securely fetched origins. We do
# this because we can't validate the response of an insecurely fetched
# origin, and we don't want someone to be able to poison the cache and
# require manual eviction from the cache to fix it.
if cache:
secure_adapter = CacheControlAdapter(
cache=SafeFileCache(cache, use_dir_lock=True),
max_retries=retries,
)
else:
secure_adapter = HTTPAdapter(max_retries=retries)
# Our Insecure HTTPAdapter disables HTTPS validation. It does not
# support caching (see above) so we'll use it for all http:// URLs as
# well as any https:// host that we've marked as ignoring TLS errors
# for.
insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
self.mount("https://", secure_adapter)
self.mount("http://", insecure_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
# We want to use a non-validating adapter for any requests which are
# deemed insecure.
for host in insecure_hosts:
self.mount("https://{0}/".format(host), insecure_adapter)
def request(self, method, url, *args, **kwargs):
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
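# Minimal usage sketch (cache path and host names are hypothetical):
#   session = PipSession(retries=3, cache="/tmp/pip-http-cache",
#                        insecure_hosts=["pypi.internal.example"])
#   response = session.get("https://pypi.org/simple/pip/", timeout=15)
# The constructor wires in the user agent, MultiDomainBasicAuth, the retry policy,
# the cache-aware HTTPS adapter, and the file:// and insecure-host adapters shown above.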
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
raise TypeError(
"get_file_content() missing 1 required keyword argument: 'session'"
)
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from and
comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib_parse.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
# FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
return resp.url, resp.text
try:
with open(url, 'rb') as f:
content = auto_decode(f.read())
except IOError as exc:
raise InstallationError(
'Could not open requirements file: %s' % str(exc)
)
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
_, netloc, path, _, _ = urllib_parse.urlsplit(url)
# if we have a UNC path, prepend UNC share notation
if netloc:
netloc = '\\\\' + netloc
path = urllib_request.url2pathname(netloc + path)
return path
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
return url
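# Illustrative round trip on a POSIX system (the path is hypothetical):
#   path_to_url("/tmp/pkgs/foo-1.0.tar.gz") -> "file:///tmp/pkgs/foo-1.0.tar.gz"
#   url_to_path("file:///tmp/pkgs/foo-1.0.tar.gz") -> "/tmp/pkgs/foo-1.0.tar.gz"
# (On Windows the UNC/drive-letter handling above changes the details.)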
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
ext = splitext(name)[1].lower()
if ext in ARCHIVE_EXTENSIONS:
return True
return False
def unpack_vcs_link(link, location):
vcs_backend = _get_used_vcs_backend(link)
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def is_dir_url(link):
"""Return whether a file:// Link points to a directory.
``link`` must not have any other scheme but file://. Call is_file_url()
first.
"""
link_path = url_to_path(link.url_without_fragment)
return os.path.isdir(link_path)
def _progress_indicator(iterable, *args, **kwargs):
return iterable
def _download_url(resp, link, content_file, hashes):
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
cached_resp = getattr(resp, "from_cache", False)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif cached_resp:
show_progress = False
elif total_length > (40 * 1000):
show_progress = True
elif not total_length:
show_progress = True
else:
show_progress = False
show_url = link.show_url
def resp_read(chunk_size):
try:
# Special case for urllib3.
for chunk in resp.raw.stream(
chunk_size,
# We use decode_content=False here because we don't
# want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False):
yield chunk
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
def written_chunks(chunks):
for chunk in chunks:
content_file.write(chunk)
yield chunk
progress_indicator = _progress_indicator
if link.netloc == PyPI.netloc:
url = show_url
else:
url = link.url_without_fragment
if show_progress: # We don't show progress on cached responses
if total_length:
logger.info("Downloading %s (%s)", url, format_size(total_length))
progress_indicator = DownloadProgressBar(max=total_length).iter
else:
logger.info("Downloading %s", url)
progress_indicator = DownloadProgressSpinner().iter
elif cached_resp:
logger.info("Using cached %s", url)
else:
logger.info("Downloading %s", url)
logger.debug('Downloading from URL %s', link)
downloaded_chunks = written_chunks(
progress_indicator(
resp_read(CONTENT_CHUNK_SIZE),
CONTENT_CHUNK_SIZE
)
)
if hashes:
hashes.check_against_chunks(downloaded_chunks)
else:
consume(downloaded_chunks)
def _copy_file(filename, location, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort' %
display_path(download_location), ('i', 'w', 'b', 'a'))
if response == 'i':
copy = False
elif response == 'w':
logger.warning('Deleting %s', display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warning(
'Backing up %s to %s',
display_path(download_location),
display_path(dest_file),
)
shutil.move(download_location, dest_file)
elif response == 'a':
sys.exit(-1)
if copy:
shutil.copy(filename, download_location)
logger.info('Saved %s', display_path(download_location))
def unpack_http_url(link, location, download_dir=None,
session=None, hashes=None):
if session is None:
raise TypeError(
"unpack_http_url() missing 1 required keyword argument: 'session'"
)
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
# If a download dir is specified, is the file already downloaded there?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link,
download_dir,
hashes)
if already_downloaded_path:
from_path = already_downloaded_path
content_type = mimetypes.guess_type(from_path)[0]
else:
# let's download to a tmp dir
from_path, content_type = _download_http_url(link,
session,
temp_dir,
hashes)
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, link)
if not already_downloaded_path:
os.unlink(from_path)
rmtree(temp_dir)
def unpack_file_url(link, location, download_dir=None, hashes=None):
"""Unpack link into location.
If download_dir is provided and link points to a file, make a copy
of the link file inside download_dir.
"""
link_path = url_to_path(link.url_without_fragment)
# If it's a url to a local directory
if is_dir_url(link):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
if download_dir:
logger.info('Link is a directory, ignoring download_dir')
return
# If --require-hashes is off, `hashes` is either empty, the
# link's embedded hash, or MissingHashes; it is required to
# match. If --require-hashes is on, we are satisfied by any
# hash in `hashes` matching: a URL-based or an option-based
# one; no internet-sourced hash will be in `hashes`.
if hashes:
hashes.check_against_path(link_path)
# If a download dir is specified, is the file already there and valid?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link,
download_dir,
hashes)
if already_downloaded_path:
from_path = already_downloaded_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, link)
def _copy_dist_from_dir(link_path, location):
"""Copy distribution files in `link_path` to `location`.
Invoked when user requests to install a local directory. E.g.:
pip install .
pip install ~/dev/git-repos/python-prompt-toolkit
"""
# Note: This is currently VERY SLOW if you have a lot of data in the
# directory, because it copies everything with `shutil.copytree`.
# What it should really do is build an sdist and install that.
# See https://github.com/pypa/pip/issues/2195
if os.path.isdir(location):
rmtree(location)
# build an sdist
setup_py = 'setup.py'
sdist_args = [sys.executable]
sdist_args.append('-c')
sdist_args.append(SETUPTOOLS_SHIM % setup_py)
sdist_args.append('sdist')
sdist_args += ['--dist-dir', location]
logger.info('Running setup.py sdist for %s', link_path)
with indent_log():
call_subprocess(sdist_args, cwd=link_path, show_stdout=False)
# unpack sdist into `location`
sdist = os.path.join(location, os.listdir(location)[0])
logger.info('Unpacking sdist %s into %s', sdist, location)
unpack_file(sdist, location, content_type=None, link=None)
class PipXmlrpcTransport(xmlrpc_client.Transport):
"""Provide a `xmlrpclib.Transport` implementation via a `PipSession`
object.
"""
def __init__(self, index_url, session, use_datetime=False):
xmlrpc_client.Transport.__init__(self, use_datetime)
index_parts = urllib_parse.urlparse(index_url)
self._scheme = index_parts.scheme
self._session = session
def request(self, host, handler, request_body, verbose=False):
parts = (self._scheme, host, handler, None, None, None)
url = urllib_parse.urlunparse(parts)
try:
headers = {'Content-Type': 'text/xml'}
response = self._session.post(url, data=request_body,
headers=headers, stream=True)
response.raise_for_status()
self.verbose = verbose
return self.parse_response(response.raw)
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s",
exc.response.status_code, url,
)
raise
def unpack_url(link, location, download_dir=None,
only_download=False, session=None, hashes=None):
"""Unpack link.
If link is a VCS link:
if only_download, export into download_dir and ignore location
else unpack into location
for other types of link:
- unpack into location
- if download_dir, copy the file into download_dir
- if only_download, mark location for deletion
:param hashes: A Hashes object, one of whose embedded hashes must match,
or HashMismatch will be raised. If the Hashes is empty, no matches are
required, and unhashable types of requirements (like VCS ones, which
would ordinarily raise HashUnsupported) are allowed.
"""
# non-editable vcs urls
if is_vcs_url(link):
unpack_vcs_link(link, location)
# file urls
elif is_file_url(link):
unpack_file_url(link, location, download_dir, hashes=hashes)
# http urls
else:
if session is None:
session = PipSession()
unpack_http_url(
link,
location,
download_dir,
session,
hashes=hashes
)
if only_download:
write_delete_marker_file(location)
def _download_http_url(link, session, temp_dir, hashes):
"""Download link url into temp_dir using provided session"""
target_url = link.url.split('#', 1)[0]
try:
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
                # By setting this to request only the identity encoding we're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s", exc.response.status_code, link,
)
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
file_path = os.path.join(temp_dir, filename)
with open(file_path, 'wb') as content_file:
_download_url(resp, link, content_file, hashes)
return file_path, content_type
def _check_download_dir(link, download_dir, hashes):
""" Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if hashes:
try:
hashes.check_against_path(download_path)
except HashMismatch:
logger.warning(
'Previously-downloaded file %s has bad hash. '
'Re-downloading.',
download_path
)
os.unlink(download_path)
return None
return download_path
return None
| gpl-3.0 |
mrunge/horizon | openstack_dashboard/dashboards/admin/routers/panel.py | 43 | 1067 | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.admin import dashboard
class Routers(horizon.Panel):
name = _("Routers")
slug = 'routers'
permissions = ('openstack.services.network',)
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
if network_config.get('enable_router', True):
dashboard.Admin.register(Routers)
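    # Illustrative settings snippet (hypothetical local_settings.py entry):
    #   OPENSTACK_NEUTRON_NETWORK = {'enable_router': False}
    # makes the check above skip registering this panel on the Admin dashboard.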
| apache-2.0 |
proggy/uic | doc/conf.py | 1 | 7924 | # -*- coding: utf-8 -*-
#
# uic documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 9 23:00:14 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'uic'
copyright = u'2014, Daniel Jung'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'uicdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'uic.tex', u'uic Documentation',
u'Daniel Jung', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'uic', u'uic Documentation',
[u'Daniel Jung'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'uic', u'uic Documentation',
u'Daniel Jung', 'uic', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| gpl-2.0 |
LordSputnik/mbws3 | ws/schema/artist.py | 1 | 1672 | import datetime
from sqlalchemy.dialects.postgresql import UUID
from ws import db
class Artist(db.Model):
id = db.Column(db.Integer, primary_key=True)
gid = db.Column(UUID, unique=True, nullable=False)
name = db.Column(db.UnicodeText, nullable=False)
sort_name = db.Column(db.UnicodeText, nullable=False)
begin_date_year = db.Column(db.SmallInteger)
begin_date_month = db.Column(db.SmallInteger)
begin_date_day = db.Column(db.SmallInteger)
end_date_year = db.Column(db.SmallInteger)
end_date_month = db.Column(db.SmallInteger)
end_date_day = db.Column(db.SmallInteger)
type_id = db.Column('type', db.Integer, db.ForeignKey('artist_type.id'))
area_id = db.Column('area', db.Integer, db.ForeignKey('area.id'))
gender_id = db.Column('gender',db.Integer, db.ForeignKey('gender.id'))
comment = db.Column(db.Unicode(255), default=u'', nullable=False)
edits_pending = db.Column(db.Integer, default=0, nullable=False)
last_updated = db.Column(db.DateTime(timezone=True), default=datetime.datetime.utcnow)
ended = db.Column(db.Boolean, default=False, nullable=False)
begin_area_id = db.Column('begin_area',db.Integer, db.ForeignKey('area.id'))
end_area_id = db.Column('end_area',db.Integer, db.ForeignKey('area.id'))
type = db.relationship('ArtistType')
gender = db.relationship('Gender')
area = db.relationship('Area', foreign_keys=area_id)
begin_area = db.relationship('Area', foreign_keys=begin_area_id)
end_area = db.relationship('Area', foreign_keys=end_area_id)
    @property
    def begin_date(self):
        # Unimplemented stub in the original source; presumably intended to expose
        # begin_date_year/_month/_day as a single value. As written it returns None.
        pass
    @property
    def end_date(self):
        # Unimplemented stub; the counterpart of begin_date for the end_date_* columns.
        pass
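    # Illustrative query sketch (placeholder gid), assuming `ws.db` is a
    # Flask-SQLAlchemy instance so models expose a `.query` attribute:
    #   Artist.query.filter_by(gid='00000000-0000-0000-0000-000000000000').first()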
| gpl-3.0 |
Xilinx/hopper | hopper/commands/CommandHopperBase.py | 1 | 3733 | # Copyright (c) 2015 Xilinx Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys
import urlparse
from hopper.utils.logger import *
import hopper.utils.args
import hopper.utils.Proxy
import hopper.utils.tasks
class CommandHopperBase(hopper.utils.args.CommandBase):
threadLimit = hopper.utils.args.ValueOption(
None, "threads",
default = None,
description = "The maximum number of concurrent threads avaliable.\n" +
"(Default is to automatically detect)")
mirror = hopper.utils.args.ValueOption(
None, "mirror",
default = None,
description = "The location of a git repository mirror. These repositories will be used to seed the clones.\n" +
"(This can be defined via the environment variable HOPPER_MIRROR.)")
locallayers = hopper.utils.args.ValueOption(
None, "local-layers",
default = None,
description = "The location of layers for which are local and can be symlinked to. This is useful for development.\n" +
"(This can be defined via the environment variable HOPPER_LOCAL.)")
def __init__(self):
hopper.utils.args.CommandBase.__init__(self)
self.environment = None
def execute(self, handler = None):
hopper.utils.args.CommandBase.execute(self)
if self.threadLimit:
threads = self.threadLimit
else:
threads = CommandHopperBase.getDefaultThreads()
self.environment = hopper.utils.tasks.Environment(
basepath = os.getcwd(),
mirrorpath = CommandHopperBase.valueOrEnvironment(self.mirror, "HOPPER_MIRROR"),
proxy = CommandHopperBase.getProxy(),
threads = threads,
locallayers = CommandHopperBase.valueOrEnvironment(self.locallayers, "HOPPER_LOCAL"))
return True
@staticmethod
def valueOrEnvironment(value, env):
if value:
return value
elif env in os.environ:
return os.environ[env]
return None
@staticmethod
def getDefaultThreads():
import multiprocessing
systemthreads = multiprocessing.cpu_count()
activecpus = systemthreads / 2
debug("Detected %s threads avaliable to system (using half, %s threads)" % (systemthreads, activecpus))
# Check if using LSF and account for it
if "LSB_DJOB_NUMPROC" in os.environ:
try:
activecpus = int(os.environ["LSB_DJOB_NUMPROC"])
warning("Forced default threads by LSF environment to %s threads" % activecpus)
except:
pass
return activecpus
@staticmethod
def getHttpProxyUri():
if "http_proxy" in os.environ:
return urlparse.urlparse(os.environ["http_proxy"])
elif "HTTP_PROXY" in os.environ:
return urlparse.urlparse(os.environ["HTTP_PROXY"])
return None
@staticmethod
def getProxy():
uri = CommandHopperBase.getHttpProxyUri()
if uri:
return hopper.utils.Proxy.Proxy(uri.hostname, uri.port)
return None
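# Illustrative example (hypothetical proxy host): with http_proxy=http://proxy.example.com:3128
# set in the environment, getHttpProxyUri() parses that URI and getProxy() returns
# hopper.utils.Proxy.Proxy("proxy.example.com", 3128); with no proxy variables set, both return None.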
| mit |
swenson/sagewiki | unidecode/unidecode/x058.py | 252 | 4678 | data = (
'Ku ', # 0x00
'Ke ', # 0x01
'Tang ', # 0x02
'Kun ', # 0x03
'Ni ', # 0x04
'Jian ', # 0x05
'Dui ', # 0x06
'Jin ', # 0x07
'Gang ', # 0x08
'Yu ', # 0x09
'E ', # 0x0a
'Peng ', # 0x0b
'Gu ', # 0x0c
'Tu ', # 0x0d
'Leng ', # 0x0e
'[?] ', # 0x0f
'Ya ', # 0x10
'Qian ', # 0x11
'[?] ', # 0x12
'An ', # 0x13
'[?] ', # 0x14
'Duo ', # 0x15
'Nao ', # 0x16
'Tu ', # 0x17
'Cheng ', # 0x18
'Yin ', # 0x19
'Hun ', # 0x1a
'Bi ', # 0x1b
'Lian ', # 0x1c
'Guo ', # 0x1d
'Die ', # 0x1e
'Zhuan ', # 0x1f
'Hou ', # 0x20
'Bao ', # 0x21
'Bao ', # 0x22
'Yu ', # 0x23
'Di ', # 0x24
'Mao ', # 0x25
'Jie ', # 0x26
'Ruan ', # 0x27
'E ', # 0x28
'Geng ', # 0x29
'Kan ', # 0x2a
'Zong ', # 0x2b
'Yu ', # 0x2c
'Huang ', # 0x2d
'E ', # 0x2e
'Yao ', # 0x2f
'Yan ', # 0x30
'Bao ', # 0x31
'Ji ', # 0x32
'Mei ', # 0x33
'Chang ', # 0x34
'Du ', # 0x35
'Tuo ', # 0x36
'Yin ', # 0x37
'Feng ', # 0x38
'Zhong ', # 0x39
'Jie ', # 0x3a
'Zhen ', # 0x3b
'Feng ', # 0x3c
'Gang ', # 0x3d
'Chuan ', # 0x3e
'Jian ', # 0x3f
'Pyeng ', # 0x40
'Toride ', # 0x41
'Xiang ', # 0x42
'Huang ', # 0x43
'Leng ', # 0x44
'Duan ', # 0x45
'[?] ', # 0x46
'Xuan ', # 0x47
'Ji ', # 0x48
'Ji ', # 0x49
'Kuai ', # 0x4a
'Ying ', # 0x4b
'Ta ', # 0x4c
'Cheng ', # 0x4d
'Yong ', # 0x4e
'Kai ', # 0x4f
'Su ', # 0x50
'Su ', # 0x51
'Shi ', # 0x52
'Mi ', # 0x53
'Ta ', # 0x54
'Weng ', # 0x55
'Cheng ', # 0x56
'Tu ', # 0x57
'Tang ', # 0x58
'Que ', # 0x59
'Zhong ', # 0x5a
'Li ', # 0x5b
'Peng ', # 0x5c
'Bang ', # 0x5d
'Sai ', # 0x5e
'Zang ', # 0x5f
'Dui ', # 0x60
'Tian ', # 0x61
'Wu ', # 0x62
'Cheng ', # 0x63
'Xun ', # 0x64
'Ge ', # 0x65
'Zhen ', # 0x66
'Ai ', # 0x67
'Gong ', # 0x68
'Yan ', # 0x69
'Kan ', # 0x6a
'Tian ', # 0x6b
'Yuan ', # 0x6c
'Wen ', # 0x6d
'Xie ', # 0x6e
'Liu ', # 0x6f
'Ama ', # 0x70
'Lang ', # 0x71
'Chang ', # 0x72
'Peng ', # 0x73
'Beng ', # 0x74
'Chen ', # 0x75
'Cu ', # 0x76
'Lu ', # 0x77
'Ou ', # 0x78
'Qian ', # 0x79
'Mei ', # 0x7a
'Mo ', # 0x7b
'Zhuan ', # 0x7c
'Shuang ', # 0x7d
'Shu ', # 0x7e
'Lou ', # 0x7f
'Chi ', # 0x80
'Man ', # 0x81
'Biao ', # 0x82
'Jing ', # 0x83
'Qi ', # 0x84
'Shu ', # 0x85
'Di ', # 0x86
'Zhang ', # 0x87
'Kan ', # 0x88
'Yong ', # 0x89
'Dian ', # 0x8a
'Chen ', # 0x8b
'Zhi ', # 0x8c
'Xi ', # 0x8d
'Guo ', # 0x8e
'Qiang ', # 0x8f
'Jin ', # 0x90
'Di ', # 0x91
'Shang ', # 0x92
'Mu ', # 0x93
'Cui ', # 0x94
'Yan ', # 0x95
'Ta ', # 0x96
'Zeng ', # 0x97
'Qi ', # 0x98
'Qiang ', # 0x99
'Liang ', # 0x9a
'[?] ', # 0x9b
'Zhui ', # 0x9c
'Qiao ', # 0x9d
'Zeng ', # 0x9e
'Xu ', # 0x9f
'Shan ', # 0xa0
'Shan ', # 0xa1
'Ba ', # 0xa2
'Pu ', # 0xa3
'Kuai ', # 0xa4
'Dong ', # 0xa5
'Fan ', # 0xa6
'Que ', # 0xa7
'Mo ', # 0xa8
'Dun ', # 0xa9
'Dun ', # 0xaa
'Dun ', # 0xab
'Di ', # 0xac
'Sheng ', # 0xad
'Duo ', # 0xae
'Duo ', # 0xaf
'Tan ', # 0xb0
'Deng ', # 0xb1
'Wu ', # 0xb2
'Fen ', # 0xb3
'Huang ', # 0xb4
'Tan ', # 0xb5
'Da ', # 0xb6
'Ye ', # 0xb7
'Sho ', # 0xb8
'Mama ', # 0xb9
'Yu ', # 0xba
'Qiang ', # 0xbb
'Ji ', # 0xbc
'Qiao ', # 0xbd
'Ken ', # 0xbe
'Yi ', # 0xbf
'Pi ', # 0xc0
'Bi ', # 0xc1
'Dian ', # 0xc2
'Jiang ', # 0xc3
'Ye ', # 0xc4
'Yong ', # 0xc5
'Bo ', # 0xc6
'Tan ', # 0xc7
'Lan ', # 0xc8
'Ju ', # 0xc9
'Huai ', # 0xca
'Dang ', # 0xcb
'Rang ', # 0xcc
'Qian ', # 0xcd
'Xun ', # 0xce
'Lan ', # 0xcf
'Xi ', # 0xd0
'He ', # 0xd1
'Ai ', # 0xd2
'Ya ', # 0xd3
'Dao ', # 0xd4
'Hao ', # 0xd5
'Ruan ', # 0xd6
'Mama ', # 0xd7
'Lei ', # 0xd8
'Kuang ', # 0xd9
'Lu ', # 0xda
'Yan ', # 0xdb
'Tan ', # 0xdc
'Wei ', # 0xdd
'Huai ', # 0xde
'Long ', # 0xdf
'Long ', # 0xe0
'Rui ', # 0xe1
'Li ', # 0xe2
'Lin ', # 0xe3
'Rang ', # 0xe4
'Ten ', # 0xe5
'Xun ', # 0xe6
'Yan ', # 0xe7
'Lei ', # 0xe8
'Ba ', # 0xe9
'[?] ', # 0xea
'Shi ', # 0xeb
'Ren ', # 0xec
'[?] ', # 0xed
'Zhuang ', # 0xee
'Zhuang ', # 0xef
'Sheng ', # 0xf0
'Yi ', # 0xf1
'Mai ', # 0xf2
'Ke ', # 0xf3
'Zhu ', # 0xf4
'Zhuang ', # 0xf5
'Hu ', # 0xf6
'Hu ', # 0xf7
'Kun ', # 0xf8
'Yi ', # 0xf9
'Hu ', # 0xfa
'Xu ', # 0xfb
'Kun ', # 0xfc
'Shou ', # 0xfd
'Mang ', # 0xfe
'Zun ', # 0xff
)
| gpl-2.0 |
twitchyliquid64/misc-scripts | s3tool/boto-develop/boto/services/sonofmmm.py | 170 | 3498 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.services.service import Service
from boto.services.message import ServiceMessage
import os
import mimetypes
class SonOfMMM(Service):
def __init__(self, config_file=None):
super(SonOfMMM, self).__init__(config_file)
self.log_file = '%s.log' % self.instance_id
self.log_path = os.path.join(self.working_dir, self.log_file)
boto.set_file_logger(self.name, self.log_path)
if self.sd.has_option('ffmpeg_args'):
self.command = '/usr/local/bin/ffmpeg ' + self.sd.get('ffmpeg_args')
else:
self.command = '/usr/local/bin/ffmpeg -y -i %s %s'
self.output_mimetype = self.sd.get('output_mimetype')
if self.sd.has_option('output_ext'):
self.output_ext = self.sd.get('output_ext')
else:
self.output_ext = mimetypes.guess_extension(self.output_mimetype)
self.output_bucket = self.sd.get_obj('output_bucket')
self.input_bucket = self.sd.get_obj('input_bucket')
        # check to see if there are any messages queued
# if not, create messages for all files in input_bucket
m = self.input_queue.read(1)
if not m:
self.queue_files()
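    # Illustrative service-definition options consumed in __init__ above (the values
    # are hypothetical; only the option names come from the sd.get()/get_obj() calls):
    #   ffmpeg_args = -y -i %s -ab 128k %s
    #   output_mimetype = audio/mp3
    #   input_bucket = my-source-media
    #   output_bucket = my-transcoded-media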
def queue_files(self):
boto.log.info('Queueing files from %s' % self.input_bucket.name)
for key in self.input_bucket:
boto.log.info('Queueing %s' % key.name)
m = ServiceMessage()
if self.output_bucket:
d = {'OutputBucket' : self.output_bucket.name}
else:
d = None
m.for_key(key, d)
self.input_queue.write(m)
def process_file(self, in_file_name, msg):
base, ext = os.path.splitext(in_file_name)
out_file_name = os.path.join(self.working_dir,
base+self.output_ext)
command = self.command % (in_file_name, out_file_name)
boto.log.info('running:\n%s' % command)
status = self.run(command)
if status == 0:
return [(out_file_name, self.output_mimetype)]
else:
return []
def shutdown(self):
if os.path.isfile(self.log_path):
if self.output_bucket:
key = self.output_bucket.new_key(self.log_file)
key.set_contents_from_filename(self.log_path)
super(SonOfMMM, self).shutdown()
| mit |
v-iam/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/fabric_error.py | 2 | 1515 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class FabricError(Model):
"""The REST API operations for Service Fabric return standard HTTP status
codes. This type defines the additional information returned from the
Service Fabric API operations that are not successful.
:param error:
:type error: :class:`FabricErrorError
<azure.servicefabric.models.FabricErrorError>`
"""
_validation = {
'error': {'required': True},
}
_attribute_map = {
'error': {'key': 'Error', 'type': 'FabricErrorError'},
}
def __init__(self, error):
self.error = error
class FabricErrorException(HttpOperationError):
"""Server responsed with exception of type: 'FabricError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(FabricErrorException, self).__init__(deserialize, response, 'FabricError', *args)
| mit |