duyet-website/api.duyet.net | lib/flask/testsuite/__init__.py
# -*- coding: utf-8 -*-
"""
flask.testsuite
~~~~~~~~~~~~~~~
Tests Flask itself. The majority of Flask is already tested
as part of Werkzeug.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import os
import sys
import flask
import warnings
import unittest
from functools import update_wrapper
from contextlib import contextmanager
from werkzeug.utils import import_string, find_modules
from flask._compat import reraise, StringIO
def add_to_path(path):
"""Adds an entry to sys.path if it's not already there. This does
not append it but moves it to the front so that we can be sure it
is loaded.
"""
if not os.path.isdir(path):
raise RuntimeError('Tried to add nonexistent path')
def _samefile(x, y):
if x == y:
return True
try:
return os.path.samefile(x, y)
except (IOError, OSError, AttributeError):
# Windows has no samefile
return False
sys.path[:] = [x for x in sys.path if not _samefile(path, x)]
sys.path.insert(0, path)
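# Illustrative only (hypothetical path, not part of the original module):
# after add_to_path('/opt/test_apps'), sys.path[0] == '/opt/test_apps' and
# any previous copy of that entry has been removed.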
def iter_suites():
"""Yields all testsuites."""
for module in find_modules(__name__):
mod = import_string(module)
if hasattr(mod, 'suite'):
yield mod.suite()
def find_all_tests(suite):
"""Yields all the tests and their names from a given suite."""
suites = [suite]
while suites:
s = suites.pop()
try:
suites.extend(s)
except TypeError:
yield s, '%s.%s.%s' % (
s.__class__.__module__,
s.__class__.__name__,
s._testMethodName
)
@contextmanager
def catch_warnings():
"""Catch warnings in a with block in a list"""
# make sure deprecation warnings are active in tests
warnings.simplefilter('default', category=DeprecationWarning)
filters = warnings.filters
warnings.filters = filters[:]
old_showwarning = warnings.showwarning
log = []
def showwarning(message, category, filename, lineno, file=None, line=None):
log.append(locals())
try:
warnings.showwarning = showwarning
yield log
finally:
warnings.filters = filters
warnings.showwarning = old_showwarning
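# Illustrative only (not part of the original module): collecting warnings
# raised inside the block. Each log entry is the locals() dict captured in
# showwarning above, so fields such as 'message' and 'category' are keys.
# with catch_warnings() as log:
#     warnings.warn('old api', DeprecationWarning)
# assert log and log[0]['category'] is DeprecationWarning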
@contextmanager
def catch_stderr():
"""Catch stderr in a StringIO"""
old_stderr = sys.stderr
sys.stderr = rv = StringIO()
try:
yield rv
finally:
sys.stderr = old_stderr
def emits_module_deprecation_warning(f):
def new_f(self, *args, **kwargs):
with catch_warnings() as log:
f(self, *args, **kwargs)
self.assert_true(log, 'expected deprecation warning')
for entry in log:
self.assert_in('Modules are deprecated', str(entry['message']))
return update_wrapper(new_f, f)
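# Illustrative only (hypothetical test method): the decorator above wraps a
# test so that it must emit the module deprecation warning to pass.
# class ModuleDeprecationTest(FlaskTestCase):
#     @emits_module_deprecation_warning
#     def test_old_module_api(self):
#         ...  # exercise a code path that warns 'Modules are deprecated'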
class FlaskTestCase(unittest.TestCase):
"""Baseclass for all the tests that Flask uses. Use these methods
for testing instead of the camelcased ones in the baseclass for
consistency.
"""
def ensure_clean_request_context(self):
# make sure we're not leaking a request context since we are
# testing flask internally in debug mode in a few cases
leaks = []
while flask._request_ctx_stack.top is not None:
leaks.append(flask._request_ctx_stack.pop())
self.assert_equal(leaks, [])
def setup(self):
pass
def teardown(self):
pass
def setUp(self):
self.setup()
def tearDown(self):
unittest.TestCase.tearDown(self)
self.ensure_clean_request_context()
self.teardown()
def assert_equal(self, x, y):
return self.assertEqual(x, y)
def assert_raises(self, exc_type, callable=None, *args, **kwargs):
catcher = _ExceptionCatcher(self, exc_type)
if callable is None:
return catcher
with catcher:
callable(*args, **kwargs)
def assert_true(self, x, msg=None):
self.assertTrue(x, msg)
def assert_false(self, x, msg=None):
self.assertFalse(x, msg)
def assert_in(self, x, y):
self.assertIn(x, y)
def assert_not_in(self, x, y):
self.assertNotIn(x, y)
if sys.version_info[:2] == (2, 6):
def assertIn(self, x, y):
assert x in y, "%r unexpectedly not in %r" % (x, y)
def assertNotIn(self, x, y):
assert x not in y, "%r unexpectedly in %r" % (x, y)
class _ExceptionCatcher(object):
def __init__(self, test_case, exc_type):
self.test_case = test_case
self.exc_type = exc_type
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
exception_name = self.exc_type.__name__
if exc_type is None:
self.test_case.fail('Expected exception of type %r' %
exception_name)
elif not issubclass(exc_type, self.exc_type):
reraise(exc_type, exc_value, tb)
return True
class BetterLoader(unittest.TestLoader):
"""A nicer loader that solves two problems. First of all we are setting
up tests from different sources and we're doing this programmatically
which breaks the default loading logic so this is required anyways.
Secondly this loader has a nicer interpolation for test names than the
default one so you can just do ``run-tests.py ViewTestCase`` and it
will work.
"""
def getRootSuite(self):
return suite()
def loadTestsFromName(self, name, module=None):
root = self.getRootSuite()
if name == 'suite':
return root
all_tests = []
for testcase, testname in find_all_tests(root):
if testname == name or \
testname.endswith('.' + name) or \
('.' + name + '.') in testname or \
testname.startswith(name + '.'):
all_tests.append(testcase)
if not all_tests:
raise LookupError('could not find test case for "%s"' % name)
if len(all_tests) == 1:
return all_tests[0]
rv = unittest.TestSuite()
for test in all_tests:
rv.addTest(test)
return rv
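# Illustrative only (hypothetical test names): all of these resolve through
# loadTestsFromName above thanks to the partial-name matching.
# loader = BetterLoader()
# loader.loadTestsFromName('suite')                  # the full root suite
# loader.loadTestsFromName('ViewTestCase')           # every test in one class
# loader.loadTestsFromName('ViewTestCase.test_basic_view')  # a single test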
def setup_path():
add_to_path(os.path.abspath(os.path.join(
os.path.dirname(__file__), 'test_apps')))
def suite():
"""A testsuite that has all the Flask tests. You can use this
function to integrate the Flask tests into your own testsuite
in case you want to test that monkeypatches to Flask do not
break it.
"""
setup_path()
suite = unittest.TestSuite()
for other_suite in iter_suites():
suite.addTest(other_suite)
return suite
def main():
"""Runs the testsuite as command line application."""
try:
unittest.main(testLoader=BetterLoader(), defaultTest='suite')
except Exception as e:
print('Error: %s' % e)
Nirvedh/CoarseCoherence | src/arch/x86/isa/insts/simd128/integer/data_reordering/unpack_and_interleave.py
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PUNPCKLBW_XMM_XMM {
unpack xmmh, xmml, xmmlm, ext=1, size=1
unpack xmml, xmml, xmmlm, ext=0, size=1
};
def macroop PUNPCKLBW_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=1
unpack xmml, xmml, ufp1, ext=0, size=1
};
def macroop PUNPCKLBW_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=1
unpack xmml, xmml, ufp1, ext=0, size=1
};
def macroop PUNPCKLWD_XMM_XMM {
unpack xmmh, xmml, xmmlm, ext=1, size=2
unpack xmml, xmml, xmmlm, ext=0, size=2
};
def macroop PUNPCKLWD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=2
unpack xmml, xmml, ufp1, ext=0, size=2
};
def macroop PUNPCKLWD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=2
unpack xmml, xmml, ufp1, ext=0, size=2
};
def macroop PUNPCKLDQ_XMM_XMM {
unpack xmmh, xmml, xmmlm, ext=1, size=4
unpack xmml, xmml, xmmlm, ext=0, size=4
};
def macroop PUNPCKLDQ_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=4
unpack xmml, xmml, ufp1, ext=0, size=4
};
def macroop PUNPCKLDQ_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=4
unpack xmml, xmml, ufp1, ext=0, size=4
};
def macroop PUNPCKHBW_XMM_XMM {
unpack xmml, xmmh, xmmhm, ext=0, size=1
unpack xmmh, xmmh, xmmhm, ext=1, size=1
};
def macroop PUNPCKHBW_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=1
unpack xmmh, xmmh, ufp1, ext=1, size=1
};
def macroop PUNPCKHBW_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=1
unpack xmmh, xmmh, ufp1, ext=1, size=1
};
def macroop PUNPCKHWD_XMM_XMM {
unpack xmml, xmmh, xmmhm, ext=0, size=2
unpack xmmh, xmmh, xmmhm, ext=1, size=2
};
def macroop PUNPCKHWD_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=2
unpack xmmh, xmmh, ufp1, ext=1, size=2
};
def macroop PUNPCKHWD_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=2
unpack xmmh, xmmh, ufp1, ext=1, size=2
};
def macroop PUNPCKHDQ_XMM_XMM {
unpack xmml, xmmh, xmmhm, ext=0, size=4
unpack xmmh, xmmh, xmmhm, ext=1, size=4
};
def macroop PUNPCKHDQ_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=4
unpack xmmh, xmmh, ufp1, ext=1, size=4
};
def macroop PUNPCKHDQ_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=4
unpack xmmh, xmmh, ufp1, ext=1, size=4
};
def macroop PUNPCKHQDQ_XMM_XMM {
movfp xmml, xmmh
movfp xmmh, xmmhm
};
def macroop PUNPCKHQDQ_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
movfp xmml, xmmh
movfp xmmh, ufp1
};
def macroop PUNPCKHQDQ_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, riprel, 8, dataSize=8
movfp xmml, xmmh
movfp xmmh, ufp1
};
def macroop PUNPCKLQDQ_XMM_XMM {
movfp xmmh, xmmlm
};
def macroop PUNPCKLQDQ_XMM_M {
ldfp xmmh, seg, sib, disp, dataSize=8
};
def macroop PUNPCKLQDQ_XMM_P {
rdip t7
ldfp xmmh, seg, riprel, disp, dataSize=8
};
'''
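# Illustrative only (not part of the gem5 microcode): a pure-Python sketch of
# the byte interleaving that PUNPCKLBW performs on the low 8 bytes of two
# 16-byte XMM operands. The function name and operand lists are hypothetical.
def _punpcklbw_sketch(dst, src):
    """Interleave the low halves: dst[0], src[0], dst[1], src[1], ..."""
    out = []
    for d, s in zip(dst[:8], src[:8]):
        out.extend([d, s])
    return out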
switchyrascal/beeps | test/beeps/controllers/test_plancontroller.py
# Copyright © 2012-2015 Jonty Pearson.
#
# This file is part of Beeps.
#
# Beeps is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Beeps is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Beeps. If not, see <http://www.gnu.org/licenses/>.
import testhelper
import beeps.models.plan
import beeps.views.planview
import beeps.controllers.plancontroller
import gi.repository.Gdk
import gi.repository.GLib
import gi.repository.Gtk
import gi.repository.GtkClutter
import logging
import os.path
import tempfile
def teardown_module(module):
assert(testhelper.gtk_check_main() == True)
class TestController:
def test_initialization_with_model(self):
model = beeps.models.plan.Plan()
controller = beeps.controllers.plancontroller.Controller(model)
assert(controller.model == model)
assert(controller.view == None)
assert(len(controller._filters) == 0)
def test_initialization_with_filters(self):
model = beeps.models.plan.Plan()
view = beeps.views.planview.Component()
filter = beeps.helpers.event.WhiteListFilter(beeps.helpers.event.IntervalCreated)
controller = beeps.controllers.plancontroller.Controller(model, filter)
assert(len(controller._filters) == 1)
assert(controller._filters[0] == filter)
class TestDialogController:
def test_should_initialize_without_model(self):
view = beeps.views.planview.Component()
controller = beeps.controllers.plancontroller.DialogController(model = None)
assert(controller.model == None)
def test_should_initialize_with_model(self):
model = beeps.models.plan.Plan()
controller = beeps.controllers.plancontroller.DialogController(model = model)
assert(controller.model == model)
def test_should_initialize_without_parent(self):
view = beeps.views.planview.Component()
controller = beeps.controllers.plancontroller.DialogController(parent = None)
assert(controller._parent == None)
def test_should_initialize_with_parent(self):
view = beeps.views.planview.Component()
controller = beeps.controllers.plancontroller.DialogController(parent = self)
assert(controller._parent == self)
def test_should_initialize_without_view(self):
view = beeps.views.planview.Component()
controller = beeps.controllers.plancontroller.DialogController()
assert(controller.view == None)
class TestAboutInfoController:
def setup_method(self, method):
self._controller = beeps.controllers.plancontroller.AboutInfoController()
def test_should_run_the_dialog(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.DELETE_EVENT, self._do_dialog_delete_event)
def _do_dialog_delete_event(self):
self._controller.view.close()
class TestIntervalSoundChooserController:
def setup_method(self, method):
self._plan = beeps.models.plan.Plan()
self._plan.create_routine('test')
self._plan.create_interval(self._plan.routines[0], 0)
self._controller = beeps.controllers.plancontroller.IntervalSoundChooserController(self._plan, self._plan.routines[0].intervals[0])
def test_should_run_the_dialog(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.DELETE_EVENT, self._do_dialog_delete_event)
def test_should_handle_accept_response(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_dialog_accept)
assert(self._plan.routines[0].intervals[0].sound == os.path.abspath(testhelper.SOUND_FILE))
def _do_dialog_delete_event(self):
self._controller.view.close()
def _do_dialog_accept(self):
self._controller.view.set_filename(testhelper.SOUND_FILE)
testhelper.gtk_update_gui()
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
class TestPlanController:
def setup_class(self):
self._app = beeps.application.BeepsApp(application_id = beeps.application.BeepsApp.APPLICATION_ID + '.TestPlanController')
self._app.register()
self._app.config = beeps.helpers.config.ConfigIgnoreLoadIgnoreSave()
def setup_method(self, method):
# Create a new view and connect it up; pre-existing routines are not
# loaded. We could clear out the plan here, but for now it is useful
# not to, as a robustness check:
# for r in self._controller.model.routines:
# self._plan.delete_routine(r)
self._plan = beeps.models.plan.Plan()
self._controller = beeps.controllers.plancontroller.PlanController(self._plan, self._app)
self._controller.run()
self._idle_delay = testhelper.CallbackDelay()
testhelper.gtk_update_gui()
def teardown_method(self, method):
testhelper.gtk_update_gui()
def test_should_handle_on_control_repeat_toggled(self):
self._create_routine(2)
assert(self._plan.runner.has_repeat(True))
self._setup_callback(self._do_control_repeat_toggled)
gi.repository.Gtk.main()
assert(self._plan.runner.has_repeat(True))
def test_should_handle_on_control_restart_toggled(self):
r = self._create_routine(2)
r.seek(0.5)
self._setup_callback(self._do_control_restart_toggled)
gi.repository.Gtk.main()
def test_should_handle_on_control_seek_change_value(self):
self._create_routine(2)
testhelper.assert_close(self._plan.routines[-1].completed(), 0.0)
seekscale = testhelper.find_child(self._controller.view, 'seekscale')
self._setup_callback(seekscale.emit, 'change-value', gi.repository.Gtk.ScrollType.JUMP, 50.0)
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
testhelper.assert_close(self._plan.routines[-1].completed(), 1.0)
def test_should_handle_on_control_startstop_toggled(self):
self._create_routine(2)
assert(self._plan.runner.has_state(beeps.models.plan.Runner.State.READY))
self._setup_callback(self._do_control_startstop_toggled)
gi.repository.Gtk.main()
assert(self._plan.runner.has_state(beeps.models.plan.Runner.State.STOPPED))
def test_should_handle_on_edit_delete_activate(self):
self._create_routine(2)
self._setup_callback(self._controller.view.lookup_action('edit::delete').activate)
self._setup_callback(self._do_close_dialog_and_window)
gi.repository.Gtk.main()
def test_should_handle_on_edit_edit_activate(self):
self._create_routine(2)
self._setup_callback(self._controller.view.lookup_action('edit::edit').activate)
self._setup_callback(self._do_close_dialog_and_window)
gi.repository.Gtk.main()
def test_should_handle_on_edit_new_activate(self):
self._setup_callback(self._controller.view.lookup_action('edit::new').activate)
self._setup_callback(self._do_close_dialog_and_window)
gi.repository.Gtk.main()
def test_should_handle_on_plan_close_activate(self):
self._setup_callback(self._controller.view.lookup_action('plan::close').activate)
self._setup_callback(gi.repository.Gtk.main_quit)
gi.repository.Gtk.main()
def test_should_handle_on_plan_delete_event(self):
self._setup_callback(self._controller.view.close)
self._setup_callback(gi.repository.Gtk.main_quit)
gi.repository.Gtk.main()
def test_should_handle_on_plan_export_activate(self):
self._create_routine(2)
self._setup_callback(self._controller.view.lookup_action('plan::export').activate)
self._setup_callback(self._do_close_dialog_and_window)
gi.repository.Gtk.main()
def test_should_handle_on_plan_import_activate(self):
self._setup_callback(self._controller.view.lookup_action('plan::import').activate)
self._setup_callback(self._do_close_dialog_and_window)
gi.repository.Gtk.main()
def test_should_handle_on_routine_button_press_event(self):
e = gi.repository.Gdk.EventButton()
e.type = gi.repository.Gdk.EventType.BUTTON_PRESS
e.button = gi.repository.Gdk.BUTTON_SECONDARY
menu = self._controller.view.components[beeps.views.planview.RoutinePopupMenu]
self._setup_callback(menu.get_attach_widget().emit, 'button-press-event', e.copy())
self._setup_callback(menu.popdown)
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
def test_should_handle_on_routine_changed(self, *args):
self._create_routine(4)
routinetreeview = testhelper.find_child(self._controller.view, 'routinetreeview')
self._setup_callback(routinetreeview.get_selection().unselect_all)
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
def test_should_handle_on_routine_menu_menu(self):
menu = self._controller.view.components[beeps.views.planview.RoutinePopupMenu]
self._setup_callback(menu.get_attach_widget().emit, 'popup-menu')
self._setup_callback(menu.popdown)
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
def test_should_handle_on_routine_row_activated(self):
self._create_routine(4)
self._create_routine(6)
self._plan.load_routine(self._plan.routines[-1])
assert(self._plan.runner.routine == self._plan.routines[-1])
routinetreeview = testhelper.find_child(self._controller.view, 'routinetreeview')
p = gi.repository.Gtk.TreePath(0)
s = routinetreeview.get_selection()
self._setup_callback(s.select_path, p)
self._setup_callback(routinetreeview.row_activated, p, routinetreeview.get_column(0))
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
assert(self._plan.runner.routine == self._plan.routines[-2])
def test_should_handle_on_volume_decrease_activate(self):
volumebutton = testhelper.find_child(self._controller.view, 'volumebutton')
testhelper.assert_close(volumebutton.get_value(), 0.1)
self._setup_callback(self._controller.view.lookup_action('volume::decrease').activate)
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
testhelper.assert_close(volumebutton.get_value(), 0.0)
def test_should_handle_on_volume_increase_activate(self):
volumebutton = testhelper.find_child(self._controller.view, 'volumebutton')
testhelper.assert_close(volumebutton.get_value(), 0.1)
self._setup_callback(self._controller.view.lookup_action('volume::increase').activate)
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
testhelper.assert_close(volumebutton.get_value(), 0.3)
def test_should_handle_on_volume_value_changed(self):
volumebutton = testhelper.find_child(self._controller.view, 'volumebutton')
assert(volumebutton.get_value() == 0.1)
# just make sure we don't have an infinite loop...
self._setup_callback(volumebutton.set_value, 0.45)
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
assert(volumebutton.get_value() == 0.45)
def test_should_be_able_to_pause_a_routine_that_is_in_progress(self):
d = 100
self._create_routine(d)
self._setup_callback(self._controller.view.lookup_action('control::startstop').activate)
self._setup_callback(self._controller.view.lookup_action('control::startstop').activate)
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
assert(self._plan.runner.has_state(beeps.models.plan.Runner.State.PAUSED) == True)
def test_should_be_able_to_quit_a_routine_that_is_in_progress(self):
l = testhelper.Listener()
self._controller.model.runner.connect('event', l.call)
d = 100
self._create_routine(d)
self._setup_callback(self._controller.view.lookup_action('control::startstop').activate)
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
testhelper.gtk_update_gui()
assert(self._plan.runner.has_state(beeps.models.plan.Runner.State.STOPPED) == True)
assert(0 < l.elapsed(beeps.helpers.event.RoutineStarted, beeps.helpers.event.IntervalProgressed) < d)
def test_should_be_able_to_run_a_routine(self):
l = testhelper.Listener()
self._controller.model.runner.connect('event', l.call)
d = 2
self._create_routine(d)
self._setup_callback(self._controller.view.lookup_action('control::repeat').activate)
self._setup_callback(self._controller.view.lookup_action('control::startstop').activate)
self._idle_delay.total += (d * 2 * 1000)
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
assert(2.0 < l.elapsed(beeps.helpers.event.RoutineStarted, beeps.helpers.event.IntervalFinished) < 2.5)
def test_should_be_able_to_seek_in_a_routine_that_is_in_progress(self):
l = testhelper.Listener()
self._controller.model.runner.connect('event', l.call)
d = 100
self._create_routine(d)
seekscale = testhelper.find_child(self._controller.view, 'seekscale')
self._setup_callback(self._controller.view.lookup_action('control::startstop').activate)
self._setup_callback(seekscale.emit, 'change-value', gi.repository.Gtk.ScrollType.JUMP, 33.0)
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
assert(0 < l.elapsed(beeps.helpers.event.RoutineStarted, beeps.helpers.event.IntervalProgressed) < d)
testhelper.assert_close(self._plan.runner.routine.progress(), 0.33)
def test_should_handle_routine_finished(self):
eventually = testhelper.Eventually(testhelper.gtk_update_gui)
d = 1
self._create_routine(d)
self._setup_callback(self._controller.view.lookup_action('control::repeat').activate)
self._setup_callback(self._controller.view.lookup_action('control::startstop').activate)
self._idle_delay.total += (d * 2 * 1000)
self._setup_callback(self._do_close_window)
gi.repository.Gtk.main()
assert(self._plan.routines[-1].completed() <= 0.0)
def _create_routine(self, time):
self._plan.create_routine('test' + str(len(self._plan.routines)))
r = self._plan.routines[-1]
self._plan.create_interval(r, time)
self._plan.load_routine(r)
return r
def _do_close_window(self):
self._controller.view.close()
# window now closed, quit main loop
gi.repository.Gtk.main_quit()
def _do_close_dialog_and_window(self):
gi.repository.Gtk.grab_get_current().close()
testhelper.gtk_update_gui()
self._controller.view.close()
# dialog and window now closed, quit main loop
gi.repository.Gtk.main_quit()
def _do_control_repeat_toggled(self):
self._controller.view.lookup_action('control::repeat').activate()
assert(self._plan.runner.has_repeat(False))
self._controller.view.lookup_action('control::repeat').activate()
self._do_close_window()
def _do_control_restart_toggled(self):
testhelper.assert_close(self._controller.model.routines[-1].completed(), 0.5)
self._controller.view.lookup_action('control::restart').activate()
testhelper.assert_close(self._controller.model.routines[-1].completed(), 0.0)
self._do_close_window()
def _do_control_startstop_toggled(self):
action = self._controller.view.lookup_action('control::startstop')
action.activate()
testhelper.gtk_update_gui()
assert(self._plan.runner.has_state(beeps.models.plan.Runner.State.RUNNING))
action.activate()
testhelper.gtk_update_gui()
assert(self._plan.runner.has_state(beeps.models.plan.Runner.State.PAUSED))
self._do_close_window()
testhelper.gtk_update_gui()
def _setup_callback(self, callback, *args):
timeout = next(self._idle_delay)
logging.info('Calling ' + str(callback) + ' in ' + str(timeout) + ' ms')
gi.repository.GLib.timeout_add(timeout, testhelper.gtk_call_once(callback), *args)
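# Illustrative only: callbacks queued via _setup_callback fire in order at
# increasing GLib timeouts, so each test script reads top to bottom, e.g.
# self._setup_callback(action.activate)        # fires first
# self._setup_callback(self._do_close_window)  # fires after a further delay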
class TestPlanExportController:
def setup_method(self, method):
self._plan = beeps.models.plan.Plan()
self._plan.create_routine('test')
self._controller = beeps.controllers.plancontroller.PlanExportController(self._plan)
def teardown_method(self, method):
self._controller.view.close()
testhelper.gtk_update_gui()
def test_should_run_the_dialog(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.DELETE_EVENT, self._do_dialog_delete_event)
def test_should_handle_accept_response(self):
with tempfile.TemporaryDirectory() as tmpdir:
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_dialog_accept, tmpdir)
assert(os.path.exists(self._controller.view.get_filename()) == True)
def _do_dialog_delete_event(self):
self._controller.view.close()
def _do_dialog_accept(self, tmpdir):
name = 'export.json'
path = os.path.join(tmpdir, name)
self._controller.view.set_current_folder(tmpdir)
self._controller.view.set_current_name(name)
testhelper.gtk_update_gui()
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
class TestPlanImportController:
def setup_method(self, method):
self._plan = beeps.models.plan.Plan()
self._controller = beeps.controllers.plancontroller.PlanImportController(self._plan)
def teardown_method(self, method):
self._controller.view.close()
testhelper.gtk_update_gui()
def test_should_run_the_dialog(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.DELETE_EVENT, self._do_dialog_delete_event)
def test_should_handle_accept_response(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_dialog_accept)
assert(len(self._plan.routines) == 1)
def _do_dialog_delete_event(self):
self._controller.view.close()
def _do_dialog_accept(self):
filename = testhelper.PLAN_FILE
self._controller.view.set_filename(filename)
testhelper.gtk_update_gui()
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
class RoutineEditorControllerTests:
def teardown_method(self, method):
self._controller.view.quit()
testhelper.gtk_update_gui()
def test_should_add_an_interval_sound(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_add_an_interval_sound)
assert(self._controller.model.routines[0].intervals[0].sound == testhelper.SOUND_FILE)
def test_should_create_a_sound_chooser_dialog(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_create_a_sound_chooser_dialog)
def test_should_create_an_interval(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_create_an_interval)
def test_should_delete_an_interval(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_delete_an_interval)
def test_should_remove_an_interval_sound(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_remove_an_interval_sound)
assert(self._controller.model.routines[0].intervals[0].sound == None)
def test_should_update_a_routine_name(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_update_a_routine_name)
assert(self._controller.model.routines[0].name == 'test2')
def test_should_update_an_interval_time(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_update_an_interval_time)
def _do_create_a_sound_chooser_dialog(self):
self._controller.view.get_action_group('editor').lookup_action('interval::add').activate()
v = testhelper.find_cell_model(self._controller.view, 'soundtreeviewcolumn', 'soundsliststore')
i = '0'
gi.repository.GLib.timeout_add(next(testhelper.CallbackDelay()), testhelper.gtk_call_once(self._do_sound_chooser_dialog_close))
testhelper.find_cell_renderer(self._controller.view, 'soundtreeviewcolumn', gi.repository.Gtk.CellRendererCombo).emit('changed', i, v.get_iter(i))
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
def _do_create_an_interval(self):
intervals = len(self._controller.model.routines[0].intervals)
self._controller.view.get_action_group('editor').lookup_action('interval::add').activate()
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
assert(len(self._controller.model.routines[0].intervals) == intervals + 1)
def _do_delete_an_interval(self):
intervals = len(self._controller.model.routines[0].intervals)
self._controller.view.get_action_group('editor').lookup_action('interval::add').activate()
assert(len(self._controller.model.routines[0].intervals) == intervals + 1)
self._controller.view.get_action_group('editor').lookup_action('interval::remove').activate()
assert(len(self._controller.model.routines[0].intervals) == intervals)
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
def _do_dialog_accept(self):
testhelper.gtk_update_gui()
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
def _do_dialog_delete_event(self):
self._controller.view.close()
def _do_sound_chooser_dialog_close(self):
gi.repository.Gtk.grab_get_current().close()
def _do_update_a_routine_name(self):
testhelper.find_child(self._controller.view, 'nameentry').set_text('test2')
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
def _do_update_an_interval_time(self):
assert(int(self._controller.model.routines[0].intervals[0].time) == 1)
testhelper.find_cell_renderer(self._controller.view, 'durationtreeviewcolumn', gi.repository.Gtk.CellRendererText).emit('edited', '0', 3)
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
assert(int(self._controller.model.routines[0].intervals[0].time) == 3)
class TestRoutineCreateController(RoutineEditorControllerTests):
def setup_class(self):
gi.repository.GtkClutter.init()
def setup_method(self, method):
self._plan = beeps.models.plan.Plan()
self._controller = beeps.controllers.plancontroller.RoutineCreateController(self._plan)
def test_should_run_the_dialog(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.DELETE_EVENT, self._do_dialog_delete_event)
def test_should_handle_accept_response(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_dialog_accept)
assert(len(self._plan.routines) == 1)
assert(self._plan.routines[0].name == 'Untitled')
def _do_add_an_interval_sound(self):
self._controller.view.get_action_group('editor').lookup_action('interval::add').activate()
# add a sound and select it (appears as 3rd row in dropdown)
v = testhelper.find_cell_model(self._controller.view, 'soundtreeviewcolumn', 'soundsliststore')
it = v.append()
v.set(it, v._sound_data(testhelper.SOUND_FILE))
i = '2'
testhelper.find_cell_renderer(self._controller.view, 'soundtreeviewcolumn', gi.repository.Gtk.CellRendererCombo).emit('changed', i, v.get_iter(i))
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
def _do_remove_an_interval_sound(self):
self._controller.view.get_action_group('editor').lookup_action('interval::add').activate()
v = testhelper.find_cell_model(self._controller.view, 'soundtreeviewcolumn', 'soundsliststore')
i = '1'
testhelper.find_cell_renderer(self._controller.view, 'soundtreeviewcolumn', gi.repository.Gtk.CellRendererCombo).emit('changed', i, v.get_iter(i))
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
def _do_update_an_interval_time(self):
self._controller.view.get_action_group('editor').lookup_action('interval::add').activate()
super()._do_update_an_interval_time()
class TestRoutineDeleteController:
def setup_method(self, method):
self._plan = beeps.models.plan.Plan()
self._plan.create_routine('test')
self._controller = beeps.controllers.plancontroller.RoutineDeleteController(self._plan, self._plan.routines[0])
def teardown_method(self, method):
testhelper.gtk_update_gui()
def test_should_run_the_dialog(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.DELETE_EVENT, self._do_dialog_delete_event)
def test_should_handle_accept_response(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_dialog_accept)
assert(len(self._plan.routines) == 0)
def _do_dialog_accept(self):
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
def _do_dialog_delete_event(self):
self._controller.view.close()
class TestRoutineUpdateController(RoutineEditorControllerTests):
def setup_class(self):
gi.repository.GtkClutter.init()
def setup_method(self, method):
self._plan = beeps.models.plan.Plan()
self._plan.create_routine('test')
self._plan.create_interval(self._plan.routines[0], 1, sound = testhelper.SOUND_FILE)
self._controller = beeps.controllers.plancontroller.RoutineUpdateController(self._plan, self._plan.routines[0])
def test_should_run_the_dialog(self):
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.DELETE_EVENT, self._do_dialog_delete_event)
def test_should_handle_accept_response(self):
name = self._plan.routines[0].name + '2'
testhelper.gtk_run_dialog(self._controller, gi.repository.Gtk.ResponseType.ACCEPT, self._do_dialog_accept, name)
assert(self._plan.routines[0].name == name)
def _do_add_an_interval_sound(self):
# add a sound to the list store and select the first row in the dropdown
v = testhelper.find_cell_model(self._controller.view, 'soundtreeviewcolumn', 'soundsliststore')
it = v.append()
v.set(it, v._sound_data(testhelper.SOUND_FILE))
i = '0'
testhelper.find_cell_renderer(self._controller.view, 'soundtreeviewcolumn', gi.repository.Gtk.CellRendererCombo).emit('changed', i, v.get_iter(i))
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
def _do_create_a_sound_chooser_dialog(self):
self._controller.view.get_action_group('editor').lookup_action('interval::add').activate()
v = testhelper.find_cell_model(self._controller.view, 'soundtreeviewcolumn', 'soundsliststore')
i = '1'
gi.repository.GLib.timeout_add(next(testhelper.CallbackDelay()), testhelper.gtk_call_once(self._do_sound_chooser_dialog_close))
testhelper.find_cell_renderer(self._controller.view, 'soundtreeviewcolumn', gi.repository.Gtk.CellRendererCombo).emit('changed', i, v.get_iter(i))
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
def _do_dialog_accept(self, name):
testhelper.find_child(self._controller.view, 'nameentry').set_text('test2')
super()._do_dialog_accept()
def _do_remove_an_interval_sound(self):
v = testhelper.find_cell_model(self._controller.view, 'soundtreeviewcolumn', 'soundsliststore')
i = '2'
testhelper.find_cell_renderer(self._controller.view, 'soundtreeviewcolumn', gi.repository.Gtk.CellRendererCombo).emit('changed', i, v.get_iter(i))
self._controller.view.get_widget_for_response(gi.repository.Gtk.ResponseType.ACCEPT).emit('clicked')
renegelinas/mi-instrument | mi/dataset/parser/spkir_abj_dcl.py
#!/usr/bin/env python
"""
@package mi.dataset.parser.spkir_abj_dcl
@file marine-integrations/mi/dataset/parser/spkir_abj_dcl.py
@author Steve Myerson
@brief Parser for the spkir_abj_dcl dataset driver
This file contains code for the spkir_abj_dcl parsers and code to produce data particles.
For telemetered data, there is one parser which produces one type of data particle.
For recovered data, there is one parser which produces one type of data particle.
The input files and the content of the data particles are the same for both
recovered and telemetered.
Only the names of the output particle streams are different.
The input file is mixed ASCII and binary and contains two types of records.
All records start with a timestamp.
Records are separated by newlines (line feed, or carriage return plus line feed).
Binary data is big endian.
Metadata records: timestamp [text] more text line-feed.
Sensor Data records: timestamp ASCII-data Binary-data carriage-return line-feed.
Only sensor data records produce particles if properly formed.
Metadata records produce no particles.
Release notes:
Initial Release
"""
import re
import struct
from mi.core.log import get_logger
from mi.dataset.dataset_parser import SimpleParser
from mi.dataset.parser.common_regexes import \
DATE_YYYY_MM_DD_REGEX, \
TIME_HR_MIN_SEC_MSEC_REGEX
from mi.dataset.parser.utilities import \
dcl_time_to_ntp
from mi.core.common import BaseEnum
from mi.core.exceptions import UnexpectedDataException
from mi.core.instrument.dataset_data_particle import \
DataParticle, \
DataParticleKey, \
DataParticleValue
log = get_logger()
__author__ = 'Steve Myerson'
__license__ = 'Apache 2.0'
# Basic patterns
ANY_CHARS = r'.*' # Any characters excluding a newline
BINARY_BYTE = b'([\x00-\xFF])' # Binary 8-bit field (1 byte)
BINARY_SHORT = b'([\x00-\xFF]{2})' # Binary 16-bit field (2 bytes)
BINARY_LONG = b'([\x00-\xFF]{4})' # Binary 32-bit field (4 bytes)
SPACE = ' '
START_GROUP = '('
END_GROUP = ')'
# Timestamp at the start of each record: YYYY/MM/DD HH:MM:SS.mmm
TIMESTAMP = START_GROUP + DATE_YYYY_MM_DD_REGEX + SPACE + TIME_HR_MIN_SEC_MSEC_REGEX + END_GROUP
START_METADATA = r'\['
END_METADATA = r'\]'
# Metadata record:
# Timestamp [Text]MoreText newline
METADATA_REGEX = START_GROUP # group a single metadata record
METADATA_REGEX += TIMESTAMP + SPACE # date and time
METADATA_REGEX += START_METADATA # Metadata record starts with '['
METADATA_REGEX += ANY_CHARS # followed by text
METADATA_REGEX += END_METADATA # followed by ']'
METADATA_REGEX += ANY_CHARS # followed by more text
METADATA_REGEX += r'\n' # Metadata record ends with a line-feed
METADATA_REGEX += END_GROUP + '+' # match multiple metadata records together
METADATA_MATCHER = re.compile(METADATA_REGEX)
# Sensor data record:
# Timestamp SATDI<id><ascii data><binary data> newline
SENSOR_DATA_REGEX = TIMESTAMP + SPACE # date and time
SENSOR_DATA_REGEX += START_GROUP # Group data fields (for checksum calc)
SENSOR_DATA_REGEX += r'(SATDI\d)' # ASCII SATDI id
SENSOR_DATA_REGEX += r'(\d{4})' # ASCII serial number
SENSOR_DATA_REGEX += r'(\d{7}\.\d{2})' # ASCII timer
SENSOR_DATA_REGEX += BINARY_SHORT # 2-byte signed sample delay
SENSOR_DATA_REGEX += START_GROUP # Group all the channel ADC counts
SENSOR_DATA_REGEX += BINARY_LONG # 4-byte unsigned Channel 1 ADC count
SENSOR_DATA_REGEX += BINARY_LONG # 4-byte unsigned Channel 2 ADC count
SENSOR_DATA_REGEX += BINARY_LONG # 4-byte unsigned Channel 3 ADC count
SENSOR_DATA_REGEX += BINARY_LONG # 4-byte unsigned Channel 4 ADC count
SENSOR_DATA_REGEX += BINARY_LONG # 4-byte unsigned Channel 5 ADC count
SENSOR_DATA_REGEX += BINARY_LONG # 4-byte unsigned Channel 6 ADC count
SENSOR_DATA_REGEX += BINARY_LONG # 4-byte unsigned Channel 7 ADC count
SENSOR_DATA_REGEX += END_GROUP # End of channel ADC counts group
SENSOR_DATA_REGEX += BINARY_SHORT # 2-byte unsigned Supply Voltage
SENSOR_DATA_REGEX += BINARY_SHORT # 2-byte unsigned Analog Voltage
SENSOR_DATA_REGEX += BINARY_SHORT # 2-byte unsigned Internal Temperature
SENSOR_DATA_REGEX += BINARY_BYTE # 1-byte unsigned Frame Count
SENSOR_DATA_REGEX += BINARY_BYTE # 1-byte unsigned Checksum
SENSOR_DATA_REGEX += END_GROUP # End of all the data group
SENSOR_DATA_REGEX += r'\r\n' # Sensor data record ends with CR-LF
SENSOR_DATA_MATCHER = re.compile(SENSOR_DATA_REGEX)
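# Illustrative only (made-up bytes): how the binary groups captured above are
# decoded later with struct; '>' selects big-endian, matching the note in the
# module docstring.
# struct.unpack('>h', '\x00\x0a')   # -> (10,)  signed 16-bit sample delay
# struct.unpack('>7I', '\x00' * 28) # -> seven unsigned 32-bit ADC counts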
# The following are indices into SENSOR_DATA_MATCHER.groups()
SENSOR_GROUP_TIMESTAMP = 0
SENSOR_GROUP_YEAR = 1
SENSOR_GROUP_MONTH = 2
SENSOR_GROUP_DAY = 3
SENSOR_GROUP_HOUR = 4
SENSOR_GROUP_MINUTE = 5
SENSOR_GROUP_SECOND = 6
SENSOR_GROUP_MILLI_SECOND = 7
SENSOR_GROUP_CHECKSUM_SECTION = 8
SENSOR_GROUP_ID = 9
SENSOR_GROUP_SERIAL = 10
SENSOR_GROUP_TIMER = 11
SENSOR_GROUP_DELAY = 12
SENSOR_GROUP_ADC_COUNTS = 13
SENSOR_GROUP_SUPPLY_VOLTAGE = 21
SENSOR_GROUP_ANALOG_VOLTAGE = 22
SENSOR_GROUP_TEMPERATURE = 23
SENSOR_GROUP_FRAME_COUNT = 24
SENSOR_GROUP_CHECKSUM = 25
# The following are indices into raw_data
PARTICLE_GROUP_TIMESTAMP = 0
PARTICLE_GROUP_YEAR = 1
PARTICLE_GROUP_MONTH = 2
PARTICLE_GROUP_DAY = 3
PARTICLE_GROUP_HOUR = 4
PARTICLE_GROUP_MINUTE = 5
PARTICLE_GROUP_SECOND = 6
PARTICLE_GROUP_ID = 7
PARTICLE_GROUP_SERIAL = 8
PARTICLE_GROUP_TIMER = 9
PARTICLE_GROUP_DELAY = 10
PARTICLE_GROUP_CHANNEL = 11
PARTICLE_GROUP_SUPPLY_VOLTAGE = 12
PARTICLE_GROUP_ANALOG_VOLTAGE = 13
PARTICLE_GROUP_TEMPERATURE = 14
PARTICLE_GROUP_FRAME_COUNT = 15
PARTICLE_GROUP_CHECKSUM = 16
CHECKSUM_FAILED = 0 # particle value if the checksum does not match
CHECKSUM_PASSED = 1 # particle value if the checksum matches
# This table is used in the generation of the instrument data particle.
# Column 1 - particle parameter name
# Column 2 - index into raw_data
# Column 3 - data encoding function (conversion required - int, float, etc)
INSTRUMENT_PARTICLE_MAP = [
('instrument_id', PARTICLE_GROUP_ID, str),
('serial_number', PARTICLE_GROUP_SERIAL, str),
('timer', PARTICLE_GROUP_TIMER, float),
('sample_delay', PARTICLE_GROUP_DELAY, int),
('channel_array', PARTICLE_GROUP_CHANNEL, list),
('vin_sense', PARTICLE_GROUP_SUPPLY_VOLTAGE, int),
('va_sense', PARTICLE_GROUP_ANALOG_VOLTAGE, int),
('internal_temperature', PARTICLE_GROUP_TEMPERATURE, int),
('frame_counter', PARTICLE_GROUP_FRAME_COUNT, int),
('passed_checksum', PARTICLE_GROUP_CHECKSUM, int)
]
class SpkirDataParticleType(BaseEnum):
REC_INSTRUMENT_PARTICLE = 'spkir_abj_dcl_instrument_recovered'
TEL_INSTRUMENT_PARTICLE = 'spkir_abj_dcl_instrument'
class SpkirAbjDclInstrumentDataParticle(DataParticle):
"""
Class for generating the Spkir instrument particle.
"""
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=DataParticleKey.PORT_TIMESTAMP,
quality_flag=DataParticleValue.OK,
new_sequence=None):
super(SpkirAbjDclInstrumentDataParticle, self).__init__(raw_data,
port_timestamp,
internal_timestamp,
preferred_timestamp,
quality_flag,
new_sequence)
def _build_parsed_values(self):
"""
Build parsed values for Recovered and Telemetered Instrument Data Particle.
"""
# Generate a particle by calling encode_value for each entry
# in the Instrument Particle Mapping table,
# where each entry is a tuple containing the particle field name,
# an index into raw_data, and a function to use for data conversion.
return [self._encode_value(name, self.raw_data[group], function)
for name, group, function in INSTRUMENT_PARTICLE_MAP]
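# Illustrative only: one mapping row, ('timer', PARTICLE_GROUP_TIMER, float),
# makes _build_parsed_values call
# self._encode_value('timer', self.raw_data[9], float),
# i.e. the ASCII timer field is emitted as a float in the output particle.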
class SpkirAbjDclRecoveredInstrumentDataParticle(SpkirAbjDclInstrumentDataParticle):
"""
Class for generating Offset Data Particles from Recovered data.
"""
_data_particle_type = SpkirDataParticleType.REC_INSTRUMENT_PARTICLE
class SpkirAbjDclTelemeteredInstrumentDataParticle(SpkirAbjDclInstrumentDataParticle):
"""
Class for generating Offset Data Particles from Telemetered data.
"""
_data_particle_type = SpkirDataParticleType.TEL_INSTRUMENT_PARTICLE
class SpkirAbjDclParser(SimpleParser):
"""
Parser for Spkir_abj_dcl data.
In addition to the standard constructor parameters,
this constructor takes an additional parameter particle_class.
"""
def __init__(self,
config,
stream_handle,
exception_callback,
particle_class):
super(SpkirAbjDclParser, self).__init__(config,
stream_handle,
exception_callback)
# Default the position within the file to the beginning.
self.particle_class = particle_class
def parse_file(self):
"""
Parse the entire input file, building a particle for each valid
sensor data record and appending it to the record buffer.
Metadata records are ignored; any other unexpected data is
reported via the exception callback.
"""
data = self._stream_handle.read()
position = 0 # keep track of where we are in the file
matches = SENSOR_DATA_MATCHER.finditer(data)
for sensor_match in matches:
start = sensor_match.start()
# check to see if we skipped over any data
if start != position:
skipped_data = data[position:start]
meta_match = METADATA_MATCHER.match(skipped_data)
if meta_match is not None and meta_match.group(0) == skipped_data:
pass # ignore all metadata records
else:
error_message = 'Unknown data found in line %s' % skipped_data
log.warn(error_message)
self._exception_callback(UnexpectedDataException(error_message))
position = sensor_match.end() # increment the position
groups = sensor_match.groups()
# See if the checksum is correct.
# Checksum is the modulo 256 sum of all data bytes.
# If calculated checksum is zero, the record checksum is valid.
buffer_checksum = groups[SENSOR_GROUP_CHECKSUM_SECTION]
checksum = sum(map(ord, buffer_checksum)) % 256
if checksum == 0:
checksum_status = CHECKSUM_PASSED
else:
checksum_status = CHECKSUM_FAILED
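# Worked example (hypothetical bytes): ord('\x01') + ord('\x02') + ord('\xfd')
# is 256, so 256 % 256 == 0 and such a record would pass the checksum.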
# Create a tuple containing all the data to be used when
# creating the particle.
# The order of the particle data matches the PARTICLE_GROUPS.
particle_data = (
groups[SENSOR_GROUP_TIMESTAMP],
groups[SENSOR_GROUP_YEAR],
groups[SENSOR_GROUP_MONTH],
groups[SENSOR_GROUP_DAY],
groups[SENSOR_GROUP_HOUR],
groups[SENSOR_GROUP_MINUTE],
groups[SENSOR_GROUP_SECOND],
groups[SENSOR_GROUP_ID],
groups[SENSOR_GROUP_SERIAL],
groups[SENSOR_GROUP_TIMER],
struct.unpack('>h', groups[SENSOR_GROUP_DELAY])[0],
list(struct.unpack('>7I', groups[SENSOR_GROUP_ADC_COUNTS])),
struct.unpack('>H', groups[SENSOR_GROUP_SUPPLY_VOLTAGE])[0],
struct.unpack('>H', groups[SENSOR_GROUP_ANALOG_VOLTAGE])[0],
struct.unpack('>H', groups[SENSOR_GROUP_TEMPERATURE])[0],
struct.unpack('>B', groups[SENSOR_GROUP_FRAME_COUNT])[0],
checksum_status
)
# DCL Controller timestamp is the port_timestamp
port_timestamp = dcl_time_to_ntp(groups[SENSOR_GROUP_TIMESTAMP])
particle = self._extract_sample(self.particle_class,
None,
particle_data,
port_timestamp=port_timestamp,
preferred_ts=DataParticleKey.PORT_TIMESTAMP)
self._record_buffer.append(particle)
# It's not a sensor data record, see if it's a metadata record.
# If not a valid metadata record, generate warning.
# Valid Metadata records produce no particles and are silently ignored.
# else:
# meta_match = METADATA_MATCHER.match(line)
# if meta_match is None:
# error_message = 'Unknown data found in line %s' % line
# log.warn(error_message)
# self._exception_callback(UnexpectedDataException(error_message))
class SpkirAbjDclRecoveredParser(SpkirAbjDclParser):
"""
This is the entry point for the Recovered Spkir_abj_dcl parser.
"""
def __init__(self,
config,
stream_handle,
exception_callback):
super(SpkirAbjDclRecoveredParser, self).__init__(config,
stream_handle,
exception_callback,
SpkirAbjDclRecoveredInstrumentDataParticle)
class SpkirAbjDclTelemeteredParser(SpkirAbjDclParser):
"""
This is the entry point for the Telemetered Spkir_abj_dcl parser.
"""
def __init__(self,
config,
stream_handle,
exception_callback):
super(SpkirAbjDclTelemeteredParser, self).__init__(config,
stream_handle,
exception_callback,
SpkirAbjDclTelemeteredInstrumentDataParticle)
himacro/Vintageous | ex/parser/scanner_command_write_and_quit_all.py
from .state import EOF
from .tokens import TokenEof
from .tokens_base import TOKEN_COMMAND_WRITE_AND_QUIT_ALL
from .tokens_base import TokenOfCommand
from Vintageous import ex
from Vintageous.ex.ex_error import ERR_INVALID_ARGUMENT
from Vintageous.ex.ex_error import VimError
plus_plus_translations = {
'ff': 'fileformat',
'bin': 'binary',
'enc': 'fileencoding',
'nobin': 'nobinary',
}
@ex.command('wqall', 'wqa')
@ex.command('xall', 'xa')
class TokenCommandWriteAndQuitAll(TokenOfCommand):
def __init__(self, params, *args, **kwargs):
super().__init__(params,
TOKEN_COMMAND_WRITE_AND_QUIT_ALL,
'wqall', *args, **kwargs)
self.addressable = True
self.target_command = 'ex_write_and_quit_all'
@property
def options(self):
return self.params['++']
def scan_command_write_and_quit_all(state):
params = {
'++': '',
}
state.skip(' ')
state.ignore()
c = state.consume()
if c == '+':
state.expect('+')
state.ignore()
# TODO: expect_match should work with emit()
# http://vimdoc.sourceforge.net/htmldoc/editing.html#[++opt]
m = state.expect_match(
r'(?:f(?:ile)?f(?:ormat)?|(?:file)?enc(?:oding)?|(?:no)?bin(?:ary)?|bad|edit)(?=\s|$)',
lambda: VimError(ERR_INVALID_ARGUMENT))
name = m.group(0)
params['++'] = plus_plus_translations.get(name, name)
state.ignore()
state.expect(EOF)
return None, [TokenCommandWriteAndQuitAll(params), TokenEof()]
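# Illustrative only (assumes the surrounding ex command-line machinery):
# ':wqa'       -> TokenCommandWriteAndQuitAll({'++': ''})
# ':wqa ++bin' -> TokenCommandWriteAndQuitAll({'++': 'binary'})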
DianaDespa/marian-train | contrib/vim/.ycm_extra_conf.py
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
'-DUSE_CLANG_COMPLETER',
'-std=c++14',
'-x',
'cuda',
'--cuda-path', '/usr/local/cuda',
'-I', 'src',
'-I', 'src/3rd_party',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cu' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
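# Illustrative only (hypothetical paths): relative include flags are anchored
# to the given working directory, in both split and fused forms.
# MakeRelativePathsInFlagsAbsolute(['-I', 'src', '-Isrc/3rd_party'], '/repo')
# # -> ['-I', '/repo/src', '-I/repo/src/3rd_party']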
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return { 'flags': final_flags }
Nick-Hall/gramps | gramps/gen/lib/src.py
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2010 Michiel D. Nauta
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Doug Blank <[email protected]>
# Copyright (C) 2017 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Source object for Gramps.
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .primaryobj import PrimaryObject
from .mediabase import MediaBase
from .notebase import NoteBase
from .tagbase import TagBase
from .attrbase import SrcAttributeBase
from .reporef import RepoRef
from .const import DIFFERENT, EQUAL, IDENTICAL
from .citationbase import IndirectCitationBase
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Source class
#
#-------------------------------------------------------------------------
class Source(MediaBase, NoteBase, SrcAttributeBase, IndirectCitationBase,
PrimaryObject):
"""A record of a source of information."""
def __init__(self):
"""Create a new Source instance."""
PrimaryObject.__init__(self)
MediaBase.__init__(self)
NoteBase.__init__(self)
SrcAttributeBase.__init__(self)
self.title = ""
self.author = ""
self.pubinfo = ""
self.abbrev = ""
self.reporef_list = []
def serialize(self):
"""
Convert the object to a serialized tuple of data.
"""
return (self.handle, # 0
self.gramps_id, # 1
str(self.title), # 2
str(self.author), # 3
str(self.pubinfo), # 4
NoteBase.serialize(self), # 5
MediaBase.serialize(self), # 6
str(self.abbrev), # 7
self.change, # 8
SrcAttributeBase.serialize(self), # 9
[rr.serialize() for rr in self.reporef_list], # 10
TagBase.serialize(self), # 11
self.private) # 12
@classmethod
def get_schema(cls):
"""
Returns the JSON Schema for this class.
:returns: Returns a dict containing the schema.
:rtype: dict
"""
from .srcattribute import SrcAttribute
from .reporef import RepoRef
from .mediaref import MediaRef
return {
"type": "object",
"title": _("Source"),
"properties": {
"_class": {"enum": [cls.__name__]},
"handle": {"type": "string",
"maxLength": 50,
"title": _("Handle")},
"gramps_id": {"type": "string",
"title": _("Gramps ID")},
"title": {"type": "string",
"title": _("Title")},
"author": {"type": "string",
"title": _("Author")},
"pubinfo": {"type": "string",
"title": _("Publication info")},
"note_list": {"type": "array",
"items": {"type": "string",
"maxLength": 50},
"title": _("Notes")},
"media_list": {"type": "array",
"items": MediaRef.get_schema(),
"title": _("Media")},
"abbrev": {"type": "string",
"title": _("Abbreviation")},
"change": {"type": "integer",
"title": _("Last changed")},
"attribute_list": {"type": "array",
"items": SrcAttribute.get_schema(),
"title": _("Source Attributes")},
"reporef_list": {"type": "array",
"items": RepoRef.get_schema(),
"title": _("Repositories")},
"tag_list": {"type": "array",
"items": {"type": "string",
"maxLength": 50},
"title": _("Tags")},
"private": {"type": "boolean",
"title": _("Private")}
}
}
def unserialize(self, data):
"""
Convert the data held in a tuple created by the serialize method
back into the data in a Source structure.
"""
(self.handle, # 0
self.gramps_id, # 1
self.title, # 2
self.author, # 3
self.pubinfo, # 4
note_list, # 5
media_list, # 6
self.abbrev, # 7
self.change, # 8
srcattr_list, # 9
reporef_list, # 10
tag_list, # 11
self.private # 12
) = data
NoteBase.unserialize(self, note_list)
MediaBase.unserialize(self, media_list)
TagBase.unserialize(self, tag_list)
SrcAttributeBase.unserialize(self, srcattr_list)
self.reporef_list = [RepoRef().unserialize(item) for item in reporef_list]
return self
def _has_handle_reference(self, classname, handle):
"""
Return True if the object has reference to a given handle of given
primary object type.
:param classname: The name of the primary object class.
:type classname: str
:param handle: The handle to be checked.
:type handle: str
:returns: Returns whether the object has reference to this handle of
this object type.
:rtype: bool
"""
if classname == 'Repository':
return handle in [ref.ref for ref in self.reporef_list]
return False
def _remove_handle_references(self, classname, handle_list):
"""
Remove all references in this object to object handles in the list.
:param classname: The name of the primary object class.
:type classname: str
:param handle_list: The list of handles to be removed.
        :type handle_list: list
"""
if classname == 'Repository':
new_list = [ref for ref in self.reporef_list
if ref.ref not in handle_list]
self.reporef_list = new_list
def _replace_handle_reference(self, classname, old_handle, new_handle):
"""
Replace all references to old handle with those to the new handle.
:param classname: The name of the primary object class.
:type classname: str
:param old_handle: The handle to be replaced.
:type old_handle: str
:param new_handle: The handle to replace the old one with.
:type new_handle: str
"""
if classname == 'Repository':
handle_list = [ref.ref for ref in self.reporef_list]
while old_handle in handle_list:
idx = handle_list.index(old_handle)
self.reporef_list[idx].ref = new_handle
handle_list[idx] = ''
def get_text_data_list(self):
"""
Return the list of all textual attributes of the object.
:returns: Returns the list of all textual attributes of the object.
:rtype: list
"""
return [self.title, self.author, self.pubinfo, self.abbrev,
self.gramps_id]
def get_text_data_child_list(self):
"""
Return the list of child objects that may carry textual data.
:returns: Returns the list of child objects that may carry textual data.
:rtype: list
"""
return self.media_list + self.reporef_list + self.attribute_list
def get_citation_child_list(self):
"""
Return the list of child secondary objects that may refer citations.
:returns: Returns the list of child secondary child objects that may
refer citations.
:rtype: list
"""
return self.media_list
def get_note_child_list(self):
"""
Return the list of child secondary objects that may refer notes.
:returns: Returns the list of child secondary child objects that may
refer notes.
:rtype: list
"""
return self.media_list + self.reporef_list
def get_handle_referents(self):
"""
Return the list of child objects which may, directly or through
their children, reference primary objects.
:returns: Returns the list of objects referencing primary objects.
:rtype: list
"""
return self.get_citation_child_list() + self.reporef_list
def get_referenced_handles(self):
"""
Return the list of (classname, handle) tuples for all directly
referenced primary objects.
:returns: List of (classname, handle) tuples for referenced objects.
:rtype: list
"""
return (self.get_referenced_note_handles() +
self.get_referenced_tag_handles())
def merge(self, acquisition):
"""
Merge the content of acquisition into this source.
:param acquisition: The source to merge with the present source.
:type acquisition: Source
"""
self._merge_privacy(acquisition)
self._merge_note_list(acquisition)
self._merge_media_list(acquisition)
self._merge_tag_list(acquisition)
self._merge_attribute_list(acquisition)
self._merge_reporef_list(acquisition)
def set_title(self, title):
"""
Set the descriptive title of the Source object.
:param title: descriptive title to assign to the Source
:type title: str
"""
self.title = title
def get_title(self):
"""
        Return the descriptive title of the Source object.
        :returns: Returns the descriptive title of the Source
:rtype: str
"""
return self.title
def set_author(self, author):
"""Set the author of the Source."""
self.author = author
def get_author(self):
"""Return the author of the Source."""
return self.author
def set_publication_info(self, text):
"""Set the publication information of the Source."""
self.pubinfo = text
def get_publication_info(self):
"""Return the publication information of the Source."""
return self.pubinfo
def set_abbreviation(self, abbrev):
"""Set the title abbreviation of the Source."""
self.abbrev = abbrev
def get_abbreviation(self):
"""Return the title abbreviation of the Source."""
return self.abbrev
def add_repo_reference(self, repo_ref):
"""
Add a :class:`~.reporef.RepoRef` instance to the Source's reporef list.
:param repo_ref: :class:`~.reporef.RepoRef` instance to be added to the
object's reporef list.
:type repo_ref: :class:`~.reporef.RepoRef`
"""
self.reporef_list.append(repo_ref)
def get_reporef_list(self):
"""
Return the list of :class:`~.reporef.RepoRef` instances associated with
the Source.
:returns: list of :class:`~.reporef.RepoRef` instances associated with
the Source
:rtype: list
"""
return self.reporef_list
def set_reporef_list(self, reporef_list):
"""
Set the list of :class:`~.reporef.RepoRef` instances associated with
the Source. It replaces the previous list.
:param reporef_list: list of :class:`~.reporef.RepoRef` instances to be
assigned to the Source.
:type reporef_list: list
"""
self.reporef_list = reporef_list
def _merge_reporef_list(self, acquisition):
"""
Merge the list of repository references from acquisition with our own.
:param acquisition: the repository references list of this object will
be merged with the current repository references
list.
        :type acquisition: Source
"""
reporef_list = self.reporef_list[:]
for addendum in acquisition.get_reporef_list():
for reporef in reporef_list:
equi = reporef.is_equivalent(addendum)
if equi == IDENTICAL:
break
elif equi == EQUAL:
reporef.merge(addendum)
break
else:
self.reporef_list.append(addendum)
def has_repo_reference(self, repo_handle):
"""
Return True if the Source has reference to this Repository handle.
:param repo_handle: The Repository handle to be checked.
:type repo_handle: str
:returns: Returns whether the Source has reference to this Repository
handle.
:rtype: bool
"""
return repo_handle in [repo_ref.ref for repo_ref in self.reporef_list]
def remove_repo_references(self, repo_handle_list):
"""
Remove references to all Repository handles in the list.
:param repo_handle_list: The list of Repository handles to be removed.
:type repo_handle_list: list
"""
new_reporef_list = [repo_ref for repo_ref in self.reporef_list
if repo_ref.ref not in repo_handle_list]
self.reporef_list = new_reporef_list
def replace_repo_references(self, old_handle, new_handle):
"""
Replace all references to old Repository handle with the new handle
and merge equivalent entries.
:param old_handle: The Repository handle to be replaced.
:type old_handle: str
:param new_handle: The Repository handle to replace the old one with.
:type new_handle: str
"""
refs_list = [repo_ref.ref for repo_ref in self.reporef_list]
new_ref = None
if new_handle in refs_list:
new_ref = self.reporef_list[refs_list.index(new_handle)]
n_replace = refs_list.count(old_handle)
for ix_replace in range(n_replace):
idx = refs_list.index(old_handle)
self.reporef_list[idx].ref = new_handle
refs_list[idx] = new_handle
if new_ref:
repo_ref = self.reporef_list[idx]
equi = new_ref.is_equivalent(repo_ref)
if equi != DIFFERENT:
if equi == EQUAL:
new_ref.merge(repo_ref)
self.reporef_list.pop(idx)
refs_list.pop(idx)
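# NOTE: hedged usage sketch, not part of the original Gramps module.
# serialize() and unserialize() are exact inverses over the 13-field tuple
# whose layout is documented by the inline index comments above.
def _source_roundtrip_example():
    src = Source()
    src.set_title('Parish register')  # stored at tuple index 2
    clone = Source().unserialize(src.serialize())
    assert clone.get_title() == 'Parish register'
    return clone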
| gpl-2.0 | -3,088,902,356,311,433,700 | 36.106729 | 82 | 0.530482 | false |
unreal666/youtube-dl | youtube_dl/extractor/instagram.py | 3 | 13288 | from __future__ import unicode_literals
import itertools
import hashlib
import json
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_HTTPError,
)
from ..utils import (
ExtractorError,
get_element_by_attribute,
int_or_none,
lowercase_escape,
std_headers,
try_get,
url_or_none,
)
class InstagramIE(InfoExtractor):
_VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+))'
_TESTS = [{
'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
'md5': '0d2da106a9d2631273e192b372806516',
'info_dict': {
'id': 'aye83DjauH',
'ext': 'mp4',
'title': 'Video by naomipq',
'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1371748545,
'upload_date': '20130620',
'uploader_id': 'naomipq',
'uploader': 'Naomi Leonor Phan-Quang',
'like_count': int,
'comment_count': int,
'comments': list,
},
}, {
# missing description
'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
'info_dict': {
'id': 'BA-pQFBG8HZ',
'ext': 'mp4',
'title': 'Video by britneyspears',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1453760977,
'upload_date': '20160125',
'uploader_id': 'britneyspears',
'uploader': 'Britney Spears',
'like_count': int,
'comment_count': int,
'comments': list,
},
'params': {
'skip_download': True,
},
}, {
# multi video post
'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
'playlist': [{
'info_dict': {
'id': 'BQ0dSaohpPW',
'ext': 'mp4',
'title': 'Video 1',
},
}, {
'info_dict': {
'id': 'BQ0dTpOhuHT',
'ext': 'mp4',
'title': 'Video 2',
},
}, {
'info_dict': {
'id': 'BQ0dT7RBFeF',
'ext': 'mp4',
'title': 'Video 3',
},
}],
'info_dict': {
'id': 'BQ0eAlwhDrw',
'title': 'Post by instagram',
'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
},
}, {
'url': 'https://instagram.com/p/-Cmh1cukG2/',
'only_matching': True,
}, {
'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
'only_matching': True,
}]
@staticmethod
def _extract_embed_url(webpage):
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
webpage)
if mobj:
return mobj.group('url')
blockquote_el = get_element_by_attribute(
'class', 'instagram-media', webpage)
if blockquote_el is None:
return
mobj = re.search(
r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
if mobj:
return mobj.group('link')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
url = mobj.group('url')
webpage = self._download_webpage(url, video_id)
(video_url, description, thumbnail, timestamp, uploader,
uploader_id, like_count, comment_count, comments, height,
width) = [None] * 11
shared_data = self._parse_json(
self._search_regex(
r'window\._sharedData\s*=\s*({.+?});',
webpage, 'shared data', default='{}'),
video_id, fatal=False)
if shared_data:
media = try_get(
shared_data,
(lambda x: x['entry_data']['PostPage'][0]['graphql']['shortcode_media'],
lambda x: x['entry_data']['PostPage'][0]['media']),
dict)
if media:
video_url = media.get('video_url')
height = int_or_none(media.get('dimensions', {}).get('height'))
width = int_or_none(media.get('dimensions', {}).get('width'))
description = try_get(
media, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
compat_str) or media.get('caption')
thumbnail = media.get('display_src')
timestamp = int_or_none(media.get('taken_at_timestamp') or media.get('date'))
uploader = media.get('owner', {}).get('full_name')
uploader_id = media.get('owner', {}).get('username')
def get_count(key, kind):
return int_or_none(try_get(
media, (lambda x: x['edge_media_%s' % key]['count'],
lambda x: x['%ss' % kind]['count'])))
like_count = get_count('preview_like', 'like')
comment_count = get_count('to_comment', 'comment')
comments = [{
'author': comment.get('user', {}).get('username'),
'author_id': comment.get('user', {}).get('id'),
'id': comment.get('id'),
'text': comment.get('text'),
'timestamp': int_or_none(comment.get('created_at')),
} for comment in media.get(
'comments', {}).get('nodes', []) if comment.get('text')]
if not video_url:
edges = try_get(
media, lambda x: x['edge_sidecar_to_children']['edges'],
list) or []
if edges:
entries = []
for edge_num, edge in enumerate(edges, start=1):
node = try_get(edge, lambda x: x['node'], dict)
if not node:
continue
node_video_url = url_or_none(node.get('video_url'))
if not node_video_url:
continue
entries.append({
'id': node.get('shortcode') or node['id'],
'title': 'Video %d' % edge_num,
'url': node_video_url,
'thumbnail': node.get('display_url'),
'width': int_or_none(try_get(node, lambda x: x['dimensions']['width'])),
'height': int_or_none(try_get(node, lambda x: x['dimensions']['height'])),
'view_count': int_or_none(node.get('video_view_count')),
})
return self.playlist_result(
entries, video_id,
'Post by %s' % uploader_id if uploader_id else None,
description)
if not video_url:
video_url = self._og_search_video_url(webpage, secure=False)
formats = [{
'url': video_url,
'width': width,
'height': height,
}]
if not uploader_id:
uploader_id = self._search_regex(
r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"',
webpage, 'uploader id', fatal=False)
if not description:
description = self._search_regex(
r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
if description is not None:
description = lowercase_escape(description)
if not thumbnail:
thumbnail = self._og_search_thumbnail(webpage)
return {
'id': video_id,
'formats': formats,
'ext': 'mp4',
'title': 'Video by %s' % uploader_id,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'uploader_id': uploader_id,
'uploader': uploader,
'like_count': like_count,
'comment_count': comment_count,
'comments': comments,
}
class InstagramUserIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
IE_DESC = 'Instagram user profile'
IE_NAME = 'instagram:user'
_TEST = {
'url': 'https://instagram.com/porsche',
'info_dict': {
'id': 'porsche',
'title': 'porsche',
},
'playlist_count': 5,
'params': {
'extract_flat': True,
'skip_download': True,
'playlistend': 5,
}
}
_gis_tmpl = None
def _entries(self, data):
def get_count(suffix):
return int_or_none(try_get(
node, lambda x: x['edge_media_' + suffix]['count']))
uploader_id = data['entry_data']['ProfilePage'][0]['graphql']['user']['id']
csrf_token = data['config']['csrf_token']
rhx_gis = data.get('rhx_gis') or '3c7ca9dcefcf966d11dacf1f151335e8'
self._set_cookie('instagram.com', 'ig_pr', '1')
cursor = ''
for page_num in itertools.count(1):
variables = json.dumps({
'id': uploader_id,
'first': 12,
'after': cursor,
})
if self._gis_tmpl:
gis_tmpls = [self._gis_tmpl]
else:
gis_tmpls = [
'%s' % rhx_gis,
'',
'%s:%s' % (rhx_gis, csrf_token),
'%s:%s:%s' % (rhx_gis, csrf_token, std_headers['User-Agent']),
]
for gis_tmpl in gis_tmpls:
try:
media = self._download_json(
'https://www.instagram.com/graphql/query/', uploader_id,
'Downloading JSON page %d' % page_num, headers={
'X-Requested-With': 'XMLHttpRequest',
'X-Instagram-GIS': hashlib.md5(
('%s:%s' % (gis_tmpl, variables)).encode('utf-8')).hexdigest(),
}, query={
'query_hash': '42323d64886122307be10013ad2dcc44',
'variables': variables,
})['data']['user']['edge_owner_to_timeline_media']
self._gis_tmpl = gis_tmpl
break
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
if gis_tmpl != gis_tmpls[-1]:
continue
raise
edges = media.get('edges')
if not edges or not isinstance(edges, list):
break
for edge in edges:
node = edge.get('node')
if not node or not isinstance(node, dict):
continue
if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
continue
video_id = node.get('shortcode')
if not video_id:
continue
info = self.url_result(
'https://instagram.com/p/%s/' % video_id,
ie=InstagramIE.ie_key(), video_id=video_id)
description = try_get(
node, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
compat_str)
thumbnail = node.get('thumbnail_src') or node.get('display_src')
timestamp = int_or_none(node.get('taken_at_timestamp'))
comment_count = get_count('to_comment')
like_count = get_count('preview_like')
view_count = int_or_none(node.get('video_view_count'))
info.update({
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'comment_count': comment_count,
'like_count': like_count,
'view_count': view_count,
})
yield info
page_info = media.get('page_info')
if not page_info or not isinstance(page_info, dict):
break
has_next_page = page_info.get('has_next_page')
if not has_next_page:
break
cursor = page_info.get('end_cursor')
if not cursor or not isinstance(cursor, compat_str):
break
def _real_extract(self, url):
username = self._match_id(url)
webpage = self._download_webpage(url, username)
data = self._parse_json(
self._search_regex(
r'sharedData\s*=\s*({.+?})\s*;\s*[<\n]', webpage, 'data'),
username)
return self.playlist_result(
self._entries(data), username, username)
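# NOTE: hedged usage sketch, not part of the original module. youtube-dl
# normally drives extractors itself; the URL is taken from _TESTS above.
def _instagram_example():
    from youtube_dl import YoutubeDL
    with YoutubeDL() as ydl:
        return ydl.extract_info(
            'https://instagram.com/p/aye83DjauH/', download=False)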
| unlicense | -6,490,519,569,618,890,000 | 35.808864 | 106 | 0.450557 | false |
apple/swift-lldb | packages/Python/lldbsuite/test/benchmarks/expression/TestExpressionCmd.py | 13 | 2526 | """Test lldb's expression evaluations and collect statistics."""
from __future__ import print_function
import sys
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import configuration
from lldbsuite.test import lldbutil
class ExpressionEvaluationCase(BenchBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
BenchBase.setUp(self)
self.source = 'main.cpp'
self.line_to_break = line_number(
self.source, '// Set breakpoint here.')
self.count = 25
@benchmarks_test
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
def test_expr_cmd(self):
"""Test lldb's expression commands and collect statistics."""
self.build()
self.exe_name = 'a.out'
print()
self.run_lldb_repeated_exprs(self.exe_name, self.count)
print("lldb expr cmd benchmark:", self.stopwatch)
def run_lldb_repeated_exprs(self, exe_name, count):
import pexpect
exe = self.getBuildArtifact(exe_name)
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
'%s %s %s' %
(lldbtest_config.lldbExec, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline(
'breakpoint set -f %s -l %d' %
(self.source, self.line_to_break))
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
expr_cmd1 = 'expr ptr[j]->point.x'
expr_cmd2 = 'expr ptr[j]->point.y'
with self.stopwatch:
child.sendline(expr_cmd1)
child.expect_exact(prompt)
child.sendline(expr_cmd2)
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.child = None
| apache-2.0 | 7,554,860,002,284,278,000 | 29.804878 | 77 | 0.578781 | false |
sdg-mit/gitless | setup.py | 1 | 2370 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ast
import re
import sys
from setuptools import setup
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('gitless/cli/gl.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
# Build helper
if sys.argv[-1] == 'gl-build':
from subprocess import run
import shutil
import tarfile
import platform
rel = 'gl-v{0}-{1}-{2}'.format(
version, platform.system().lower(), platform.machine())
print('running pyinstaller...')
run(
['pyinstaller', 'gl.spec', '--clean', '--distpath', rel],
stdout=sys.stdout, stderr=sys.stderr)
print('success!! gl binary should be at {0}/gl'.format(rel))
print('creating tar.gz file')
shutil.copy('README.md', rel)
shutil.copy('LICENSE.md', rel)
with tarfile.open(rel + '.tar.gz', 'w:gz') as tar:
tar.add(rel)
print('success!! binary release at {0}'.format(rel + '.tar.gz'))
sys.exit()
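# Hedged usage note, not part of the original file: the block above runs only
# when the script is invoked as `python setup.py gl-build`; a plain install
# or `pip install .` falls through to the setup() call below.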
ld = """
Gitless is a version control system built on top of Git that is easy to learn
and use. It features a simple commit workflow, independent branches, and
a friendly command-line interface. Because Gitless is implemented on top of
Git, you can always fall back on Git. And the coworkers you share a repo with
need never know that you're not a Git aficionado.
More info, downloads and documentation @ `Gitless's
website <http://gitless.com>`__.
"""
setup(
name='gitless',
version=version,
description='A simple version control system built on top of Git',
long_description=ld,
author='Santiago Perez De Rosso',
author_email='[email protected]',
url='https://gitless.com',
packages=['gitless', 'gitless.cli'],
install_requires=[
# make sure install_requires is consistent with requirements.txt
'pygit2==1.2.0', # requires libgit2 1.0.x
'argcomplete>=1.11.1'
],
license='MIT',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Software Development :: Version Control'],
entry_points={
'console_scripts': [
'gl = gitless.cli.gl:main'
]},
test_suite='gitless.tests')
| mit | 8,401,610,538,682,015,000 | 27.214286 | 78 | 0.64557 | false |
lzpfmh/electron | script/upload-windows-pdb.py | 156 | 1174 | #!/usr/bin/env python
import os
import glob
import sys
from lib.config import s3_config
from lib.util import atom_gyp, execute, rm_rf, safe_mkdir, s3put
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
SYMBOLS_DIR = 'dist\\symbols'
DOWNLOAD_DIR = 'vendor\\brightray\\vendor\\download\\libchromiumcontent'
PROJECT_NAME = atom_gyp()['project_name%']
PRODUCT_NAME = atom_gyp()['product_name%']
PDB_LIST = [
'out\\R\\{0}.exe.pdb'.format(PROJECT_NAME),
'out\\R\\node.dll.pdb',
]
def main():
os.chdir(SOURCE_ROOT)
rm_rf(SYMBOLS_DIR)
safe_mkdir(SYMBOLS_DIR)
for pdb in PDB_LIST:
run_symstore(pdb, SYMBOLS_DIR, PRODUCT_NAME)
bucket, access_key, secret_key = s3_config()
files = glob.glob(SYMBOLS_DIR + '/*.pdb/*/*.pdb')
files = [f.lower() for f in files]
upload_symbols(bucket, access_key, secret_key, files)
def run_symstore(pdb, dest, product):
execute(['symstore', 'add', '/r', '/f', pdb, '/s', dest, '/t', product])
def upload_symbols(bucket, access_key, secret_key, files):
s3put(bucket, access_key, secret_key, SYMBOLS_DIR, 'atom-shell/symbols',
files)
if __name__ == '__main__':
sys.exit(main())
| mit | 2,435,537,939,567,535,600 | 23.458333 | 74 | 0.663543 | false |
cruzegoodin/TSC-ShippingDetails | flask/lib/python2.7/site-packages/whoosh/lang/__init__.py | 71 | 4308 | # coding=utf-8
# Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
# Exceptions
class NoStemmer(Exception):
pass
class NoStopWords(Exception):
pass
# Data and functions for language names
languages = ("ar", "da", "nl", "en", "fi", "fr", "de", "hu", "it", "no", "pt",
"ro", "ru", "es", "sv", "tr")
aliases = {
# By ISO 639-1 three letter codes
"ara": "ar",
"dan": "da", "nld": "nl", "eng": "en", "fin": "fi", "fra": "fr",
"deu": "de", "hun": "hu", "ita": "it", "nor": "no", "por": "pt",
"ron": "ro", "rus": "ru", "spa": "es", "swe": "sv", "tur": "tr",
# By name in English
"arabic": "ar",
"danish": "da",
"dutch": "nl",
"english": "en",
"finnish": "fi",
"french": "fr",
"german": "de",
"hungarian": "hu",
"italian": "it",
"norwegian": "no",
"portuguese": "pt",
"romanian": "ro",
"russian": "ru",
"spanish": "es",
"swedish": "sw",
"turkish": "tr",
# By name in own language
"العربية": "ar",
"dansk": "da",
"nederlands": "nl",
"suomi": "fi",
"français": "fr",
"deutsch": "de",
"magyar": "hu",
"italiano": "it",
"norsk": "no",
"português": "pt",
"русский язык": "ru",
"español": "es",
"svenska": "sv",
"türkçe": "tr",
}
def two_letter_code(name):
if name in languages:
return name
if name in aliases:
return aliases[name]
return None
# Getter functions
def has_stemmer(lang):
try:
return bool(stemmer_for_language(lang))
except NoStemmer:
return False
def has_stopwords(lang):
try:
return bool(stopwords_for_language(lang))
except NoStopWords:
return False
def stemmer_for_language(lang):
if lang == "en_porter":
# Original porter stemming algorithm is several times faster than the
# more correct porter2 algorithm in snowball package
from .porter import stem as porter_stem
return porter_stem
tlc = two_letter_code(lang)
if tlc == "ar":
from .isri import ISRIStemmer
return ISRIStemmer().stem
from .snowball import classes as snowball_classes
if tlc in snowball_classes:
return snowball_classes[tlc]().stem
raise NoStemmer("No stemmer available for %r" % lang)
def stopwords_for_language(lang):
from .stopwords import stoplists
tlc = two_letter_code(lang)
if tlc in stoplists:
return stoplists[tlc]
raise NoStopWords("No stop-word list available for %r" % lang)
| bsd-3-clause | -3,488,715,890,363,436,500 | 29.607143 | 78 | 0.6014 | false |
ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/e/excess_escapes.py | 2 | 1040 | # pylint:disable=pointless-string-statement, fixme, misplaced-comparison-constant, comparison-with-itself
"""Stray backslash escapes may be missing a raw-string prefix."""
__revision__ = '$Id$'
# Bad escape sequences, which probably don't do what you expect.
A = "\[\]\\" # [anomalous-backslash-in-string,anomalous-backslash-in-string]
assert '\/' == '\\/' # [anomalous-backslash-in-string]
ESCAPE_BACKSLASH = '\`' # [anomalous-backslash-in-string]
# Valid escape sequences.
NEWLINE = "\n"
OLD_ESCAPES = '\a\b\f\n\t\r\v'
HEX = '\xad\x0a\x0d'
# +1:[anomalous-backslash-in-string,anomalous-backslash-in-string]
FALSE_OCTAL = '\o123\o000' # Not octal in Python
OCTAL = '\123\000'
NOT_OCTAL = '\888\999' # [anomalous-backslash-in-string,anomalous-backslash-in-string]
NUL = '\0'
UNICODE = u'\u1234'
HIGH_UNICODE = u'\U0000abcd'
QUOTES = '\'\"'
LITERAL_NEWLINE = '\
'
ESCAPE_UNICODE = "\\\\n"
# Bad docstring
# +3:[anomalous-backslash-in-string]
"""Even in a docstring
You shouldn't have ambiguous text like: C:\Program Files\alpha
"""
| mit | 5,189,513,768,835,509,000 | 31.5 | 105 | 0.694231 | false |
allo-/feedjack | feedjack/migrations.south/0015_auto__add_field_feed_skip_errors.py | 2 | 9139 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Feed.skip_errors'
db.add_column('feedjack_feed', 'skip_errors',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Feed.skip_errors'
db.delete_column('feedjack_feed', 'skip_errors')
models = {
'feedjack.feed': {
'Meta': {'ordering': "('name', 'feed_url')", 'object_name': 'Feed'},
'etag': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'feed_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
'filters': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'feeds'", 'blank': 'True', 'to': "orm['feedjack.Filter']"}),
'filters_logic': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shortname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'skip_errors': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tagline': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
'feedjack.filter': {
'Meta': {'object_name': 'Filter'},
'base': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filters'", 'to': "orm['feedjack.FilterBase']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'feedjack.filterbase': {
'Meta': {'object_name': 'FilterBase'},
'crossref': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'crossref_rebuild': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'crossref_span': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'crossref_timeline': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'handler_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'feedjack.filterresult': {
'Meta': {'object_name': 'FilterResult'},
'filter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feedjack.Filter']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filtering_results'", 'to': "orm['feedjack.Post']"}),
'result': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'feedjack.link': {
'Meta': {'object_name': 'Link'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'feedjack.post': {
'Meta': {'ordering': "('-date_modified',)", 'unique_together': "(('feed', 'guid'),)", 'object_name': 'Post'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'author_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'comments': ('django.db.models.fields.URLField', [], {'max_length': '511', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['feedjack.Feed']"}),
'filtering_result': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '511', 'db_index': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '511'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['feedjack.Tag']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '511'})
},
'feedjack.site': {
'Meta': {'ordering': "('name',)", 'object_name': 'Site'},
'cache_duration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '86400'}),
'default_site': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'greets': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['feedjack.Link']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_posts_by': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'posts_per_page': ('django.db.models.fields.PositiveIntegerField', [], {'default': '20'}),
'show_tagcloud': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'tagcloud_levels': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'use_internal_cache': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'welcome': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'feedjack.subscriber': {
'Meta': {'ordering': "('site', 'name', 'feed')", 'unique_together': "(('site', 'feed'),)", 'object_name': 'Subscriber'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feedjack.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'shortname': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feedjack.Site']"})
},
'feedjack.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '127'})
}
}
complete_apps = ['feedjack'] | bsd-3-clause | 292,323,603,956,105,540 | 72.709677 | 181 | 0.545136 | false |
fishroot/nemoa | nemoa/base/nbase.py | 1 | 18737 | # -*- coding: utf-8 -*-
"""Base classes."""
__author__ = 'Patrick Michl'
__email__ = '[email protected]'
__license__ = 'GPLv3'
__docformat__ = 'google'
from pathlib import Path
from nemoa.base import entity, check, ndict, env
from nemoa.types import Any, ClassVar, Dict, OptInt, OptStr, PathLike
class ObjectIP:
"""Base class for objects subjected to intellectual property.
Resources like datasets, networks, systems and models share common
descriptive metadata comprising authorship and copyright, as well as
administrative metadata like branch and version. This base class is
intended to provide a unified interface to access those attributes.
Attributes:
about (str): Short description of the content of the resource.
Hint: Read- & writeable wrapping attribute.
author (str): A person, an organization, or a service that is
responsible for the creation of the content of the resource.
Hint: Read- & writeable wrapping attribute.
branch (str): Name of a duplicate of the original resource.
Hint: Read- & writeable wrapping attribute.
copyright (str): Notice of statutorily prescribed form that informs
users of the underlying resource to published copyright ownership.
Hint: Read- & writeable wrapping attribute.
email (str): Email address to a person, an organization, or a service
that is responsible for the content of the resource.
Hint: Read- & writeable wrapping attribute.
fullname (str): String concatenation of name, branch and version.
            Branch and version are only concatenated if they exist.
Hint: Readonly wrapping attribute.
        license (str): Name reference to a legal document giving official
permission to do something with the resource.
Hint: Read- & writeable wrapping attribute.
name (str): Name of the resource.
Hint: Read- & writeable wrapping attribute.
path (str): Path to a file containing or referencing the resource.
Hint: Read- & writeable wrapping attribute.
type (str): String concatenation of module name and class name
of the instance.
Hint: Readonly wrapping attribute.
        version (int): Version number of the resource.
Hint: Read- & writeable wrapping attribute.
"""
_attr: ClassVar[Dict[str, int]] = {
'author': 0b11, 'email': 0b11, 'license': 0b11, 'copyright': 0b11,
'fullname': 0b01, 'name': 0b11, 'branch': 0b11, 'version': 0b11,
'about': 0b11, 'type': 0b01, 'path': 0b11
}
_copy: ClassVar[Dict[str, str]] = {
'config': '_config'
}
_config: dict
def __init__(self, **kwds: Any) -> None:
"""Initialize object with given configuration."""
self._config = {}
self._set_copy(**kwds)
    def __getattr__(self, key: str) -> Any:
"""Wrap attribute requests to private getter methods."""
if key in self._attr:
if not self._attr[key] & 0b01:
raise AttributeError(f"attribute '{key}' is not readable")
if not hasattr(self, '_get_' + key):
raise AttributeError(
f"{self.__class__.__name__} instance "
f"has no attribute '_get_{key}'")
return getattr(self, '_get_' + key)()
raise AttributeError(
f"{self.__class__.__name__} instance "
f"has no attribute '{key}'")
def __setattr__(self, key: str, val: Any) -> None:
"""Wrap attribute requests to private setter methods."""
if key in self._attr:
if not self._attr[key] & 0b10:
raise AttributeError(
f"attribute '{key}' is not writeable")
if not hasattr(self, '_set_' + key):
raise AttributeError(
f"{self.__class__.__name__} instance has "
f"no attribute '_set_{key}'")
getattr(self, '_set_' + key)(val)
else:
self.__dict__[key] = val
def get(self, *args: Any, **kwds: Any) -> Any:
"""Get the value of an object property.
Args:
key: Property name of which the value is to be returned. If key is
not given, then a copy of all data is returned
*args: Arguments of arbitrary types.
**kwds: Keyword arguments of arbitrary types
Returns:
Arbitrary typed return value of the respective private getter
method of the class instance.
"""
# default: get() -> get('copy')
if args:
key, args = args[0], args[1:]
else:
key = 'copy'
# get readable attributes
if self._attr.get(key, 0b00) & 0b01:
return getattr(self, '_get_' + key)(*args, **kwds)
# call getter method if it exists
if hasattr(self, '_get_' + key):
return getattr(self, '_get_' + key)(*args, **kwds)
raise KeyError(f"key '{key}' is not valid")
def _get_about(self) -> OptStr:
"""Get a short description of the content of the resource.
Short description of the content of the resource.
Returns:
String containing a description of the resource.
"""
return self._config.get('about', None)
def _get_author(self) -> OptStr:
"""Get the name of the author of the resource.
A person, an organization, or a service that is responsible for
the creation of the content of the resource.
Returns:
String containing the name of the author.
"""
return self._config.get('author', None)
def _get_branch(self) -> OptStr:
"""Get the name of the current branch.
Name of a duplicate of the original resource.
Returns:
String containing the name of the branch.
"""
return self._config.get('branch', None)
def _get_config(self, key: OptStr = None) -> Any:
"""Get configuration or configuration value.
Args:
key: Name of entry in configuration dictionary. If key is None,
then all entries are returned. Default: None
Returns:
Dictionary containing a copy of configuration.
"""
check.has_opt_type("argument 'key'", key, str)
import copy
conf = self._config or {}
if key is None:
return copy.deepcopy(conf)
if key in conf:
return copy.deepcopy(conf[key])
raise KeyError(f"key '{key}' is not valid")
def _get_copy(self, key: OptStr = None) -> Any:
"""Get copy of configuration and named resources.
Args:
key: Name of resource to return. If key is None, then all resources
that are specified in self._copy are returned. Default: None
Returns:
Copy of configuration and named resources.
"""
check.has_opt_type("argument 'key'", key, str)
import copy
# get mapping for internal datastorage
cmap = getattr(self, '_copy', None) \
or {k.strip('_'): k for k in self.__dict__}
# remove class variables from mapping
cmap.pop('attr', None)
cmap.pop('copy', None)
getter = self._get_getter()
if key is None:
dcopy = {}
for k in cmap.keys():
dcopy[k] = getattr(self, '_get_' + k)() \
or copy.deepcopy(self.__dict__[cmap[k]])
return dcopy
if key in cmap.keys():
if key in getter:
return getattr(self, '_get_' + key)()
return copy.deepcopy(self.__dict__[cmap[key]])
raise KeyError(f"key '{str(key)}' is not valid")
def _get_copyright(self) -> OptStr:
"""Get the copyright notice of the resource.
Notice of statutorily prescribed form that informs users of the
underlying resource to published copyright ownership.
Returns:
String containing the copyright notice of the resource.
"""
return self._config.get('copyright', None)
def _get_email(self) -> OptStr:
"""Get an email address of the author.
Email address to a person, an organization, or a service that is
responsible for the content of the resource.
Returns:
String containing an email address of the author.
"""
return self._config.get('email', None)
def _get_fullname(self) -> str:
"""Get full name including 'branch' and 'version'.
String concatenation of 'name', 'branch' and 'version'. Branch
        and version are only concatenated if they have already been set.
The fullname has to be unique for a given class and a given
workspace.
Returns:
String containing fullname of the resource.
"""
l = [self._get_name(), self._get_branch(), self._get_version()]
return '.'.join([str(val) for val in l if val])
def _get_getter(self) -> list:
"""Get sorted list of keys, which are accepted by the 'get' method.
The class method 'get' wraps given keys to private getter methods of
the class instance, which are identified by an initial prefix '_get_'
in the method name.
Returns:
Sorted list of keys, which are accepted by the 'get' method.
"""
gdict = entity.get_methods(self, pattern='_get_*')
glist = sorted(ndict.crop(gdict, '_get_'))
return glist
def _get_license(self) -> OptStr:
"""Get the license of the resource.
        Name reference to a legal document giving specified users
official permission to do something with the resource.
Returns:
String containing the license reference of the resource.
"""
return self._config.get('license', None)
def _get_name(self) -> OptStr:
"""Get the name of the resource.
The name has to be unique for a given class and a given
        workspace, in the sense that all resources with the same name
have to be branches or other versions of the same resource.
Returns:
String containing the name of the resource.
"""
return self._config.get('name', None)
def _get_path(self) -> OptStr:
"""Get filepath.
Path to a potential file containing or referencing the resource.
Returns:
String containing the (potential) path of the resource.
"""
if 'path' in self._config:
path = str(self._config['path'])
if path:
return path
from nemoa import session
mname = self.__module__.rsplit('.', 1)[-1]
dname = session.path(mname + 's')
if not dname:
return None
fbase = env.clear_filename(self._get_fullname())
if not fbase:
return None
fext = session.get('default', 'filetype', mname)
if not fext:
return None
return str(env.join_path(dname, fbase + '.' + fext))
def _get_setter(self) -> list:
"""Get sorted list of keys, which are accepted by the 'set' method.
The class method 'set' wraps given keys to private getter methods of
the class instance, which are identified by an initial prefix '_set_'
in the method name.
Returns:
Sorted list of keys, which are accepted by the 'set' method.
"""
sdict = entity.get_methods(self, pattern='_set_*')
slist = sorted(ndict.crop(sdict, '_set_'))
return slist
def _get_type(self) -> OptStr:
"""Get instance type, using module name and class name.
String concatenation of module name and class name of the instance.
Returns:
String containing instance type identifier.
"""
mname = self.__module__.rsplit('.', 1)[-1]
cname = self.__class__.__name__
return '.'.join([mname, cname])
def _get_version(self) -> OptInt:
"""Get the version number of the branch of the resource.
        Version number of the branch of the resource.
Returns:
Integer value used as the version number of the resource.
"""
return self._config.get('version', None)
def set(self, key: str, *args: Any, **kwds: Any) -> bool:
"""Set a private instance variable to a given value.
Args:
key: Name of variable, that is to be changed
*args: Arguments of arbitrary types
**kwds: Keyword arguments of arbitrary types
Returns:
Boolean value, which is returned by the respective private setter
method of the class instance.
"""
# set writeable attributes
if self._attr.get(key, 0b00) & 0b10:
return getattr(self, '_set_' + key)(*args, **kwds)
# supplementary setter methods
        if hasattr(self, '_set_' + key):
            return getattr(self, '_set_' + key)(*args, **kwds)
raise KeyError(f"key '{key}' is not valid")
def _set_copy(self, **kwds: Any) -> bool:
"""Call setter methods for all keyword arguments.
Args:
**kwds: Items of arbitrary types.
Returns:
Bool which is True if and only if no error occured.
"""
import copy
setter = self._get_setter()
for key, val in kwds.items():
if key not in self._copy.keys():
raise KeyError(f"key '{key}' is not valid")
if key in setter:
self.set(key, val)
else:
self.__dict__[self._copy[key]] = copy.deepcopy(val)
return True
def _set_about(self, val: str) -> bool:
"""Set short description of the content of the resource.
Short description of the content of the resource.
Returns:
Boolean value which is True on success, else False.
"""
if not isinstance(val, str):
raise TypeError(
"attribute 'about' is required to be of type 'str'"
f", not '{type(val)}'")
self._config['about'] = val
return True
def _set_author(self, val: str) -> bool:
"""Set the name of the author of the resource.
A person, an organization, or a service that is responsible for
the creation of the content of the resource.
Returns:
Boolean value which is True on success, else False.
"""
if not isinstance(val, str):
raise TypeError(
"attribute 'author' is required to be of type 'str'"
f", not '{type(val)}'")
self._config['author'] = val
return True
def _set_branch(self, val: str) -> bool:
"""Set the name of the current branch.
Name of a duplicate of the original resource.
Returns:
Boolean value which is True on success, else False.
"""
if not isinstance(val, str):
raise TypeError(
"attribute 'branch' is required to be of type 'str'"
f", not '{type(val)}'")
self._config['branch'] = val
return True
def _set_copyright(self, val: str) -> bool:
"""Set a copyright notice.
Notice of statutorily prescribed form that informs users of the
underlying resource to published copyright ownership.
Returns:
Boolean value which is True on success, else False.
"""
if not isinstance(val, str):
raise TypeError(
"attribute 'copyright' is required to be of type 'str'"
f", not '{type(val)}'")
self._config['copyright'] = val
return True
def _set_email(self, val: str) -> bool:
"""Set an email address of the author.
Email address to a person, an organization, or a service that is
responsible for the content of the resource.
Returns:
Boolean value which is True on success, else False.
"""
if not isinstance(val, str):
raise TypeError(
"attribute 'email' is required to be of type 'str'"
f", not '{type(val)}'")
self._config['email'] = val
return True
def _set_license(self, val: str) -> bool:
"""Set a license for the usage of the resource.
        Name reference to a legal document giving specified users
official permission to do something with the resource.
Returns:
Boolean value which is True on success, else False.
"""
if not isinstance(val, str):
raise TypeError(
"attribute 'license' is required to be of type 'str'"
f", not '{type(val)}'")
self._config['license'] = val
return True
def _set_name(self, val: str) -> bool:
"""Set the name of the resource.
The name has to be unique for a given class and a given
        workspace, in the sense that all resources with the same name
have to be branches or other versions of the same resource.
Returns:
Boolean value which is True on success, else False.
"""
if not isinstance(val, str):
raise TypeError(
"attribute 'name' is required to be of type 'str'"
f", not '{type(val)}'")
self._config['name'] = val
return True
def _set_path(self, path: PathLike) -> bool:
"""Set filepath.
Path to a file containing or referencing the resource.
Returns:
Boolean value which is True on success, else False.
"""
if not isinstance(path, (str, tuple, Path)):
raise TypeError(
"attribute 'path' is required to be path-like"
f", not '{type(path)}'")
self._config['path'] = env.expand(path)
return True
def _set_version(self, val: int) -> bool:
"""Set the version number of the branch of the resource.
Version number of the branch of the resource.
Returns:
Boolean value which is True on success, else False.
"""
if not isinstance(val, int):
raise TypeError(
"attribute 'version' is required to be of type 'int'"
f", not '{type(val)}'")
self._config['version'] = val
return True
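# NOTE: hedged usage sketch, not part of the original module. It shows how
# the _attr bitmask (0b01 = readable, 0b10 = writeable) routes plain
# attribute access through the private getters and setters defined above.
def _objectip_example() -> str:
    obj = ObjectIP()
    obj.name = 'dataset'  # dispatched to _set_name() via __setattr__
    obj.version = 2       # dispatched to _set_version() via __setattr__
    return obj.fullname   # readonly wrapper via __getattr__ -> 'dataset.2'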
| gpl-3.0 | 2,705,300,763,946,531,300 | 31.083904 | 79 | 0.570529 | false |
simongoffin/website_version | addons/report_intrastat/__openerp__.py | 116 | 1888 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Intrastat Reporting',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
A module that adds intrastat reports.
=====================================
This module gives the details of the goods traded between the countries of
European Union.""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base', 'product', 'stock', 'sale', 'purchase'],
'data': [
'security/ir.model.access.csv',
'report_intrastat_view.xml',
'intrastat_report.xml',
'report_intrastat_data.xml',
'views/report_intrastatinvoice.xml'
],
'demo': [],
'test': ['test/report_intrastat_report.yml'],
'installable': True,
'auto_install': False,
'images': ['images/country_intrastat_code.jpeg','images/intrastat_code.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,186,504,309,289,817,000 | 37.530612 | 82 | 0.599576 | false |
chris-chris/tensorflow | tensorflow/python/training/moving_averages.py | 41 | 20098 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Maintain moving averages of parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import slot_creator
# TODO(touts): switch to variables.Variable.
def assign_moving_average(variable, value, decay, zero_debias=True, name=None):
"""Compute the moving average of a variable.
The moving average of 'variable' updated with 'value' is:
variable * decay + value * (1 - decay)
The returned Operation sets 'variable' to the newly computed moving average.
The new value of 'variable' can be set with the 'AssignSub' op as:
variable -= (1 - decay) * (variable - value)
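  For example (illustrative numbers), with `decay = 0.9`, `variable = 1.0` and
  `value = 2.0`, the op subtracts `(1 - 0.9) * (1.0 - 2.0) = -0.1`, moving the
  variable to `1.1`.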
Since variables that are initialized to a `0` value will be `0` biased,
`zero_debias` optionally enables scaling by the mathematically correct
debiasing factor of
1 - decay ** num_updates
See `ADAM: A Method for Stochastic Optimization` Section 3 for more details
(https://arxiv.org/abs/1412.6980).
Args:
variable: A Variable.
value: A tensor with the same shape as 'variable'.
decay: A float Tensor or float value. The moving average decay.
    zero_debias: A python bool. If true, assume the variable is 0-initialized and
unbias it, as in https://arxiv.org/abs/1412.6980. See docstring in
`_zero_debias` for more details.
name: Optional name of the returned operation.
Returns:
A reference to the input 'variable' tensor with the newly computed
moving average.
"""
with ops.name_scope(name, "AssignMovingAvg",
[variable, value, decay]) as scope:
with ops.colocate_with(variable):
decay = ops.convert_to_tensor(1.0 - decay, name="decay")
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
if zero_debias:
update_delta = _zero_debias(variable, value, decay)
else:
update_delta = (variable - value) * decay
return state_ops.assign_sub(variable, update_delta, name=scope)
def weighted_moving_average(value,
decay,
weight,
truediv=True,
collections=None,
name=None):
"""Compute the weighted moving average of `value`.
Conceptually, the weighted moving average is:
`moving_average(value * weight) / moving_average(weight)`,
where a moving average updates by the rule
`new_value = decay * old_value + (1 - decay) * update`
Internally, this Op keeps moving average variables of both `value * weight`
and `weight`.
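  For example (illustrative numbers), if every update arrives with
  `weight = 2.0` and `value = 3.0`, the two internal averages converge to
  `6.0` and `2.0` respectively, so the returned ratio converges to `3.0`,
  the weighted mean of the values.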
Args:
value: A numeric `Tensor`.
decay: A float `Tensor` or float value. The moving average decay.
weight: `Tensor` that keeps the current value of a weight.
Shape should be able to multiply `value`.
truediv: Boolean, if `True`, dividing by `moving_average(weight)` is
floating point division. If `False`, use division implied by dtypes.
collections: List of graph collections keys to add the internal variables
`value * weight` and `weight` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name of the returned operation.
Defaults to "WeightedMovingAvg".
Returns:
An Operation that updates and returns the weighted moving average.
"""
# Unlike assign_moving_average, the weighted moving average doesn't modify
# user-visible variables. It is the ratio of two internal variables, which are
# moving averages of the updates. Thus, the signature of this function is
# quite different than assign_moving_average.
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
with variable_scope.variable_scope(name, "WeightedMovingAvg",
[value, weight, decay]) as scope:
value_x_weight_var = variable_scope.get_variable(
"value_x_weight",
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
weight_var = variable_scope.get_variable(
"weight",
shape=weight.get_shape(),
dtype=weight.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
numerator = assign_moving_average(
value_x_weight_var, value * weight, decay, zero_debias=False)
denominator = assign_moving_average(
weight_var, weight, decay, zero_debias=False)
if truediv:
return math_ops.truediv(numerator, denominator, name=scope.name)
else:
return math_ops.div(numerator, denominator, name=scope.name)
def _zero_debias(unbiased_var, value, decay):
"""Compute the delta required for a debiased Variable.
All exponential moving averages initialized with Tensors are initialized to 0,
and therefore are biased to 0. Variables initialized to 0 and used as EMAs are
  similarly biased. This function creates the debiased update amount according to
a scale factor, as in https://arxiv.org/abs/1412.6980.
  To demonstrate the bias that results from 0-initialization, take an EMA that
  was initialized to `0` with decay `b`. After `t` timesteps of seeing the
  constant `c`, the variable has the following value:
```
EMA = 0*b^(t) + c*(1 - b)*b^(t-1) + c*(1 - b)*b^(t-2) + ...
= c*(1 - b^t)
```
To have the true value `c`, we would divide by the scale factor `1 - b^t`.
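  For example (illustrative numbers), with `b = 0.9`, `c = 1.0` and `t = 2`,
  the biased estimate is `1.0 * (1 - 0.9**2) = 0.19`; dividing by the scale
  factor `1 - 0.9**2 = 0.19` recovers the true value `1.0`.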
In order to perform debiasing, we use two shadow variables. One keeps track of
the biased estimate, and the other keeps track of the number of updates that
have occurred.
Args:
unbiased_var: A Variable representing the current value of the unbiased EMA.
value: A Tensor representing the most recent value.
decay: A Tensor representing `1-decay` for the EMA.
Returns:
    The amount by which the unbiased variable should be updated. Computing this
tensor will also update the shadow variables appropriately.
"""
with variable_scope.variable_scope(
unbiased_var.op.name, values=[unbiased_var, value, decay]) as scope:
with ops.colocate_with(unbiased_var):
with ops.control_dependencies(None):
biased_initializer = init_ops.zeros_initializer(
dtype=unbiased_var.dtype)(unbiased_var.get_shape())
local_step_initializer = init_ops.zeros_initializer()
biased_var = variable_scope.get_variable(
"biased", initializer=biased_initializer, trainable=False)
local_step = variable_scope.get_variable(
"local_step",
shape=[],
dtype=unbiased_var.dtype,
initializer=local_step_initializer,
trainable=False)
      # Get update ops for both shadow variables.
update_biased = state_ops.assign_sub(biased_var,
(biased_var - value) * decay,
name=scope.name)
update_local_step = local_step.assign_add(1)
# Compute the value of the delta to update the unbiased EMA. Make sure to
# use the new values of the biased variable and the local step.
with ops.control_dependencies([update_biased, update_local_step]):
# This function gets `1 - decay`, so use `1.0 - decay` in the exponent.
unbiased_ema_delta = (unbiased_var - biased_var.read_value() /
(1 - math_ops.pow(
1.0 - decay, local_step.read_value())))
return unbiased_ema_delta
class ExponentialMovingAverage(object):
"""Maintains moving averages of variables by employing an exponential decay.
When training a model, it is often beneficial to maintain moving averages of
the trained parameters. Evaluations that use averaged parameters sometimes
produce significantly better results than the final trained values.
  The `apply()` method adds shadow copies of trained variables and adds ops that
maintain a moving average of the trained variables in their shadow copies.
It is used when building the training model. The ops that maintain moving
averages are typically run after each training step.
The `average()` and `average_name()` methods give access to the shadow
variables and their names. They are useful when building an evaluation
model, or when restoring a model from a checkpoint file. They help use the
moving averages in place of the last trained values for evaluations.
The moving averages are computed using exponential decay. You specify the
decay value when creating the `ExponentialMovingAverage` object. The shadow
variables are initialized with the same initial values as the trained
variables. When you run the ops to maintain the moving averages, each
shadow variable is updated with the formula:
`shadow_variable -= (1 - decay) * (shadow_variable - variable)`
This is mathematically equivalent to the classic formula below, but the use
of an `assign_sub` op (the `"-="` in the formula) allows concurrent lockless
updates to the variables:
`shadow_variable = decay * shadow_variable + (1 - decay) * variable`
Reasonable values for `decay` are close to 1.0, typically in the
multiple-nines range: 0.999, 0.9999, etc.
Example usage when creating a training model:
```python
# Create variables.
var0 = tf.Variable(...)
var1 = tf.Variable(...)
# ... use the variables to build a training model...
...
# Create an op that applies the optimizer. This is what we usually
# would use as a training op.
opt_op = opt.minimize(my_loss, [var0, var1])
# Create an ExponentialMovingAverage object
ema = tf.train.ExponentialMovingAverage(decay=0.9999)
# Create the shadow variables, and add ops to maintain moving averages
# of var0 and var1.
maintain_averages_op = ema.apply([var0, var1])
# Create an op that will update the moving averages after each training
# step. This is what we will use in place of the usual training op.
with tf.control_dependencies([opt_op]):
training_op = tf.group(maintain_averages_op)
...train the model by running training_op...
```
There are two ways to use the moving averages for evaluations:
* Build a model that uses the shadow variables instead of the variables.
For this, use the `average()` method which returns the shadow variable
for a given variable.
* Build a model normally but load the checkpoint files to evaluate by using
the shadow variable names. For this use the `average_name()` method. See
the @{tf.train.Saver} for more
information on restoring saved variables.
Example of restoring the shadow variable values:
```python
# Create a Saver that loads variables from their saved shadow values.
shadow_var0_name = ema.average_name(var0)
shadow_var1_name = ema.average_name(var1)
saver = tf.train.Saver({shadow_var0_name: var0, shadow_var1_name: var1})
saver.restore(...checkpoint filename...)
# var0 and var1 now hold the moving average values
```
"""
def __init__(self, decay, num_updates=None, zero_debias=False,
name="ExponentialMovingAverage"):
"""Creates a new ExponentialMovingAverage object.
The `apply()` method has to be called to create shadow variables and add
ops to maintain moving averages.
The optional `num_updates` parameter allows one to tweak the decay rate
dynamically. It is typical to pass the count of training steps, usually
kept in a variable that is incremented at each step, in which case the
decay rate is lower at the start of training. This makes moving averages
move faster. If passed, the actual decay rate used is:
`min(decay, (1 + num_updates) / (10 + num_updates))`
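    For example (illustrative numbers), at `num_updates = 0` the effective
    rate is `min(decay, 0.1)`, while for large `num_updates` the ratio
    approaches `1.0` and the configured `decay` takes over.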
Args:
decay: Float. The decay to use.
num_updates: Optional count of number of updates applied to variables.
zero_debias: If `True`, zero debias moving-averages that are initialized
with tensors.
name: String. Optional prefix name to use for the name of ops added in
`apply()`.
"""
self._decay = decay
self._num_updates = num_updates
self._zero_debias = zero_debias
self._name = name
self._averages = {}
def apply(self, var_list=None):
"""Maintains moving averages of variables.
`var_list` must be a list of `Variable` or `Tensor` objects. This method
creates shadow variables for all elements of `var_list`. Shadow variables
for `Variable` objects are initialized to the variable's initial value.
They will be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
For `Tensor` objects, the shadow variables are initialized to 0 and zero
debiased (see docstring in `assign_moving_average` for more details).
    Shadow variables are created with `trainable=False` and added to the
`GraphKeys.ALL_VARIABLES` collection. They will be returned by calls to
`tf.global_variables()`.
Returns an op that updates all shadow variables as described above.
Note that `apply()` can be called multiple times with different lists of
variables.
Args:
var_list: A list of Variable or Tensor objects. The variables
and Tensors must be of types float16, float32, or float64.
Returns:
An Operation that updates the moving averages.
Raises:
TypeError: If the arguments are not all float16, float32, or float64.
ValueError: If the moving average of one of the variables is already
being computed.
"""
# TODO(touts): op_scope
if var_list is None:
var_list = variables.trainable_variables()
zero_debias_true = set() # set of vars to set `zero_debias=True`
for var in var_list:
if var.dtype.base_dtype not in [dtypes.float16, dtypes.float32,
dtypes.float64]:
raise TypeError("The variables must be half, float, or double: %s" %
var.name)
if var in self._averages:
raise ValueError("Moving average already computed for: %s" % var.name)
# For variables: to lower communication bandwidth across devices we keep
# the moving averages on the same device as the variables. For other
# tensors, we rely on the existing device allocation mechanism.
with ops.control_dependencies(None):
if isinstance(var, variables.Variable):
avg = slot_creator.create_slot(var,
var.initialized_value(),
self._name,
colocate_with_primary=True)
# NOTE(mrry): We only add `tf.Variable` objects to the
# `MOVING_AVERAGE_VARIABLES` collection.
ops.add_to_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, var)
else:
avg = slot_creator.create_zeros_slot(
var,
self._name,
colocate_with_primary=(var.op.type in ["Variable", "VariableV2"]))
if self._zero_debias:
zero_debias_true.add(avg)
self._averages[var] = avg
with ops.name_scope(self._name) as scope:
decay = ops.convert_to_tensor(self._decay, name="decay")
if self._num_updates is not None:
num_updates = math_ops.cast(self._num_updates,
dtypes.float32,
name="num_updates")
decay = math_ops.minimum(decay,
(1.0 + num_updates) / (10.0 + num_updates))
updates = []
for var in var_list:
zero_debias = self._averages[var] in zero_debias_true
updates.append(assign_moving_average(
self._averages[var], var, decay, zero_debias=zero_debias))
return control_flow_ops.group(*updates, name=scope)
def average(self, var):
"""Returns the `Variable` holding the average of `var`.
Args:
var: A `Variable` object.
Returns:
A `Variable` object or `None` if the moving average of `var`
is not maintained.
"""
return self._averages.get(var, None)
def average_name(self, var):
"""Returns the name of the `Variable` holding the average for `var`.
The typical scenario for `ExponentialMovingAverage` is to compute moving
averages of variables during training, and restore the variables from the
computed moving averages during evaluations.
To restore variables, you have to know the name of the shadow variables.
That name and the original variable can then be passed to a `Saver()` object
to restore the variable from the moving average value with:
`saver = tf.train.Saver({ema.average_name(var): var})`
`average_name()` can be called whether or not `apply()` has been called.
Args:
var: A `Variable` object.
Returns:
A string: The name of the variable that will be used or was used
by the `ExponentialMovingAverage class` to hold the moving average of
`var`.
"""
if var in self._averages:
return self._averages[var].op.name
return ops.get_default_graph().unique_name(
var.op.name + "/" + self._name, mark_as_used=False)
def variables_to_restore(self, moving_avg_variables=None):
"""Returns a map of names to `Variables` to restore.
If a variable has a moving average, use the moving average variable name as
the restore name; otherwise, use the variable name.
For example,
```python
variables_to_restore = ema.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
```
Below is an example of such mapping:
```
conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
global_step: global_step
```
Args:
      moving_avg_variables: a list of variables that require use of the
        moving average variable name to be restored. If None, it will default to
variables.moving_average_variables() + variables.trainable_variables()
Returns:
A map from restore_names to variables. The restore_name can be the
      moving_average version of the variable name if it exists, or the original
variable name.
"""
name_map = {}
if moving_avg_variables is None:
# Include trainable variables and variables which have been explicitly
# added to the moving_average_variables collection.
moving_avg_variables = variables.trainable_variables()
moving_avg_variables += variables.moving_average_variables()
# Remove duplicates
moving_avg_variables = set(moving_avg_variables)
    # Collect all the variables with a moving average.
for v in moving_avg_variables:
name_map[self.average_name(v)] = v
# Make sure we restore variables without moving average as well.
for v in list(set(variables.global_variables()) - moving_avg_variables):
if v.op.name not in name_map:
name_map[v.op.name] = v
return name_map
| apache-2.0 | -3,701,441,313,788,508,000 | 40.958246 | 80 | 0.675042 | false |
ita1024/samba | third_party/waf/wafadmin/Tools/suncxx.py | 48 | 1840 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
# Ralf Habacker, 2006 (rh)
import os, optparse
import Utils, Options, Configure
import ccroot, ar
from Configure import conftest
@conftest
def find_sxx(conf):
v = conf.env
cc = None
if v['CXX']: cc = v['CXX']
elif 'CXX' in conf.environ: cc = conf.environ['CXX']
if not cc: cc = conf.find_program('c++', var='CXX')
if not cc: conf.fatal('sunc++ was not found')
cc = conf.cmd_to_list(cc)
try:
if not Utils.cmd_output(cc + ['-flags']):
conf.fatal('sunc++ %r was not found' % cc)
except ValueError:
conf.fatal('sunc++ -flags could not be executed')
v['CXX'] = cc
v['CXX_NAME'] = 'sun'
@conftest
def sxx_common_flags(conf):
v = conf.env
# CPPFLAGS CXXDEFINES _CXXINCFLAGS _CXXDEFFLAGS
v['CXX_SRC_F'] = ''
v['CXX_TGT_F'] = ['-c', '-o', '']
v['CPPPATH_ST'] = '-I%s' # template for adding include paths
# linker
if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX']
v['CXXLNK_SRC_F'] = ''
v['CXXLNK_TGT_F'] = ['-o', ''] # solaris hack, separate the -o from the target
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STATICLIB_ST'] = '-l%s'
v['STATICLIBPATH_ST'] = '-L%s'
v['CXXDEFINES_ST'] = '-D%s'
v['SONAME_ST'] = '-Wl,-h -Wl,%s'
v['SHLIB_MARKER'] = '-Bdynamic'
v['STATICLIB_MARKER'] = '-Bstatic'
# program
v['program_PATTERN'] = '%s'
# shared library
v['shlib_CXXFLAGS'] = ['-Kpic', '-DPIC']
v['shlib_LINKFLAGS'] = ['-G']
v['shlib_PATTERN'] = 'lib%s.so'
# static lib
v['staticlib_LINKFLAGS'] = ['-Bstatic']
v['staticlib_PATTERN'] = 'lib%s.a'
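# Illustrative usage sketch (not part of the tool itself): a wscript would
# typically load this tool via conf.check_tool('suncxx') during the
# configure step, which runs the conftest functions listed in 'detect'.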
detect = '''
find_sxx
find_cpp
find_ar
sxx_common_flags
cxx_load_tools
cxx_add_flags
link_add_flags
'''
| gpl-3.0 | -7,500,031,606,856,537,000 | 23.533333 | 86 | 0.573913 | false |
etxc/namebench | nb_third_party/dns/rdtypes/ANY/CERT.py | 248 | 4263 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.dnssec
import dns.rdata
import dns.tokenizer
_ctype_by_value = {
1 : 'PKIX',
2 : 'SPKI',
3 : 'PGP',
253 : 'URI',
254 : 'OID',
}
_ctype_by_name = {
'PKIX' : 1,
'SPKI' : 2,
'PGP' : 3,
'URI' : 253,
'OID' : 254,
}
def _ctype_from_text(what):
v = _ctype_by_name.get(what)
if not v is None:
return v
return int(what)
def _ctype_to_text(what):
v = _ctype_by_value.get(what)
if not v is None:
return v
return str(what)
class CERT(dns.rdata.Rdata):
"""CERT record
@ivar certificate_type: certificate type
@type certificate_type: int
@ivar key_tag: key tag
@type key_tag: int
@ivar algorithm: algorithm
@type algorithm: int
@ivar certificate: the certificate or CRL
@type certificate: string
@see: RFC 2538"""
__slots__ = ['certificate_type', 'key_tag', 'algorithm', 'certificate']
def __init__(self, rdclass, rdtype, certificate_type, key_tag, algorithm,
certificate):
super(CERT, self).__init__(rdclass, rdtype)
self.certificate_type = certificate_type
self.key_tag = key_tag
self.algorithm = algorithm
self.certificate = certificate
def to_text(self, origin=None, relativize=True, **kw):
certificate_type = _ctype_to_text(self.certificate_type)
return "%s %d %s %s" % (certificate_type, self.key_tag,
dns.dnssec.algorithm_to_text(self.algorithm),
dns.rdata._base64ify(self.certificate))
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
certificate_type = _ctype_from_text(tok.get_string())
key_tag = tok.get_uint16()
algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
if algorithm < 0 or algorithm > 255:
raise dns.exception.SyntaxError("bad algorithm type")
chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value)
b64 = ''.join(chunks)
certificate = b64.decode('base64_codec')
return cls(rdclass, rdtype, certificate_type, key_tag,
algorithm, certificate)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
prefix = struct.pack("!HHB", self.certificate_type, self.key_tag,
self.algorithm)
file.write(prefix)
file.write(self.certificate)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
prefix = wire[current : current + 5]
current += 5
rdlen -= 5
if rdlen < 0:
raise dns.exception.FormError
(certificate_type, key_tag, algorithm) = struct.unpack("!HHB", prefix)
certificate = wire[current : current + rdlen]
return cls(rdclass, rdtype, certificate_type, key_tag, algorithm,
certificate)
from_wire = classmethod(from_wire)
def _cmp(self, other):
f = cStringIO.StringIO()
self.to_wire(f)
wire1 = f.getvalue()
f.seek(0)
f.truncate()
other.to_wire(f)
wire2 = f.getvalue()
f.close()
return cmp(wire1, wire2)
| apache-2.0 | -8,183,175,487,415,405,000 | 31.541985 | 79 | 0.611776 | false |
ssorgatem/qiime | tests/test_core_microbiome.py | 15 | 4217 | #!/usr/bin/env python
# File created on 08 Jun 2012
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from unittest import TestCase, main
from biom.parse import parse_biom_table
from qiime.core_microbiome import (core_observations_across_sample_ids)
class ComputeCoreMicrobiomeTests(TestCase):
""" """
def setUp(self):
""" """
self.otu_table_data1 = parse_biom_table(otu_table1)
self.otu_table_data2 = parse_biom_table(otu_table2)
def test_core_observations_across_sample_ids(self):
""" core_observations_across_sample_ids functions as expected
"""
actual = core_observations_across_sample_ids(self.otu_table_data1,
["S1", "s2"],
fraction_for_core=1.)
expected = ['o1', 'o5']
self.assertEqual(actual, expected)
# fraction_for_core = 0.5
actual = core_observations_across_sample_ids(self.otu_table_data1,
["S1", "s2"],
fraction_for_core=0.5)
expected = ['o1', 'o3', 'o5']
self.assertEqual(actual, expected)
def test_core_observations_across_sample_ids_invalid(self):
""" core_observations_across_sample_ids handles invalid input as expected
"""
self.assertRaises(ValueError,
core_observations_across_sample_ids,
self.otu_table_data1,
["S1", "s2"],
fraction_for_core=1.001)
self.assertRaises(ValueError,
core_observations_across_sample_ids,
self.otu_table_data1,
["S1", "s2"],
fraction_for_core=-0.001)
def test_core_observations_across_sample_ids_no_core(self):
"""core_observations_across_sample_ids handles filtering all obs
"""
actual = core_observations_across_sample_ids(self.otu_table_data2,
["S1", "s2", "s3", "s4"],
fraction_for_core=1.)
expected = []
self.assertEqual(actual, expected)
otu_table1 = """{"rows": [{"id": "o1", "metadata": {"OTUMetaData": "Eukarya;Human"}}, {"id": "o2", "metadata": {"OTUMetaData": "Eukarya;Moose"}}, {"id": "o3", "metadata": {"OTUMetaData": "Eukarya;Galapagos Tortoise"}}, {"id": "o4", "metadata": {"OTUMetaData": "Eukarya;Bigfoot"}}, {"id": "o5", "metadata": {"OTUMetaData": "Eukarya;Chicken"}}], "format": "Biological Observation Matrix 0.9.3", "data": [[0, 0, 105.0], [0, 1, 42.0], [0, 2, 99.0], [0, 3, 60000.0], [1, 2, 9.0], [1, 3, 99.0], [2, 0, 45.0], [4, 0, 1.0], [4, 1, 2.0], [4, 3, 3.0]], "columns": [{"id": "S1", "metadata": null}, {"id": "s2", "metadata": null}, {"id": "s3", "metadata": null}, {"id": "s4", "metadata": null}], "generated_by": "BIOM-Format 0.9.3", "matrix_type": "sparse", "shape": [5, 4], "format_url": "http://biom-format.org", "date": "2012-06-08T14:42:46.058411", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
otu_table2 = """{"rows": [{"id": "o1", "metadata": null}, {"id": "o2", "metadata": null}, {"id": "o3", "metadata": null}, {"id": "o4", "metadata": null}, {"id": "o5", "metadata": null}], "format": "Biological Observation Matrix 0.9.3", "data": [[0, 0, 105.0], [0, 1, 42.0], [0, 2, 99.0], [1, 2, 9.0], [1, 3, 99.0], [2, 0, 45.0], [4, 0, 1.0], [4, 1, 2.0], [4, 3, 3.0]], "columns": [{"id": "S1", "metadata": null}, {"id": "s2", "metadata": null}, {"id": "s3", "metadata": null}, {"id": "s4", "metadata": null}], "generated_by": "BIOM-Format 0.9.3", "matrix_type": "sparse", "shape": [5, 4], "format_url": "http://biom-format.org", "date": "2012-06-08T14:43:27.964500", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
if __name__ == "__main__":
main()
| gpl-2.0 | -5,947,229,993,245,698,000 | 55.986486 | 908 | 0.526678 | false |
splunk/splunk-webframework | contrib/django/django/contrib/gis/gdal/base.py | 224 | 1155 | from ctypes import c_void_p
from django.contrib.gis.gdal.error import GDALException
from django.utils import six
class GDALBase(object):
"""
Base object for GDAL objects that has a pointer access property
that controls access to the underlying C pointer.
"""
# Initially the pointer is NULL.
_ptr = None
# Default allowed pointer type.
ptr_type = c_void_p
# Pointer access property.
def _get_ptr(self):
        # Raise an exception if the pointer isn't valid; we don't
        # want to be passing NULL pointers to routines --
# that's very bad.
if self._ptr: return self._ptr
else: raise GDALException('GDAL %s pointer no longer valid.' % self.__class__.__name__)
def _set_ptr(self, ptr):
# Only allow the pointer to be set with pointers of the
# compatible type or None (NULL).
if isinstance(ptr, six.integer_types):
self._ptr = self.ptr_type(ptr)
elif ptr is None or isinstance(ptr, self.ptr_type):
self._ptr = ptr
else:
raise TypeError('Incompatible pointer type')
ptr = property(_get_ptr, _set_ptr)
| apache-2.0 | 5,080,525,172,835,518,000 | 31.083333 | 95 | 0.632035 | false |
ageazrael/godot | modules/mono/config.py | 3 | 6083 |
import imp
import os
import sys
from SCons.Script import BoolVariable, Environment, Variables
monoreg = imp.load_source('mono_reg_utils', 'modules/mono/mono_reg_utils.py')
def find_file_in_dir(directory, files, prefix='', extension=''):
if not extension.startswith('.'):
extension = '.' + extension
for curfile in files:
if os.path.isfile(os.path.join(directory, prefix + curfile + extension)):
return curfile
return ''
def can_build(platform):
if platform in ["javascript"]:
return False # Not yet supported
return True
def is_enabled():
# The module is disabled by default. Use module_mono_enabled=yes to enable it.
return False
def copy_file_no_replace(src_dir, dst_dir, name):
from shutil import copyfile
src_path = os.path.join(src_dir, name)
dst_path = os.path.join(dst_dir, name)
need_copy = True
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
elif os.path.exists(dst_path):
need_copy = False
if need_copy:
copyfile(src_path, dst_path)
def configure(env):
env.use_ptrcall = True
env.add_module_version_string("mono")
envvars = Variables()
envvars.Add(BoolVariable('mono_static', 'Statically link mono', False))
envvars.Update(env)
bits = env['bits']
mono_static = env['mono_static']
mono_lib_names = ['mono-2.0-sgen', 'monosgen-2.0']
if env['platform'] == 'windows':
if mono_static:
raise RuntimeError('mono-static: Not supported on Windows')
if bits == '32':
if os.getenv('MONO32_PREFIX'):
mono_root = os.getenv('MONO32_PREFIX')
elif os.name == 'nt':
mono_root = monoreg.find_mono_root_dir(bits)
else:
if os.getenv('MONO64_PREFIX'):
mono_root = os.getenv('MONO64_PREFIX')
elif os.name == 'nt':
mono_root = monoreg.find_mono_root_dir(bits)
if not mono_root:
raise RuntimeError('Mono installation directory not found')
mono_lib_path = os.path.join(mono_root, 'lib')
env.Append(LIBPATH=mono_lib_path)
env.Append(CPPPATH=os.path.join(mono_root, 'include', 'mono-2.0'))
mono_lib_name = find_file_in_dir(mono_lib_path, mono_lib_names, extension='.lib')
if not mono_lib_name:
raise RuntimeError('Could not find mono library in: ' + mono_lib_path)
if os.getenv('VCINSTALLDIR'):
env.Append(LINKFLAGS=mono_lib_name + Environment()['LIBSUFFIX'])
else:
env.Append(LIBS=mono_lib_name)
mono_bin_path = os.path.join(mono_root, 'bin')
mono_dll_name = find_file_in_dir(mono_bin_path, mono_lib_names, extension='.dll')
if not mono_dll_name:
raise RuntimeError('Could not find mono shared library in: ' + mono_bin_path)
copy_file_no_replace(mono_bin_path, 'bin', mono_dll_name + '.dll')
else:
sharedlib_ext = '.dylib' if sys.platform == 'darwin' else '.so'
mono_root = ''
if bits == '32':
if os.getenv('MONO32_PREFIX'):
mono_root = os.getenv('MONO32_PREFIX')
else:
if os.getenv('MONO64_PREFIX'):
mono_root = os.getenv('MONO64_PREFIX')
if mono_root:
mono_lib_path = os.path.join(mono_root, 'lib')
env.Append(LIBPATH=mono_lib_path)
env.Append(CPPPATH=os.path.join(mono_root, 'include', 'mono-2.0'))
mono_lib = find_file_in_dir(mono_lib_path, mono_lib_names, prefix='lib', extension='.a')
if not mono_lib:
raise RuntimeError('Could not find mono library in: ' + mono_lib_path)
env.Append(CPPFLAGS=['-D_REENTRANT'])
if mono_static:
mono_lib_file = os.path.join(mono_lib_path, 'lib' + mono_lib + '.a')
if sys.platform == "darwin":
env.Append(LINKFLAGS=['-Wl,-force_load,' + mono_lib_file])
elif sys.platform == "linux" or sys.platform == "linux2":
env.Append(LINKFLAGS=['-Wl,-whole-archive', mono_lib_file, '-Wl,-no-whole-archive'])
else:
raise RuntimeError('mono-static: Not supported on this platform')
else:
env.Append(LIBS=[mono_lib])
if sys.platform == "darwin":
env.Append(LIBS=['iconv', 'pthread'])
elif sys.platform == "linux" or sys.platform == "linux2":
env.Append(LIBS=['m', 'rt', 'dl', 'pthread'])
if not mono_static:
mono_so_name = find_file_in_dir(mono_lib_path, mono_lib_names, prefix='lib', extension=sharedlib_ext)
if not mono_so_name:
raise RuntimeError('Could not find mono shared library in: ' + mono_lib_path)
copy_file_no_replace(mono_lib_path, 'bin', 'lib' + mono_so_name + sharedlib_ext)
else:
if mono_static:
raise RuntimeError('mono-static: Not supported with pkg-config. Specify a mono prefix manually')
env.ParseConfig('pkg-config monosgen-2 --cflags --libs')
mono_lib_path = ''
mono_so_name = ''
tmpenv = Environment()
tmpenv.ParseConfig('pkg-config monosgen-2 --libs-only-L')
for hint_dir in tmpenv['LIBPATH']:
name_found = find_file_in_dir(hint_dir, mono_lib_names, prefix='lib', extension=sharedlib_ext)
if name_found:
mono_lib_path = hint_dir
mono_so_name = name_found
break
if not mono_so_name:
raise RuntimeError('Could not find mono shared library in: ' + str(tmpenv['LIBPATH']))
copy_file_no_replace(mono_lib_path, 'bin', 'lib' + mono_so_name + sharedlib_ext)
env.Append(LINKFLAGS='-rdynamic')
def get_doc_classes():
return ["@C#", "CSharpScript", "GodotSharp"]
def get_doc_path():
return "doc_classes"
| mit | -1,885,713,694,005,796,900 | 32.059783 | 117 | 0.570278 | false |
augustomatheuss/myjoy | src/myjoy.py | 1 | 3782 | # Copyright (c) 2015, Augusto Damasceno.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import pygame
import time
print 'Init joystick module with pygame...\n'
# Initialize the joystick module.
pygame.joystick.init()
# Get the number of joysticks.
num = pygame.joystick.get_count()
if num != 0:
c = 1
    if num != 1:
        print 'There are %d joysticks, choose one.' % num
c = input(">> ")
while (c < 1) or (c > num):
print 'Wrong joystick number, choose again.'
c = input(">> ")
    print 'Trying to initialize joystick %d.' % c
    myJoystick = pygame.joystick.Joystick(c - 1)  # pygame joystick ids are 0-based
myJoystick.init()
if myJoystick.get_init():
# Initialize all imported pygame modules
pygame.init()
        print 'Joystick %d successfully initialized.' % c
print 'Id: %s' % myJoystick.get_id()
print 'Name: %s' % myJoystick.get_name()
axes = myJoystick.get_numaxes()
print 'Axes: %s' % axes
balls = myJoystick.get_numballs()
print 'Balls: %s' % balls
buttons = myJoystick.get_numbuttons()
print 'Buttons: %s' % buttons
hats = myJoystick.get_numhats()
print 'Hats: %s' % hats
quit = False
# Main loop.
while quit == False:
# Write here your code using the joystick!!
# Internally process pygame event handlers.
pygame.event.pump()
# Get events from the queue.
for event in pygame.event.get():
if event.type == pygame.JOYAXISMOTION:
print 'Axis %d: %f' % (event.axis,event.value)
if event.type == pygame.JOYBALLMOTION:
                print 'Ball %d: <%d , %d>' % (event.ball, event.rel[0], event.rel[1])
if event.type == pygame.JOYBUTTONDOWN:
print 'Button %d pressed.' % event.button
if event.type == pygame.JOYBUTTONUP:
print 'Button %d released.' % event.button
if event.type == pygame.JOYHATMOTION:
print 'Hat %d: ' % event.hat
print event.value
# Stopping criterion.
#if 'remove this comment and write your condition here':
# quit = True
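            # A minimal example condition (a sketch; assumes the pad has a
            # button with index 0 -- adjust the index for your hardware):
            #if myJoystick.get_button(0):
            #    quit = True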
else:
print 'Joystick %d not initialized, end of program.' % c
else:
print 'Insert a joystick before run this program.'
# Uninitialize the joystick module.
pygame.joystick.quit()
# Uninitialize all pygame modules.
pygame.quit()
| bsd-2-clause | -8,496,281,784,079,442,000 | 41.977273 | 88 | 0.637758 | false |
crepererum/invenio | invenio/modules/oaiharvester/tasks/harvesting.py | 13 | 8009 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Tasks used for main OAI harvesting workflow."""
import os
import time
from functools import wraps
from invenio.base.globals import cfg
def init_harvesting(obj, eng):
"""Get all the options from previous state.
    This function gets all the options linked to the task and stores them in the
    object to be used later.
:param obj: Bibworkflow Object to process
:param eng: BibWorkflowEngine processing the object
"""
try:
obj.extra_data["options"] = eng.extra_data["options"]
except KeyError:
eng.log.error("No options for this task have been found. It is possible"
"that the following task could failed or work not as expected")
obj.extra_data["options"] = {}
eng.log.info("end of init_harvesting")
init_harvesting.description = 'Start harvesting'
def filtering_oai_pmh_identifier(obj, eng):
"""Check if the current OAI record has been processed already this run."""
from ..utils import identifier_extraction_from_string
if "oaiharvest" not in eng.extra_data:
eng.extra_data["oaiharvest"] = {}
if "identifiers" not in eng.extra_data["oaiharvest"]:
eng.extra_data["oaiharvest"]["identifiers"] = []
if not obj.data:
obj.log.error("No data found in object!")
return False
elif isinstance(obj.data, list):
# In case it is a list
obj.data = obj.data[0]
identifier = (identifier_extraction_from_string(obj.data) or
identifier_extraction_from_string(obj.data, oai_namespace="") or
"")
obj.extra_data["oai_identifier"] = identifier
if identifier in eng.extra_data["oaiharvest"]["identifiers"]:
# The record has already been harvested in this run
return False
else:
eng.extra_data["oaiharvest"]["identifiers"].append(identifier)
return True
def get_repositories_list(repositories=()):
"""Get repository list in options.
Here we are retrieving the oaiharvest configuration for the task.
    It will allow us to perform all the correct operations in the future.
:param repositories:
"""
from invenio.modules.oaiharvester.models import OaiHARVEST
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
@wraps(get_repositories_list)
def _get_repositories_list(obj, eng):
repositories_to_harvest = repositories
reposlist_temp = []
if obj.extra_data["options"]["repository"]:
repositories_to_harvest = obj.extra_data["options"]["repository"]
if repositories_to_harvest:
for reposname in repositories_to_harvest:
try:
reposlist_temp.append(
OaiHARVEST.get(OaiHARVEST.name == reposname).one())
except (MultipleResultsFound, NoResultFound):
eng.log.critical(
"Repository %s doesn't exit into our database",
reposname)
else:
reposlist_temp = OaiHARVEST.get(OaiHARVEST.name != "").all()
true_repo_list = []
for repo in reposlist_temp:
true_repo_list.append(repo.to_dict())
if true_repo_list:
return true_repo_list
else:
eng.halt(
"No Repository named %s. Impossible to harvest non-existing things."
% repositories_to_harvest)
return _get_repositories_list
def harvest_records(obj, eng):
"""Run the harvesting task.
    The row argument is the oaiharvest task queue row, containing id, arguments,
etc.
Return 1 in case of success and 0 in case of failure.
    :param obj: BibworkflowObject being processed
:param eng: BibWorkflowEngine processing the object
"""
from invenio.legacy.oaiharvest.utils import (collect_identifiers,
harvest_step)
from invenio.modules.workflows.errors import WorkflowError
harvested_identifier_list = []
harvestpath = "%s_%d_%s_" % (
"%s/oaiharvest_%s" % (cfg['CFG_TMPSHAREDDIR'], eng.uuid),
1, time.strftime("%Y%m%d%H%M%S"))
    # Go ahead: check if the user requested from-until harvesting
try:
if "dates" not in obj.extra_data["options"]:
obj.extra_data["options"]["dates"] = []
if "identifiers" not in obj.extra_data["options"]:
obj.extra_data["options"]["identifiers"] = []
except TypeError:
obj.extra_data["options"] = {"dates": [], "identifiers": []}
arguments = obj.extra_data["repository"]["arguments"]
if arguments:
eng.log.info("running with post-processes: %r" % (arguments,))
else:
eng.log.error(
"No arguments found... It can be causing major error after this point.")
# Harvest phase
try:
harvested_files_list = harvest_step(obj,
harvestpath)
except Exception as e:
eng.log.error("Error while harvesting %s. Skipping." % (obj.data,))
raise WorkflowError(
"Error while harvesting %r. Skipping : %s." % (obj.data, repr(e)),
id_workflow=eng.uuid, id_object=obj.id)
if len(harvested_files_list) == 0:
eng.log.info("No records harvested for %s" % (obj.data["name"],))
# Retrieve all OAI IDs and set active list
harvested_identifier_list.append(collect_identifiers(harvested_files_list))
if len(harvested_files_list) != len(harvested_identifier_list[0]):
# Harvested files and its identifiers are 'out of sync', abort harvest
raise WorkflowError(
"Harvested files miss identifiers for %s" % (arguments,),
id_workflow=eng.uuid,
id_object=obj.id)
obj.extra_data['harvested_files_list'] = harvested_files_list
eng.log.info(
"%d files harvested and processed \n End harvest records task" % (
len(harvested_files_list),))
def get_records_from_file(path=None):
"""Allow to retrieve the records from a file."""
from ..utils import record_extraction_from_file
@wraps(get_records_from_file)
def _get_records_from_file(obj, eng):
if "_LoopData" not in eng.extra_data:
eng.extra_data["_LoopData"] = {}
if "get_records_from_file" not in eng.extra_data["_LoopData"]:
eng.extra_data["_LoopData"]["get_records_from_file"] = {}
if path:
eng.extra_data["_LoopData"]["get_records_from_file"].update(
{"data": record_extraction_from_file(path)})
else:
eng.extra_data["_LoopData"]["get_records_from_file"].update(
{"data": record_extraction_from_file(obj.data)})
eng.extra_data["_LoopData"]["get_records_from_file"][
"path"] = obj.data
elif os.path.isfile(obj.data) and obj.data != \
eng.extra_data["_LoopData"]["get_records_from_file"]["path"]:
eng.extra_data["_LoopData"]["get_records_from_file"].update(
{"data": record_extraction_from_file(obj.data)})
return eng.extra_data["_LoopData"]["get_records_from_file"]["data"]
return _get_records_from_file
| gpl-2.0 | 6,134,715,825,674,059,000 | 36.957346 | 85 | 0.624298 | false |
taohaoge/vincent | vincent/marks.py | 9 | 3996 | # -*- coding: utf-8 -*-
"""
Marks: Classes to define Vega Marks
"""
from .core import grammar, GrammarClass, KeyedList
from .values import ValueRef
from .properties import PropertySet
from ._compat import str_types
class MarkProperties(GrammarClass):
"""Sets of all Mark properties
Mark properties can change depending on user interaction or changing
data. This class defines four events for which the properties may
change.
"""
@grammar(PropertySet)
def enter(value):
"""PropertySet : properties applied when data is loaded
"""
@grammar(PropertySet)
def exit(value):
"""PropertySet : properties applied when data is removed
"""
@grammar(PropertySet)
def update(value):
"""PropertySet : properties applied for all non-exiting data
(This is vague. Need better Vega docs.)
"""
@grammar(PropertySet)
def hover(value):
"""PropertySet, properties applied on mouse-over
On mouse out, the ``update`` properties are applied.
"""
class MarkRef(GrammarClass):
"""Definitions for Mark source data
"""
@grammar(str_types)
def data(value):
"""string : Name of the source `Data`"""
@grammar(list)
def transform(value):
"""list : List of transforms to apply to the data"""
class Mark(GrammarClass):
"""Definitions for data marks
Marks are the fundamental component that the viewer sees - such as a
    bar, line, etc. This class defines how the marks appear and what data
the marks represent.
"""
_valid_type_values = frozenset(['rect', 'symbol', 'path', 'arc', 'area',
'line', 'image', 'text', 'group'])
@grammar(str_types)
def name(value):
"""string : Optional unique name for mark"""
@grammar(str_types)
def description(value):
"""string : Optional description for mark"""
@grammar(str_types)
def type(value):
"""string : Type of mark
Valid types are ``'rect'``, ``'symbol'``, ``'path'``, ``'arc'``,
``'area'``, ``'line'``, ``'image'``, and ``'text'``.
"""
if value not in Mark._valid_type_values:
raise ValueError(
'invalid mark type %s, valid types are %s' % (
value, Mark._valid_type_values))
@grammar(grammar_type=MarkRef, grammar_name='from')
def from_(value):
"""dict : Description of data to visualize
Note that although the property has the name ``from_`` (using
``from`` is invalid Python syntax), the JSON will contain the
correct property ``from``.
"""
@grammar(MarkProperties)
def properties(value):
"""MarkProperties : Mark property set definitions"""
@grammar(str_types)
def key(value):
"""string : Field to use for data binding
When updating data dynamically, restrict dynamic transitions from
affecting data with the given key. This can be useful for something
like scrolling time series. See the Vega examples.
"""
@grammar(ValueRef)
def delay(value):
"""ValueRef, number : Transitional delay in milliseconds.
"""
@grammar(str_types)
def ease(value):
"""string : Type of transition easing
Valid types are ``'linear'``, ``'quad'``, ``'cubic'``, ``'sin'``,
``'exp'``, ``'circle'``, and ``'bounce'``, which can be appended
with the modifiers ``'in'``, ``'out'``, ``'in-out'``, and
``'out-in'``. The default is ``'cubic-in-out'``.
See the documentation for the d3 ease function for more details.
"""
@grammar(list)
def marks(value):
"""list: For grouped marks, you can define a "marks" with a mark
"""
@grammar((list, KeyedList))
def scales(value):
"""list or KeyedList: For grouped marks, you can define a set of scales
for within the mark groups
"""
| mit | -7,876,248,159,342,856,000 | 28.6 | 79 | 0.597097 | false |
dfunckt/django | tests/get_earliest_or_latest/tests.py | 12 | 6850 | from __future__ import unicode_literals
from datetime import datetime
from django.test import TestCase
from .models import Article, IndexErrorArticle, Person
class EarliestOrLatestTests(TestCase):
"""Tests for the earliest() and latest() objects methods"""
def tearDown(self):
"""Makes sure Article has a get_latest_by"""
if not Article._meta.get_latest_by:
Article._meta.get_latest_by = 'pub_date'
def test_earliest(self):
# Because no Articles exist yet, earliest() raises ArticleDoesNotExist.
with self.assertRaises(Article.DoesNotExist):
Article.objects.earliest()
a1 = Article.objects.create(
headline="Article 1", pub_date=datetime(2005, 7, 26),
expire_date=datetime(2005, 9, 1)
)
a2 = Article.objects.create(
headline="Article 2", pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 7, 28)
)
Article.objects.create(
headline="Article 3", pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 8, 27)
)
Article.objects.create(
headline="Article 4", pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 7, 30)
)
# Get the earliest Article.
self.assertEqual(Article.objects.earliest(), a1)
# Get the earliest Article that matches certain filters.
self.assertEqual(
Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).earliest(),
a2
)
# Pass a custom field name to earliest() to change the field that's used
# to determine the earliest object.
self.assertEqual(Article.objects.earliest('expire_date'), a2)
self.assertEqual(Article.objects.filter(
pub_date__gt=datetime(2005, 7, 26)).earliest('expire_date'), a2)
# earliest() overrides any other ordering specified on the query.
# Refs #11283.
self.assertEqual(Article.objects.order_by('id').earliest(), a1)
# Error is raised if the user forgot to add a get_latest_by
# in the Model.Meta
Article.objects.model._meta.get_latest_by = None
with self.assertRaisesMessage(
AssertionError,
"earliest() and latest() require either a field_name parameter or "
"'get_latest_by' in the model"
):
Article.objects.earliest()
def test_latest(self):
# Because no Articles exist yet, latest() raises ArticleDoesNotExist.
with self.assertRaises(Article.DoesNotExist):
Article.objects.latest()
a1 = Article.objects.create(
headline="Article 1", pub_date=datetime(2005, 7, 26),
expire_date=datetime(2005, 9, 1)
)
Article.objects.create(
headline="Article 2", pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 7, 28)
)
a3 = Article.objects.create(
headline="Article 3", pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 8, 27)
)
a4 = Article.objects.create(
headline="Article 4", pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 7, 30)
)
# Get the latest Article.
self.assertEqual(Article.objects.latest(), a4)
# Get the latest Article that matches certain filters.
self.assertEqual(
Article.objects.filter(pub_date__lt=datetime(2005, 7, 27)).latest(),
a1
)
# Pass a custom field name to latest() to change the field that's used
# to determine the latest object.
self.assertEqual(Article.objects.latest('expire_date'), a1)
self.assertEqual(
Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).latest('expire_date'),
a3,
)
# latest() overrides any other ordering specified on the query (#11283).
self.assertEqual(Article.objects.order_by('id').latest(), a4)
# Error is raised if get_latest_by isn't in Model.Meta.
Article.objects.model._meta.get_latest_by = None
with self.assertRaisesMessage(
AssertionError,
"earliest() and latest() require either a field_name parameter or "
"'get_latest_by' in the model"
):
Article.objects.latest()
def test_latest_manual(self):
# You can still use latest() with a model that doesn't have
# "get_latest_by" set -- just pass in the field name manually.
Person.objects.create(name="Ralph", birthday=datetime(1950, 1, 1))
p2 = Person.objects.create(name="Stephanie", birthday=datetime(1960, 2, 3))
with self.assertRaises(AssertionError):
Person.objects.latest()
self.assertEqual(Person.objects.latest("birthday"), p2)
class TestFirstLast(TestCase):
def test_first(self):
p1 = Person.objects.create(name="Bob", birthday=datetime(1950, 1, 1))
p2 = Person.objects.create(name="Alice", birthday=datetime(1961, 2, 3))
self.assertEqual(Person.objects.first(), p1)
self.assertEqual(Person.objects.order_by('name').first(), p2)
self.assertEqual(Person.objects.filter(birthday__lte=datetime(1955, 1, 1)).first(), p1)
self.assertIsNone(Person.objects.filter(birthday__lte=datetime(1940, 1, 1)).first())
def test_last(self):
p1 = Person.objects.create(name="Alice", birthday=datetime(1950, 1, 1))
p2 = Person.objects.create(name="Bob", birthday=datetime(1960, 2, 3))
# Note: by default PK ordering.
self.assertEqual(Person.objects.last(), p2)
self.assertEqual(Person.objects.order_by('-name').last(), p1)
self.assertEqual(Person.objects.filter(birthday__lte=datetime(1955, 1, 1)).last(), p1)
self.assertIsNone(Person.objects.filter(birthday__lte=datetime(1940, 1, 1)).last())
def test_index_error_not_suppressed(self):
"""
#23555 -- Unexpected IndexError exceptions in QuerySet iteration
shouldn't be suppressed.
"""
def check():
# We know that we've broken the __iter__ method, so the queryset
# should always raise an exception.
with self.assertRaises(IndexError):
IndexErrorArticle.objects.all()[0]
with self.assertRaises(IndexError):
IndexErrorArticle.objects.all().first()
with self.assertRaises(IndexError):
IndexErrorArticle.objects.all().last()
check()
# And it does not matter if there are any records in the DB.
IndexErrorArticle.objects.create(
headline="Article 1", pub_date=datetime(2005, 7, 26),
expire_date=datetime(2005, 9, 1)
)
check()
| bsd-3-clause | -6,846,425,904,974,773,000 | 39.532544 | 95 | 0.612701 | false |
mmauroy/SickRage | lib/sqlalchemy/ext/associationproxy.py | 76 | 33046 | # ext/associationproxy.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Contain the ``AssociationProxy`` class.
The ``AssociationProxy`` is a Python property object which provides
transparent proxied access to the endpoint of an association object.
See the example ``examples/association/proxied_association.py``.
"""
import itertools
import operator
import weakref
from .. import exc, orm, util
from ..orm import collections, interfaces
from ..sql import not_, or_
def association_proxy(target_collection, attr, **kw):
"""Return a Python property implementing a view of a target
attribute which references an attribute on members of the
target.
The returned value is an instance of :class:`.AssociationProxy`.
Implements a Python property representing a relationship as a collection
of simpler values, or a scalar value. The proxied property will mimic
the collection type of the target (list, dict or set), or, in the case of
a one to one relationship, a simple scalar value.
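    For example (a minimal sketch; the ``User``/``Keyword`` names and the
    ``kw`` relationship are assumptions for illustration)::

        class User(Base):
            # ... columns, plus a 'kw' relationship to Keyword ...
            keywords = association_proxy('kw', 'keyword')

    Iterating ``user.keywords`` then yields ``keyword.keyword`` for each
    ``Keyword`` in ``user.kw``.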
:param target_collection: Name of the attribute we'll proxy to.
This attribute is typically mapped by
:func:`~sqlalchemy.orm.relationship` to link to a target collection, but
can also be a many-to-one or non-scalar relationship.
:param attr: Attribute on the associated instance or instances we'll
proxy for.
For example, given a target collection of [obj1, obj2], a list created
by this proxy property would look like [getattr(obj1, *attr*),
getattr(obj2, *attr*)]
If the relationship is one-to-one or otherwise uselist=False, then
simply: getattr(obj, *attr*)
:param creator: optional.
When new items are added to this proxied collection, new instances of
the class collected by the target collection will be created. For list
and set collections, the target class constructor will be called with
the 'value' for the new instance. For dict types, two arguments are
passed: key and value.
If you want to construct instances differently, supply a *creator*
function that takes arguments as above and returns instances.
For scalar relationships, creator() will be called if the target is None.
If the target is present, set operations are proxied to setattr() on the
associated object.
If you have an associated object with multiple attributes, you may set
up multiple association proxies mapping to different attributes. See
the unit tests for examples, and for examples of how creator() functions
can be used to construct the scalar relationship on-demand in this
situation.
:param \*\*kw: Passes along any other keyword arguments to
:class:`.AssociationProxy`.
"""
return AssociationProxy(target_collection, attr, **kw)
ASSOCIATION_PROXY = util.symbol('ASSOCIATION_PROXY')
"""Symbol indicating an :class:`_InspectionAttr` that's
of type :class:`.AssociationProxy`.
Is assigned to the :attr:`._InspectionAttr.extension_type`
attribute.
"""
class AssociationProxy(interfaces._InspectionAttr):
"""A descriptor that presents a read/write view of an object attribute."""
is_attribute = False
extension_type = ASSOCIATION_PROXY
def __init__(self, target_collection, attr, creator=None,
getset_factory=None, proxy_factory=None,
proxy_bulk_set=None):
"""Construct a new :class:`.AssociationProxy`.
The :func:`.association_proxy` function is provided as the usual
entrypoint here, though :class:`.AssociationProxy` can be instantiated
and/or subclassed directly.
:param target_collection: Name of the collection we'll proxy to,
usually created with :func:`.relationship`.
:param attr: Attribute on the collected instances we'll proxy
for. For example, given a target collection of [obj1, obj2], a
list created by this proxy property would look like
[getattr(obj1, attr), getattr(obj2, attr)]
:param creator: Optional. When new items are added to this proxied
collection, new instances of the class collected by the target
collection will be created. For list and set collections, the
target class constructor will be called with the 'value' for the
new instance. For dict types, two arguments are passed:
key and value.
If you want to construct instances differently, supply a 'creator'
function that takes arguments as above and returns instances.
:param getset_factory: Optional. Proxied attribute access is
automatically handled by routines that get and set values based on
the `attr` argument for this proxy.
If you would like to customize this behavior, you may supply a
`getset_factory` callable that produces a tuple of `getter` and
`setter` functions. The factory is called with two arguments, the
abstract type of the underlying collection and this proxy instance.
:param proxy_factory: Optional. The type of collection to emulate is
determined by sniffing the target collection. If your collection
type can't be determined by duck typing or you'd like to use a
different collection implementation, you may supply a factory
function to produce those collections. Only applicable to
non-scalar relationships.
:param proxy_bulk_set: Optional, use with proxy_factory. See
the _set() method for details.
"""
self.target_collection = target_collection
self.value_attr = attr
self.creator = creator
self.getset_factory = getset_factory
self.proxy_factory = proxy_factory
self.proxy_bulk_set = proxy_bulk_set
self.owning_class = None
self.key = '_%s_%s_%s' % (
type(self).__name__, target_collection, id(self))
self.collection_class = None
@property
def remote_attr(self):
"""The 'remote' :class:`.MapperProperty` referenced by this
:class:`.AssociationProxy`.
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.attr`
:attr:`.AssociationProxy.local_attr`
"""
return getattr(self.target_class, self.value_attr)
@property
def local_attr(self):
"""The 'local' :class:`.MapperProperty` referenced by this
:class:`.AssociationProxy`.
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.attr`
:attr:`.AssociationProxy.remote_attr`
"""
return getattr(self.owning_class, self.target_collection)
@property
def attr(self):
"""Return a tuple of ``(local_attr, remote_attr)``.
This attribute is convenient when specifying a join
using :meth:`.Query.join` across two relationships::
sess.query(Parent).join(*Parent.proxied.attr)
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.local_attr`
:attr:`.AssociationProxy.remote_attr`
"""
return (self.local_attr, self.remote_attr)
def _get_property(self):
return (orm.class_mapper(self.owning_class).
get_property(self.target_collection))
@util.memoized_property
def target_class(self):
"""The intermediary class handled by this :class:`.AssociationProxy`.
Intercepted append/set/assignment events will result
in the generation of new instances of this class.
"""
return self._get_property().mapper.class_
@util.memoized_property
def scalar(self):
"""Return ``True`` if this :class:`.AssociationProxy` proxies a scalar
relationship on the local side."""
scalar = not self._get_property().uselist
if scalar:
self._initialize_scalar_accessors()
return scalar
@util.memoized_property
def _value_is_scalar(self):
return not self._get_property().\
mapper.get_property(self.value_attr).uselist
@util.memoized_property
def _target_is_object(self):
return getattr(self.target_class, self.value_attr).impl.uses_objects
def __get__(self, obj, class_):
if self.owning_class is None:
            self.owning_class = class_ or type(obj)
if obj is None:
return self
if self.scalar:
target = getattr(obj, self.target_collection)
return self._scalar_get(target)
else:
try:
# If the owning instance is reborn (orm session resurrect,
# etc.), refresh the proxy cache.
creator_id, proxy = getattr(obj, self.key)
if id(obj) == creator_id:
return proxy
except AttributeError:
pass
proxy = self._new(_lazy_collection(obj, self.target_collection))
setattr(obj, self.key, (id(obj), proxy))
return proxy
def __set__(self, obj, values):
if self.owning_class is None:
self.owning_class = type(obj)
if self.scalar:
            creator = self.creator or self.target_class
target = getattr(obj, self.target_collection)
if target is None:
setattr(obj, self.target_collection, creator(values))
else:
self._scalar_set(target, values)
else:
proxy = self.__get__(obj, None)
if proxy is not values:
proxy.clear()
self._set(proxy, values)
def __delete__(self, obj):
if self.owning_class is None:
self.owning_class = type(obj)
delattr(obj, self.key)
def _initialize_scalar_accessors(self):
if self.getset_factory:
get, set = self.getset_factory(None, self)
else:
get, set = self._default_getset(None)
self._scalar_get, self._scalar_set = get, set
def _default_getset(self, collection_class):
attr = self.value_attr
_getter = operator.attrgetter(attr)
getter = lambda target: _getter(target) if target is not None else None
if collection_class is dict:
setter = lambda o, k, v: setattr(o, attr, v)
else:
setter = lambda o, v: setattr(o, attr, v)
return getter, setter
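    # Sketch of what the generated accessors above do (illustrative only);
    # assuming value_attr == 'name':
    #     getter(obj)       -> obj.name, or None when obj is None
    #     setter(obj, v)    -> obj.name = v     (list/set collections)
    #     setter(obj, k, v) -> obj.name = v     (dict collections; key unused)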
def _new(self, lazy_collection):
        creator = self.creator or self.target_class
self.collection_class = util.duck_type_collection(lazy_collection())
if self.proxy_factory:
return self.proxy_factory(
lazy_collection, creator, self.value_attr, self)
if self.getset_factory:
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
if self.collection_class is list:
return _AssociationList(
lazy_collection, creator, getter, setter, self)
elif self.collection_class is dict:
return _AssociationDict(
lazy_collection, creator, getter, setter, self)
elif self.collection_class is set:
return _AssociationSet(
lazy_collection, creator, getter, setter, self)
else:
raise exc.ArgumentError(
'could not guess which interface to use for '
'collection_class "%s" backing "%s"; specify a '
'proxy_factory and proxy_bulk_set manually' %
(self.collection_class.__name__, self.target_collection))
def _inflate(self, proxy):
        creator = self.creator or self.target_class
if self.getset_factory:
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
proxy.creator = creator
proxy.getter = getter
proxy.setter = setter
def _set(self, proxy, values):
if self.proxy_bulk_set:
self.proxy_bulk_set(proxy, values)
elif self.collection_class is list:
proxy.extend(values)
elif self.collection_class is dict:
proxy.update(values)
elif self.collection_class is set:
proxy.update(values)
else:
raise exc.ArgumentError(
'no proxy_bulk_set supplied for custom '
'collection_class implementation')
@property
def _comparator(self):
return self._get_property().comparator
def any(self, criterion=None, **kwargs):
"""Produce a proxied 'any' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
if self._value_is_scalar:
value_expr = getattr(
self.target_class, self.value_attr).has(criterion, **kwargs)
else:
value_expr = getattr(
self.target_class, self.value_attr).any(criterion, **kwargs)
# check _value_is_scalar here, otherwise
# we're scalar->scalar - call .any() so that
# the "can't call any() on a scalar" msg is raised.
if self.scalar and not self._value_is_scalar:
return self._comparator.has(
value_expr
)
else:
return self._comparator.any(
value_expr
)
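    # Illustrative filter usage (``Parent``, ``Child`` and the proxied
    # relationship are hypothetical, not defined in this module):
    #     session.query(Parent).filter(
    #         Parent.proxied.any(Child.name == 'ed'))
    # composes roughly into an EXISTS subquery over the two joined
    # relationships.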
def has(self, criterion=None, **kwargs):
"""Produce a proxied 'has' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
if self._target_is_object:
return self._comparator.has(
getattr(self.target_class, self.value_attr).\
has(criterion, **kwargs)
)
else:
if criterion is not None or kwargs:
raise exc.ArgumentError(
"Non-empty has() not allowed for "
"column-targeted association proxy; use ==")
return self._comparator.has()
def contains(self, obj):
"""Produce a proxied 'contains' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
, :meth:`.RelationshipProperty.Comparator.has`,
and/or :meth:`.RelationshipProperty.Comparator.contains`
operators of the underlying proxied attributes.
"""
if self.scalar and not self._value_is_scalar:
return self._comparator.has(
getattr(self.target_class, self.value_attr).contains(obj)
)
else:
return self._comparator.any(**{self.value_attr: obj})
def __eq__(self, obj):
# note the has() here will fail for collections; eq_()
# is only allowed with a scalar.
if obj is None:
return or_(
self._comparator.has(**{self.value_attr: obj}),
self._comparator == None
)
else:
return self._comparator.has(**{self.value_attr: obj})
def __ne__(self, obj):
# note the has() here will fail for collections; eq_()
# is only allowed with a scalar.
return self._comparator.has(
getattr(self.target_class, self.value_attr) != obj)
class _lazy_collection(object):
def __init__(self, obj, target):
self.ref = weakref.ref(obj)
self.target = target
def __call__(self):
obj = self.ref()
if obj is None:
raise exc.InvalidRequestError(
"stale association proxy, parent object has gone out of "
"scope")
return getattr(obj, self.target)
def __getstate__(self):
return {'obj': self.ref(), 'target': self.target}
def __setstate__(self, state):
self.ref = weakref.ref(state['obj'])
self.target = state['target']
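# Note on the weak reference above (added commentary, not in the original
# source): the proxy holds only a weakref to its parent, so keeping a proxy
# alive does not keep the parent alive.  Roughly:
#     proxy = parent.keywords      # hypothetical association proxy
#     del parent                   # parent may now be garbage collected
#     list(proxy)                  # InvalidRequestError: stale association proxy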
class _AssociationCollection(object):
def __init__(self, lazy_collection, creator, getter, setter, parent):
"""Constructs an _AssociationCollection.
This will always be a subclass of either _AssociationList,
_AssociationSet, or _AssociationDict.
lazy_collection
A callable returning a list-based collection of entities (usually an
object attribute managed by a SQLAlchemy relationship())
creator
A function that creates new target entities. Given one parameter:
value. This assertion is assumed::
obj = creator(somevalue)
assert getter(obj) == somevalue
getter
A function. Given an associated object, return the 'value'.
setter
A function. Given an associated object and a value, store that
value on the object.
"""
self.lazy_collection = lazy_collection
self.creator = creator
self.getter = getter
self.setter = setter
self.parent = parent
col = property(lambda self: self.lazy_collection())
def __len__(self):
return len(self.col)
def __bool__(self):
return bool(self.col)
__nonzero__ = __bool__
def __getstate__(self):
return {'parent': self.parent, 'lazy_collection': self.lazy_collection}
def __setstate__(self, state):
self.parent = state['parent']
self.lazy_collection = state['lazy_collection']
self.parent._inflate(self)
class _AssociationList(_AssociationCollection):
"""Generic, converting, list-to-list proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object):
return self.getter(object)
def _set(self, object, value):
return self.setter(object, value)
def __getitem__(self, index):
return self._get(self.col[index])
def __setitem__(self, index, value):
if not isinstance(index, slice):
self._set(self.col[index], value)
else:
if index.stop is None:
stop = len(self)
elif index.stop < 0:
stop = len(self) + index.stop
else:
stop = index.stop
            step = index.step or 1
            start = index.start or 0
            rng = list(range(start, stop, step))
if step == 1:
for i in rng:
del self[start]
i = start
for item in value:
self.insert(i, item)
i += 1
else:
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s" % (len(value),
len(rng)))
for i, item in zip(rng, value):
self._set(self.col[i], item)
def __delitem__(self, index):
del self.col[index]
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __getslice__(self, start, end):
return [self._get(member) for member in self.col[start:end]]
def __setslice__(self, start, end, values):
members = [self._create(v) for v in values]
self.col[start:end] = members
def __delslice__(self, start, end):
del self.col[start:end]
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or
just use the underlying collection directly from its property
on the parent.
"""
for member in self.col:
yield self._get(member)
        return
def append(self, value):
item = self._create(value)
self.col.append(item)
def count(self, value):
return sum([1 for _ in
util.itertools_filter(lambda v: v == value, iter(self))])
def extend(self, values):
for v in values:
self.append(v)
def insert(self, index, value):
self.col[index:index] = [self._create(value)]
def pop(self, index=-1):
return self.getter(self.col.pop(index))
def remove(self, value):
for i, val in enumerate(self):
if val == value:
del self.col[i]
return
raise ValueError("value not in list")
def reverse(self):
"""Not supported, use reversed(mylist)"""
raise NotImplementedError
def sort(self):
"""Not supported, use sorted(mylist)"""
raise NotImplementedError
def clear(self):
del self.col[0:len(self.col)]
def __eq__(self, other):
return list(self) == other
def __ne__(self, other):
return list(self) != other
def __lt__(self, other):
return list(self) < other
def __le__(self, other):
return list(self) <= other
def __gt__(self, other):
return list(self) > other
def __ge__(self, other):
return list(self) >= other
def __cmp__(self, other):
return cmp(list(self), other)
def __add__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return list(self) + other
def __radd__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return other + list(self)
def __mul__(self, n):
if not isinstance(n, int):
return NotImplemented
return list(self) * n
__rmul__ = __mul__
def __iadd__(self, iterable):
self.extend(iterable)
return self
def __imul__(self, n):
# unlike a regular list *=, proxied __imul__ will generate unique
# backing objects for each copy. *= on proxied lists is a bit of
# a stretch anyhow, and this interpretation of the __imul__ contract
# is more plausibly useful than copying the backing objects.
if not isinstance(n, int):
return NotImplemented
if n == 0:
self.clear()
elif n > 1:
self.extend(list(self) * (n - 1))
return self
def copy(self):
return list(self)
def __repr__(self):
return repr(list(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(list, func_name)):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
_NotProvided = util.symbol('_NotProvided')
class _AssociationDict(_AssociationCollection):
"""Generic, converting, dict-to-dict proxy."""
def _create(self, key, value):
return self.creator(key, value)
def _get(self, object):
return self.getter(object)
def _set(self, object, key, value):
return self.setter(object, key, value)
def __getitem__(self, key):
return self._get(self.col[key])
def __setitem__(self, key, value):
if key in self.col:
self._set(self.col[key], key, value)
else:
self.col[key] = self._create(key, value)
def __delitem__(self, key):
del self.col[key]
def __contains__(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def has_key(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def __iter__(self):
return iter(self.col.keys())
def clear(self):
self.col.clear()
def __eq__(self, other):
return dict(self) == other
def __ne__(self, other):
return dict(self) != other
def __lt__(self, other):
return dict(self) < other
def __le__(self, other):
return dict(self) <= other
def __gt__(self, other):
return dict(self) > other
def __ge__(self, other):
return dict(self) >= other
def __cmp__(self, other):
return cmp(dict(self), other)
def __repr__(self):
return repr(dict(self.items()))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
if key not in self.col:
self.col[key] = self._create(key, default)
return default
else:
return self[key]
def keys(self):
return self.col.keys()
if util.py2k:
def iteritems(self):
return ((key, self._get(self.col[key])) for key in self.col)
def itervalues(self):
return (self._get(self.col[key]) for key in self.col)
def iterkeys(self):
return self.col.iterkeys()
def values(self):
return [self._get(member) for member in self.col.values()]
def items(self):
return [(k, self._get(self.col[k])) for k in self]
else:
def items(self):
return ((key, self._get(self.col[key])) for key in self.col)
def values(self):
return (self._get(self.col[key]) for key in self.col)
def pop(self, key, default=_NotProvided):
if default is _NotProvided:
member = self.col.pop(key)
else:
member = self.col.pop(key, default)
return self._get(member)
def popitem(self):
item = self.col.popitem()
return (item[0], self._get(item[1]))
def update(self, *a, **kw):
if len(a) > 1:
raise TypeError('update expected at most 1 arguments, got %i' %
len(a))
elif len(a) == 1:
seq_or_map = a[0]
# discern dict from sequence - took the advice from
# http://www.voidspace.org.uk/python/articles/duck_typing.shtml
# still not perfect :(
if hasattr(seq_or_map, 'keys'):
for item in seq_or_map:
self[item] = seq_or_map[item]
else:
try:
for k, v in seq_or_map:
self[k] = v
except ValueError:
raise ValueError(
"dictionary update sequence "
"requires 2-element tuples")
        for key, value in kw.items():
self[key] = value
def copy(self):
return dict(self.items())
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(dict, func_name)):
func.__doc__ = getattr(dict, func_name).__doc__
del func_name, func
class _AssociationSet(_AssociationCollection):
"""Generic, converting, set-to-set proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object):
return self.getter(object)
def _set(self, object, value):
return self.setter(object, value)
def __len__(self):
return len(self.col)
    def __bool__(self):
        return bool(self.col)
__nonzero__ = __bool__
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or just use
the underlying collection directly from its property on the parent.
"""
for member in self.col:
yield self._get(member)
        return
def add(self, value):
if value not in self:
self.col.add(self._create(value))
# for discard and remove, choosing a more expensive check strategy rather
# than call self.creator()
def discard(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
break
def remove(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
return
raise KeyError(value)
def pop(self):
if not self.col:
raise KeyError('pop from an empty set')
member = self.col.pop()
return self._get(member)
def update(self, other):
for value in other:
self.add(value)
def __ior__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.add(value)
return self
def _set(self):
return set(iter(self))
def union(self, other):
return set(self).union(other)
__or__ = union
def difference(self, other):
return set(self).difference(other)
__sub__ = difference
def difference_update(self, other):
for value in other:
self.discard(value)
def __isub__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.discard(value)
return self
def intersection(self, other):
return set(self).intersection(other)
__and__ = intersection
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __iand__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def symmetric_difference(self, other):
return set(self).symmetric_difference(other)
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __ixor__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def issubset(self, other):
return set(self).issubset(other)
def issuperset(self, other):
return set(self).issuperset(other)
def clear(self):
self.col.clear()
def copy(self):
return set(self)
def __eq__(self, other):
return set(self) == other
def __ne__(self, other):
return set(self) != other
def __lt__(self, other):
return set(self) < other
def __le__(self, other):
return set(self) <= other
def __gt__(self, other):
return set(self) > other
def __ge__(self, other):
return set(self) >= other
def __repr__(self):
return repr(set(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(set, func_name)):
func.__doc__ = getattr(set, func_name).__doc__
del func_name, func
| gpl-3.0 | 6,393,086,826,157,728,000 | 30.382716 | 84 | 0.581916 | false |
J861449197/edx-platform | common/lib/xmodule/xmodule/contentstore/mongo.py | 51 | 20789 | import pymongo
import gridfs
from gridfs.errors import NoFile
from xmodule.contentstore.content import XASSET_LOCATION_TAG
import logging
from .content import StaticContent, ContentStore, StaticContentStream
from xmodule.exceptions import NotFoundError
from fs.osfs import OSFS
import os
import json
from bson.son import SON
from opaque_keys.edx.keys import AssetKey
from xmodule.modulestore.django import ASSET_IGNORE_REGEX
from xmodule.util.misc import escape_invalid_characters
class MongoContentStore(ContentStore):
# pylint: disable=unused-argument
def __init__(self, host, db, port=27017, user=None, password=None, bucket='fs', collection=None, **kwargs):
"""
Establish the connection with the mongo backend and connect to the collections
        :param collection: ignored but provided for consistency w/ other doc_store_config patterns
"""
logging.debug('Using MongoDB for static content serving at host={0} port={1} db={2}'.format(host, port, db))
# Remove the replicaSet parameter.
kwargs.pop('replicaSet', None)
_db = pymongo.database.Database(
pymongo.MongoClient(
host=host,
port=port,
document_class=dict,
**kwargs
),
db
)
if user is not None and password is not None:
_db.authenticate(user, password)
self.fs = gridfs.GridFS(_db, bucket)
self.fs_files = _db[bucket + ".files"] # the underlying collection GridFS uses
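    # Illustrative construction (added example, not part of the original
    # source); host, db and bucket values are placeholders:
    #     store = MongoContentStore(host='localhost', db='xcontent',
    #                               port=27017, bucket='fs')
    #     store.ensure_indexes()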
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
self.fs_files.database.connection.close()
def _drop_database(self):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
"""
self.close_connections()
self.fs_files.database.connection.drop_database(self.fs_files.database)
def save(self, content):
content_id, content_son = self.asset_db_key(content.location)
# The way to version files in gridFS is to not use the file id as the _id but just as the filename.
# Then you can upload as many versions as you like and access by date or version. Because we use
# the location as the _id, we must delete before adding (there's no replace method in gridFS)
self.delete(content_id) # delete is a noop if the entry doesn't exist; so, don't waste time checking
thumbnail_location = content.thumbnail_location.to_deprecated_list_repr() if content.thumbnail_location else None
with self.fs.new_file(_id=content_id, filename=unicode(content.location), content_type=content.content_type,
displayname=content.name, content_son=content_son,
thumbnail_location=thumbnail_location,
import_path=content.import_path,
# getattr b/c caching may mean some pickled instances don't have attr
locked=getattr(content, 'locked', False)) as fp:
if hasattr(content.data, '__iter__'):
for chunk in content.data:
fp.write(chunk)
else:
fp.write(content.data)
return content
def delete(self, location_or_id):
if isinstance(location_or_id, AssetKey):
location_or_id, _ = self.asset_db_key(location_or_id)
# Deletes of non-existent files are considered successful
self.fs.delete(location_or_id)
def find(self, location, throw_on_not_found=True, as_stream=False):
content_id, __ = self.asset_db_key(location)
try:
if as_stream:
fp = self.fs.get(content_id)
thumbnail_location = getattr(fp, 'thumbnail_location', None)
if thumbnail_location:
thumbnail_location = location.course_key.make_asset_key(
'thumbnail',
thumbnail_location[4]
)
return StaticContentStream(
location, fp.displayname, fp.content_type, fp, last_modified_at=fp.uploadDate,
thumbnail_location=thumbnail_location,
import_path=getattr(fp, 'import_path', None),
length=fp.length, locked=getattr(fp, 'locked', False)
)
else:
with self.fs.get(content_id) as fp:
thumbnail_location = getattr(fp, 'thumbnail_location', None)
if thumbnail_location:
thumbnail_location = location.course_key.make_asset_key(
'thumbnail',
thumbnail_location[4]
)
return StaticContent(
location, fp.displayname, fp.content_type, fp.read(), last_modified_at=fp.uploadDate,
thumbnail_location=thumbnail_location,
import_path=getattr(fp, 'import_path', None),
length=fp.length, locked=getattr(fp, 'locked', False)
)
except NoFile:
if throw_on_not_found:
raise NotFoundError(content_id)
else:
return None
def export(self, location, output_directory):
content = self.find(location)
filename = content.name
if content.import_path is not None:
output_directory = output_directory + '/' + os.path.dirname(content.import_path)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# Escape invalid char from filename.
export_name = escape_invalid_characters(name=filename, invalid_char_list=['/', '\\'])
disk_fs = OSFS(output_directory)
with disk_fs.open(export_name, 'wb') as asset_file:
asset_file.write(content.data)
def export_all_for_course(self, course_key, output_directory, assets_policy_file):
"""
Export all of this course's assets to the output_directory. Export all of the assets'
attributes to the policy file.
Args:
course_key (CourseKey): the :class:`CourseKey` identifying the course
output_directory: the directory under which to put all the asset files
assets_policy_file: the filename for the policy file which should be in the same
directory as the other policy files.
"""
policy = {}
assets, __ = self.get_all_content_for_course(course_key)
for asset in assets:
# TODO: On 6/19/14, I had to put a try/except around this
# to export a course. The course failed on JSON files in
# the /static/ directory placed in it with an import.
#
# If this hasn't been looked at in a while, remove this comment.
#
# When debugging course exports, this might be a good place
# to look. -- pmitros
self.export(asset['asset_key'], output_directory)
for attr, value in asset.iteritems():
if attr not in ['_id', 'md5', 'uploadDate', 'length', 'chunkSize', 'asset_key']:
policy.setdefault(asset['asset_key'].name, {})[attr] = value
with open(assets_policy_file, 'w') as f:
json.dump(policy, f, sort_keys=True, indent=4)
def get_all_content_thumbnails_for_course(self, course_key):
return self._get_all_content_for_course(course_key, get_thumbnails=True)[0]
def get_all_content_for_course(self, course_key, start=0, maxresults=-1, sort=None, filter_params=None):
return self._get_all_content_for_course(
course_key, start=start, maxresults=maxresults, get_thumbnails=False, sort=sort, filter_params=filter_params
)
def remove_redundant_content_for_courses(self):
"""
Finds and removes all redundant files (Mac OS metadata files with filename ".DS_Store"
or filename starts with "._") for all courses
"""
assets_to_delete = 0
for prefix in ['_id', 'content_son']:
query = SON([
('{}.tag'.format(prefix), XASSET_LOCATION_TAG),
('{}.category'.format(prefix), 'asset'),
('{}.name'.format(prefix), {'$regex': ASSET_IGNORE_REGEX}),
])
items = self.fs_files.find(query)
assets_to_delete = assets_to_delete + items.count()
for asset in items:
self.fs.delete(asset[prefix])
self.fs_files.remove(query)
return assets_to_delete
def _get_all_content_for_course(self,
course_key,
get_thumbnails=False,
start=0,
maxresults=-1,
sort=None,
filter_params=None):
'''
Returns a list of all static assets for a course. The return format is a list of asset data dictionary elements.
The asset data dictionaries have the following keys:
asset_key (:class:`opaque_keys.edx.AssetKey`): The key of the asset
displayname: The human-readable name of the asset
        uploadDate (datetime.datetime): The date and time that the file was uploaded
contentType: The mimetype string of the asset
md5: An md5 hash of the asset content
'''
query = query_for_course(course_key, "asset" if not get_thumbnails else "thumbnail")
find_args = {"sort": sort}
if maxresults > 0:
find_args.update({
"skip": start,
"limit": maxresults,
})
if filter_params:
query.update(filter_params)
items = self.fs_files.find(query, **find_args)
count = items.count()
assets = list(items)
# We're constructing the asset key immediately after retrieval from the database so that
# callers are insulated from knowing how our identifiers are stored.
for asset in assets:
asset_id = asset.get('content_son', asset['_id'])
asset['asset_key'] = course_key.make_asset_key(asset_id['category'], asset_id['name'])
return assets, count
def set_attr(self, asset_key, attr, value=True):
"""
Add/set the given attr on the asset at the given location. Does not allow overwriting gridFS built in
attrs such as _id, md5, uploadDate, length. Value can be any type which pymongo accepts.
Returns nothing
Raises NotFoundError if no such item exists
        Raises AttributeError if attr is one of the built-in attrs.
:param asset_key: an AssetKey
:param attr: which attribute to set
:param value: the value to set it to (any type pymongo accepts such as datetime, number, string)
"""
self.set_attrs(asset_key, {attr: value})
def get_attr(self, location, attr, default=None):
"""
Get the value of attr set on location. If attr is unset, it returns default. Unlike set, this accessor
does allow getting the value of reserved keywords.
:param location: a c4x asset location
"""
return self.get_attrs(location).get(attr, default)
def set_attrs(self, location, attr_dict):
"""
Like set_attr but sets multiple key value pairs.
Returns nothing.
Raises NotFoundError if no such item exists
        Raises AttributeError if attr_dict has any attrs which are one of the built-in attrs.
:param location: a c4x asset location
"""
for attr in attr_dict.iterkeys():
if attr in ['_id', 'md5', 'uploadDate', 'length']:
raise AttributeError("{} is a protected attribute.".format(attr))
asset_db_key, __ = self.asset_db_key(location)
# catch upsert error and raise NotFoundError if asset doesn't exist
result = self.fs_files.update({'_id': asset_db_key}, {"$set": attr_dict}, upsert=False)
if not result.get('updatedExisting', True):
raise NotFoundError(asset_db_key)
def get_attrs(self, location):
"""
Gets all of the attributes associated with the given asset. Note, returns even built in attrs
such as md5 which you cannot resubmit in an update; so, don't call set_attrs with the result of this
but only with the set of attrs you want to explicitly update.
The attrs will be a superset of _id, contentType, chunkSize, filename, uploadDate, & md5
:param location: a c4x asset location
"""
asset_db_key, __ = self.asset_db_key(location)
item = self.fs_files.find_one({'_id': asset_db_key})
if item is None:
raise NotFoundError(asset_db_key)
return item
def copy_all_course_assets(self, source_course_key, dest_course_key):
"""
See :meth:`.ContentStore.copy_all_course_assets`
This implementation fairly expensively copies all of the data
"""
source_query = query_for_course(source_course_key)
# it'd be great to figure out how to do all of this on the db server and not pull the bits over
for asset in self.fs_files.find(source_query):
asset_key = self.make_id_son(asset)
# don't convert from string until fs access
source_content = self.fs.get(asset_key)
if isinstance(asset_key, basestring):
asset_key = AssetKey.from_string(asset_key)
__, asset_key = self.asset_db_key(asset_key)
asset_key['org'] = dest_course_key.org
asset_key['course'] = dest_course_key.course
            if getattr(dest_course_key, 'deprecated', False):  # remove the run if it exists
if 'run' in asset_key:
del asset_key['run']
asset_id = asset_key
else: # add the run, since it's the last field, we're golden
asset_key['run'] = dest_course_key.run
asset_id = unicode(
dest_course_key.make_asset_key(asset_key['category'], asset_key['name']).for_branch(None)
)
self.fs.put(
source_content.read(),
_id=asset_id, filename=asset['filename'], content_type=asset['contentType'],
displayname=asset['displayname'], content_son=asset_key,
# thumbnail is not technically correct but will be functionally correct as the code
# only looks at the name which is not course relative.
thumbnail_location=asset['thumbnail_location'],
import_path=asset['import_path'],
# getattr b/c caching may mean some pickled instances don't have attr
locked=asset.get('locked', False)
)
def delete_all_course_assets(self, course_key):
"""
Delete all assets identified via this course_key. Dangerous operation which may remove assets
referenced by other runs or other courses.
:param course_key:
"""
course_query = query_for_course(course_key)
matching_assets = self.fs_files.find(course_query)
for asset in matching_assets:
asset_key = self.make_id_son(asset)
self.fs.delete(asset_key)
    # codifying the original order which pymongo used for the dicts coming out of location_to_dict;
    # stability of order is more important than sanity of order, as any changes to order make things
    # unfindable
ordered_key_fields = ['category', 'name', 'course', 'tag', 'org', 'revision']
@classmethod
def asset_db_key(cls, location):
"""
Returns the database _id and son structured lookup to find the given asset location.
"""
dbkey = SON((field_name, getattr(location, field_name)) for field_name in cls.ordered_key_fields)
if getattr(location, 'deprecated', False):
content_id = dbkey
else:
# NOTE, there's no need to state that run doesn't exist in the negative case b/c access via
# SON requires equivalence (same keys and values in exact same order)
dbkey['run'] = location.run
content_id = unicode(location.for_branch(None))
return content_id, dbkey
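    # Example of the two return shapes (illustrative values only): for a
    # deprecated (old-style) location, content_id is the SON itself, e.g.
    #     SON([('category', 'asset'), ('name', 'pic.jpg'), ('course', 'c101'),
    #          ('tag', 'c4x'), ('org', 'edX'), ('revision', None)])
    # For a new-style location, content_id is the unicode form of the asset
    # key (exact string depends on the opaque-keys serialization) and dbkey
    # is the SON above with an extra ('run', <run>) entry appended.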
def make_id_son(self, fs_entry):
"""
Change the _id field in fs_entry into the properly ordered SON or string
Args:
fs_entry: the element returned by self.fs_files.find
"""
_id_field = fs_entry.get('_id', fs_entry)
if isinstance(_id_field, basestring):
return _id_field
dbkey = SON((field_name, _id_field.get(field_name)) for field_name in self.ordered_key_fields)
if 'run' in _id_field:
# NOTE, there's no need to state that run doesn't exist in the negative case b/c access via
# SON requires equivalence (same keys and values in exact same order)
dbkey['run'] = _id_field['run']
fs_entry['_id'] = dbkey
return dbkey
def ensure_indexes(self):
        # Index needed thru 'category' by `_get_all_content_for_course` and others. That query also
        # takes a sort which can be `uploadDate` or `display_name`.
self.fs_files.create_index(
[
('_id.tag', pymongo.ASCENDING),
('_id.org', pymongo.ASCENDING),
('_id.course', pymongo.ASCENDING),
('_id.category', pymongo.ASCENDING)
],
sparse=True,
background=True
)
self.fs_files.create_index(
[
('content_son.org', pymongo.ASCENDING),
('content_son.course', pymongo.ASCENDING),
('uploadDate', pymongo.DESCENDING)
],
sparse=True,
background=True
)
self.fs_files.create_index(
[
('_id.org', pymongo.ASCENDING),
('_id.course', pymongo.ASCENDING),
('_id.name', pymongo.ASCENDING)
],
sparse=True,
background=True
)
self.fs_files.create_index(
[
('content_son.org', pymongo.ASCENDING),
('content_son.course', pymongo.ASCENDING),
('content_son.name', pymongo.ASCENDING)
],
sparse=True,
background=True
)
self.fs_files.create_index(
[
('_id.org', pymongo.ASCENDING),
('_id.course', pymongo.ASCENDING),
('uploadDate', pymongo.ASCENDING)
],
sparse=True,
background=True
)
self.fs_files.create_index(
[
('_id.org', pymongo.ASCENDING),
('_id.course', pymongo.ASCENDING),
('display_name', pymongo.ASCENDING)
],
sparse=True,
background=True
)
self.fs_files.create_index(
[
('content_son.org', pymongo.ASCENDING),
('content_son.course', pymongo.ASCENDING),
('uploadDate', pymongo.ASCENDING)
],
sparse=True,
background=True
)
self.fs_files.create_index(
[
('content_son.org', pymongo.ASCENDING),
('content_son.course', pymongo.ASCENDING),
('display_name', pymongo.ASCENDING)
],
sparse=True,
background=True
)
def query_for_course(course_key, category=None):
"""
Construct a SON object that will query for all assets possibly limited to the given type
(thumbnail v assets) in the course using the index in mongo_indexes.md
"""
if getattr(course_key, 'deprecated', False):
prefix = '_id'
else:
prefix = 'content_son'
dbkey = SON([
('{}.tag'.format(prefix), XASSET_LOCATION_TAG),
('{}.org'.format(prefix), course_key.org),
('{}.course'.format(prefix), course_key.course),
])
if category:
dbkey['{}.category'.format(prefix)] = category
if getattr(course_key, 'deprecated', False):
dbkey['{}.run'.format(prefix)] = {'$exists': False}
else:
dbkey['{}.run'.format(prefix)] = course_key.run
return dbkey
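# Illustrative result (placeholder values): for a non-deprecated course key
# with org='edX', course='c101', run='2014' and category='asset', the query is
# roughly
#     SON([('content_son.tag', XASSET_LOCATION_TAG),
#          ('content_son.org', 'edX'),
#          ('content_son.course', 'c101'),
#          ('content_son.category', 'asset'),
#          ('content_son.run', '2014')])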
| agpl-3.0 | 8,510,748,736,491,291,000 | 41.168357 | 121 | 0.578094 | false |
castroflavio/ryu | ryu/contrib/tinyrpc/exc.py | 44 | 1239 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class RPCError(Exception):
"""Base class for all excetions thrown by :py:mod:`tinyrpc`."""
class BadRequestError(RPCError):
"""Base class for all errors that caused the processing of a request to
abort before a request object could be instantiated."""
def error_respond(self):
"""Create :py:class:`~tinyrpc.RPCErrorResponse` to respond the error.
:return: A error responce instance or ``None``, if the protocol decides
to drop the error silently."""
raise RuntimeError('Not implemented')
class BadReplyError(RPCError):
"""Base class for all errors that caused processing of a reply to abort
    before it could be turned into a response object."""
class InvalidRequestError(BadRequestError):
"""A request made was malformed (i.e. violated the specification) and could
not be parsed."""
class InvalidReplyError(BadReplyError):
"""A reply received was malformed (i.e. violated the specification) and
could not be parsed into a response."""
class MethodNotFoundError(RPCError):
"""The desired method was not found."""
class ServerError(RPCError):
"""An internal error in the RPC system occured."""
| apache-2.0 | -4,202,040,209,265,694,000 | 29.975 | 79 | 0.697337 | false |
sundresh/organize_photos | main/exif_cache.py | 1 | 6076 | #
# organize_photos.py: (C) 2011-2014 Sameer Sundresh. No warranty.
#
# exif_cache.py is a helper for organize_photos.py
#
# It maintains a cache file exif_cache.json in the source directory that
# keeps track of which files have already been copied out of that source
# directory, including where they were copied and their size and filename.
#
# Note that this cache has only been tested in the case where there is
# just one destination directory. If you plan to use this script with the
# same SD card on multiple computers (for example), you should check to
# make sure that it actually works correctly!
#
import json, logging, os, os.path, time
_TIME_PRINT_FORMAT = '%Y-%m-%d %H:%M:%S UTC'
_TIME_PARSE_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
def format_time(timestamp):
return time.strftime(_TIME_PRINT_FORMAT, time.gmtime(timestamp))
def parse_time(time_string):
return (time.mktime(time.strptime(time_string, _TIME_PARSE_FORMAT)) - time.timezone)
def is_direct_rel_path(path):
if path[0] == '/':
return False
path = os.path.join('/', path)
return path == os.path.abspath(path)
def backup_file(file_path):
if os.path.lexists(file_path):
i = 0
while True:
i += 1
bak_path = '%s.bak%i' % (file_path, i)
if not os.path.lexists(bak_path):
os.rename(file_path, bak_path)
return bak_path
def time_close_enough(t0, t1, is_src=False):
if is_src:
return -10 <= ((t0 - t1) - round((t0 - t1) / 3600.0) * 3600) <= 10
else:
return -10 <= (t0 - t1) <= 10
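# Worked example of the tolerance above (added commentary): with is_src=True,
# a difference of exactly one hour passes, since
#     3600 - round(3600 / 3600.0) * 3600 == 0,
# which falls in the +/-10 second window -- this forgives whole-hour timezone
# offsets between the camera clock and the destination filesystem, while
# is_src=False demands a plain +/-10 second match.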
class ExifCache(object):
def __init__(self, src_dir_path, dest_dir_path, autosave_interval=0):
self.src_dir_path = src_dir_path
self.dest_dir_path = dest_dir_path
self.autosave_interval = autosave_interval
self._adds_since_last_save = 0
print 'Loading EXIF cache...'
self.data = self._load()
def _load(self):
# Read the JSON EXIF cache data
exif_cache_path = os.path.join(self.src_dir_path, 'exif_cache.json')
if os.path.lexists(exif_cache_path):
assert not os.path.islink(exif_cache_path)
with open(exif_cache_path, 'r') as f:
exif_cache_data = json.load(f)
else:
exif_cache_data = { }
# Check that the EXIF cache data is well-formed,
# and parse all the time strings as timestamps.
data = { }
for entry in exif_cache_data.iteritems():
try:
(src_img_path, [dest_img_path, size, time_string]) = entry
assert is_direct_rel_path(src_img_path)
assert is_direct_rel_path(dest_img_path)
assert (type(size) == int) and (size >= 0)
timestamp = parse_time(time_string)
data[src_img_path] = (dest_img_path, size, timestamp)
except:
logging.error('Could not decode EXIF cache entry %s' % repr(entry))
return data
def save(self):
if self._adds_since_last_save == 0:
return
print 'Saving EXIF cache...'
# Check that the EXIF cache data is well-formed,
# and format all the timestamps as time string.
exif_cache_data = { }
for (src_img_path, (dest_img_path, size, timestamp)) in self.data.iteritems():
assert is_direct_rel_path(src_img_path)
assert is_direct_rel_path(dest_img_path)
assert (type(size) == int) and (size >= 0)
time_string = format_time(timestamp)
exif_cache_data[src_img_path] = (dest_img_path, size, time_string)
# Backup the old JSON EXIF cache data and write the new data
exif_cache_path = os.path.join(self.src_dir_path, 'exif_cache.json')
backup_file_path = backup_file(exif_cache_path)
with open(exif_cache_path, 'w') as f:
json.dump(exif_cache_data, f)
# Check that the data was written correctly, and if so, remove the backup
if self._load() == self.data:
if backup_file_path:
os.remove(backup_file_path)
else:
logging.error('Error saving EXIF cache')
# Should raise an exception...
self._adds_since_last_save = 0
def check(self, src_img_path):
try:
# Get cache entry
rel_src_img_path = os.path.relpath(src_img_path, self.src_dir_path)
(rel_dest_img_path, size, timestamp) = self.data.get(rel_src_img_path)
# Absolute dest_img_path
dest_img_path = os.path.join(self.dest_dir_path, rel_dest_img_path)
# Check file paths exist
assert os.path.exists(src_img_path) and os.path.exists(dest_img_path)
# Check file sizes match
assert os.path.getsize(src_img_path) == size == os.path.getsize(dest_img_path)
# Check file mtimes match
#assert time_close_enough(os.path.getmtime(src_img_path), timestamp, is_src=True)
assert time_close_enough(os.path.getmtime(dest_img_path), timestamp)
return True
except:
return False
def add(self, src_img_path, dest_img_path):
# Check file paths exist
assert os.path.exists(src_img_path) and os.path.exists(dest_img_path)
rel_src_img_path = os.path.relpath(src_img_path, self.src_dir_path)
rel_dest_img_path = os.path.relpath(dest_img_path, self.dest_dir_path)
# Check file sizes match
size = os.path.getsize(src_img_path)
assert os.path.getsize(dest_img_path) == size
# Check file mtimes match
timestamp = os.path.getmtime(src_img_path)
#assert time_close_enough(os.path.getmtime(dest_img_path), timestamp, is_src=True)
# Write to cache
self.data[rel_src_img_path] = (rel_dest_img_path, size, timestamp)
# Autosave
self._adds_since_last_save += 1
if self.autosave_interval > 0 and self._adds_since_last_save >= self.autosave_interval:
self.save()
| agpl-3.0 | 3,503,629,716,021,574,700 | 39.506667 | 95 | 0.60056 | false |
timedcy/python-goose | goose/article.py | 9 | 3085 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Article(object):
def __init__(self):
# title of the article
self.title = None
# stores the lovely, pure text from the article,
# stripped of html, formatting, etc...
# just raw text with paragraphs separated by newlines.
# This is probably what you want to use.
self.cleaned_text = u""
# meta description field in HTML source
self.meta_description = u""
# meta lang field in HTML source
self.meta_lang = u""
# meta favicon field in HTML source
self.meta_favicon = u""
# meta keywords field in the HTML source
self.meta_keywords = u""
# The canonical link of this article if found in the meta data
self.canonical_link = u""
# holds the domain of this article we're parsing
self.domain = u""
# holds the top Element we think
# is a candidate for the main body of the article
self.top_node = None
# holds the top Image object that
# we think represents this article
self.top_image = None
# holds a set of tags that may have
        # been in the article, these are not meta keywords
self.tags = set()
# holds a list of any movies
# we found on the page like youtube, vimeo
self.movies = []
# stores the final URL that we're going to try
# and fetch content against, this would be expanded if any
self.final_url = u""
# stores the MD5 hash of the url
# to use for various identification tasks
self.link_hash = ""
# stores the RAW HTML
# straight from the network connection
self.raw_html = u""
# the lxml Document object
self.doc = None
# this is the original JSoup document that contains
# a pure object from the original HTML without any cleaning
# options done on it
self.raw_doc = None
# Sometimes useful to try and know when
# the publish date of an article was
self.publish_date = None
# A property bucket for consumers of goose to store custom data extractions.
self.additional_data = {}
| apache-2.0 | -5,399,423,943,165,036,000 | 31.135417 | 84 | 0.652188 | false |
m11s/MissionPlanner | Lib/site-packages/scipy/misc/doccer.py | 55 | 3935 | ''' Utilities to allow inserting docstring fragments for common
parameters into function and method docstrings'''
import sys
def docformat(docstring, docdict=None):
''' Fill a function docstring from variables in dictionary
Adapt the indent of the inserted docs
Parameters
----------
docstring : string
docstring from function, possibly with dict formatting strings
docdict : dict
dictionary with keys that match the dict formatting strings
and values that are docstring fragments to be inserted. The
indentation of the inserted docstrings is set to match the
minimum indentation of the ``docstring`` by adding this
indentation to all lines of the inserted string, except the
first
Returns
-------
outstring : string
string with requested ``docdict`` strings inserted
Examples
--------
>>> docformat(' Test string with %(value)s', {'value':'inserted value'})
' Test string with inserted value'
>>> docstring = 'First line\\n Second line\\n %(value)s'
>>> inserted_string = "indented\\nstring"
>>> docdict = {'value': inserted_string}
>>> docformat(docstring, docdict)
'First line\\n Second line\\n indented\\n string'
'''
if not docstring:
return docstring
if docdict is None:
docdict = {}
if not docdict:
return docstring
lines = docstring.expandtabs().splitlines()
# Find the minimum indent of the main docstring, after first line
if len(lines) < 2:
icount = 0
else:
icount = indentcount_lines(lines[1:])
indent = ' ' * icount
# Insert this indent to dictionary docstrings
indented = {}
for name, dstr in docdict.items():
lines = dstr.expandtabs().splitlines()
try:
newlines = [lines[0]]
for line in lines[1:]:
newlines.append(indent+line)
indented[name] = '\n'.join(newlines)
except IndexError:
indented[name] = dstr
return docstring % indented
def indentcount_lines(lines):
    ''' Minimum indent for all lines in line list
>>> lines = [' one', ' two', ' three']
>>> indentcount_lines(lines)
1
>>> lines = []
>>> indentcount_lines(lines)
0
>>> lines = [' one']
>>> indentcount_lines(lines)
1
>>> indentcount_lines([' '])
0
'''
indentno = sys.maxint
for line in lines:
stripped = line.lstrip()
if stripped:
indentno = min(indentno, len(line) - len(stripped))
if indentno == sys.maxint:
return 0
return indentno
def filldoc(docdict, unindent_params=True):
''' Return docstring decorator using docdict variable dictionary
Parameters
----------
docdict : dictionary
dictionary containing name, docstring fragment pairs
unindent_params : {False, True}, boolean, optional
If True, strip common indentation from all parameters in
docdict
Returns
-------
decfunc : function
decorator that applies dictionary to input function docstring
'''
if unindent_params:
docdict = unindent_dict(docdict)
def decorate(f):
f.__doc__ = docformat(f.__doc__, docdict)
return f
return decorate
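# Illustrative usage of the decorator above (hypothetical docdict, not part
# of this module):
#     docdict = {'params': 'x : int\n    The input value.'}
#
#     @filldoc(docdict)
#     def f(x):
#         """Do something.
#
#         Parameters
#         ----------
#         %(params)s
#         """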
def unindent_dict(docdict):
''' Unindent all strings in a docdict '''
can_dict = {}
for name, dstr in docdict.items():
can_dict[name] = unindent_string(dstr)
return can_dict
def unindent_string(docstring):
''' Set docstring to minimum indent for all lines, including first
>>> unindent_string(' two')
'two'
>>> unindent_string(' two\\n three')
'two\\n three'
'''
lines = docstring.expandtabs().splitlines()
icount = indentcount_lines(lines)
if icount == 0:
return docstring
return '\n'.join([line[icount:] for line in lines])
| gpl-3.0 | 1,370,763,946,041,247,700 | 28.148148 | 76 | 0.614231 | false |
vacancy/TensorArtist | tartist/nn/optimizer/grad_modifier.py | 1 | 3480 | # -*- coding:utf8 -*-
# File : grad_modifier.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 1/28/17
#
# This file is part of TensorArtist.
import tensorflow as tf
from ..tfutils import clean_name
from ...core.logger import get_logger
from ...core.utils.match import NameMatcher
logger = get_logger(__file__)
__all__ = [
'GradModifierBase', 'NameBasedGradModifierBase', 'GlobalGradModifierBase',
'LearningRateMultiplier', 'WeightDecay',
'GradClip', 'GlobalGradClip', 'GlobalGradClipByAvgNorm'
]
class GradModifierBase(object):
def __call__(self, grads_and_vars):
grads_and_vars = self._do_modification(grads_and_vars)
return grads_and_vars
def _do_modification(self, grad_and_vars):
raise NotImplementedError()
class NameBasedGradModifierBase(GradModifierBase):
def __init__(self, rules=None):
self._matcher = NameMatcher(rules)
@property
def matcher(self):
return self._matcher
def _do_modification(self, grad_and_vars):
self._matcher.begin()
res = []
for g, v in grad_and_vars:
name = clean_name(v)
rule_v = self._matcher.match(name)
if g is not None:
if rule_v is not None:
g = self._op(g, v, rule_v)
res.append((g, v))
matched, unused = self._matcher.end()
if len(matched) > 0:
log_msgs = ['\tuse {} for {} (match: {})'.format(v, k, p) for k, p, v in matched]
log_msgs.insert(0, 'Log grad modification for {}:'.format(self.__class__.__name__))
logger.info('\n'.join(log_msgs))
if len(unused) > 0:
log_msg = 'Log grad modification for {}: unused patterns are {}.'.format(self.__class__.__name__, unused)
logger.warn(log_msg)
return res
def _op(self, grad, var, rule):
raise NotImplementedError()
class GlobalGradModifierBase(GradModifierBase):
def _do_modification(self, grad_and_vars):
res = []
for g, v in grad_and_vars:
if g is not None:
g = self._op(g, v)
res.append((g, v))
return res
def _op(self, grad, var):
raise NotImplementedError()
class LearningRateMultiplier(NameBasedGradModifierBase):
def _op(self, grad, var, rule):
return grad * rule
class WeightDecay(NameBasedGradModifierBase):
def _op(self, grad, var, rule):
return grad + var * rule
class GradClip(NameBasedGradModifierBase):
def _op(self, grad, var, rule):
if type(rule) in (tuple, list):
assert len(rule) == 2, rule
lower, upper = rule
else:
rule = float(rule)
lower, upper = -rule, rule
_ = grad
_ = tf.minimum(_, upper)
_ = tf.maximum(_, lower)
return _
class GlobalGradClip(GlobalGradModifierBase):
def __init__(self, lower, upper=None):
if upper is None:
lower, upper = -lower, lower
self._lower, self._upper = lower, upper
def _op(self, grad, var):
_ = grad
_ = tf.minimum(_, self._upper)
_ = tf.maximum(_, self._lower)
return _
class GlobalGradClipByAvgNorm(GlobalGradModifierBase):
def __init__(self, clip_norm):
self._clip_norm = clip_norm
def _op(self, grad, var):
_ = grad
_ = tf.clip_by_average_norm(_, self._clip_norm)
return _
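# Illustrative composition (added example, not part of the original source);
# the (pattern, value) rule pairs below assume the NameMatcher convention:
#     modifiers = [
#         WeightDecay([('.*/W', 1e-4)]),             # decay weight matrices
#         LearningRateMultiplier([('fc/.*', 0.1)]),  # slow down fc layers
#         GlobalGradClipByAvgNorm(10.0),
#     ]
#     for m in modifiers:
#         grads_and_vars = m(grads_and_vars)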
| mit | 2,537,123,683,788,178,000 | 26.619048 | 117 | 0.576724 | false |
william-richard/moto | tests/test_ec2/test_vpn_connections.py | 2 | 2787 | from __future__ import unicode_literals
import boto
import boto3
import pytest
import sure # noqa
from boto.exception import EC2ResponseError
from moto import mock_ec2, mock_ec2_deprecated
@mock_ec2_deprecated
def test_create_vpn_connections():
conn = boto.connect_vpc("the_key", "the_secret")
vpn_connection = conn.create_vpn_connection(
"ipsec.1", "vgw-0123abcd", "cgw-0123abcd"
)
vpn_connection.should_not.be.none
vpn_connection.id.should.match(r"vpn-\w+")
vpn_connection.type.should.equal("ipsec.1")
@mock_ec2_deprecated
def test_delete_vpn_connections():
conn = boto.connect_vpc("the_key", "the_secret")
vpn_connection = conn.create_vpn_connection(
"ipsec.1", "vgw-0123abcd", "cgw-0123abcd"
)
list_of_vpn_connections = conn.get_all_vpn_connections()
list_of_vpn_connections.should.have.length_of(1)
conn.delete_vpn_connection(vpn_connection.id)
list_of_vpn_connections = conn.get_all_vpn_connections()
list_of_vpn_connections.should.have.length_of(0)
@mock_ec2_deprecated
def test_delete_vpn_connections_bad_id():
conn = boto.connect_vpc("the_key", "the_secret")
with pytest.raises(EC2ResponseError):
conn.delete_vpn_connection("vpn-0123abcd")
@mock_ec2_deprecated
def test_describe_vpn_connections():
conn = boto.connect_vpc("the_key", "the_secret")
list_of_vpn_connections = conn.get_all_vpn_connections()
list_of_vpn_connections.should.have.length_of(0)
conn.create_vpn_connection("ipsec.1", "vgw-0123abcd", "cgw-0123abcd")
list_of_vpn_connections = conn.get_all_vpn_connections()
list_of_vpn_connections.should.have.length_of(1)
vpn = conn.create_vpn_connection("ipsec.1", "vgw-1234abcd", "cgw-1234abcd")
list_of_vpn_connections = conn.get_all_vpn_connections()
list_of_vpn_connections.should.have.length_of(2)
list_of_vpn_connections = conn.get_all_vpn_connections(vpn.id)
list_of_vpn_connections.should.have.length_of(1)
@mock_ec2
def test_create_vpn_connection_with_vpn_gateway():
client = boto3.client("ec2", region_name="us-east-1")
vpn_gateway = client.create_vpn_gateway(Type="ipsec.1").get("VpnGateway", {})
customer_gateway = client.create_customer_gateway(
Type="ipsec.1", PublicIp="205.251.242.54", BgpAsn=65534,
).get("CustomerGateway", {})
vpn_connection = client.create_vpn_connection(
Type="ipsec.1",
VpnGatewayId=vpn_gateway["VpnGatewayId"],
CustomerGatewayId=customer_gateway["CustomerGatewayId"],
).get("VpnConnection", {})
vpn_connection["Type"].should.equal("ipsec.1")
vpn_connection["VpnGatewayId"].should.equal(vpn_gateway["VpnGatewayId"])
vpn_connection["CustomerGatewayId"].should.equal(
customer_gateway["CustomerGatewayId"]
)
| apache-2.0 | -3,591,734,099,145,872,000 | 36.16 | 81 | 0.701112 | false |
Moriadry/tensorflow | tensorflow/contrib/distributions/python/ops/quantized_distribution.py | 81 | 17379 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distributions
from tensorflow.python.ops.distributions import util as distribution_util
__all__ = ["QuantizedDistribution"]
def _logsum_expbig_minus_expsmall(big, small):
"""Stable evaluation of `Log[exp{big} - exp{small}]`.
To work correctly, we should have the pointwise relation: `small <= big`.
Args:
big: Floating-point `Tensor`
small: Floating-point `Tensor` with same `dtype` as `big` and broadcastable
shape.
Returns:
`Tensor` of same `dtype` of `big` and broadcast shape.
"""
with ops.name_scope("logsum_expbig_minus_expsmall", values=[small, big]):
return math_ops.log(1. - math_ops.exp(small - big)) + big
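# Editor's sketch (not part of the original module): a pure-Python analogue
# showing why the factored form above is numerically stable. The helper name
# is hypothetical and exists only for this illustration.
def _demo_logsum_expbig_minus_expsmall(big=-1.0, small=-40.0):
  """Compares naive and factored evaluations of Log[exp{big} - exp{small}]."""
  import math
  naive = math.log(math.exp(big) - math.exp(small))
  # Factored form: only small - big (always <= 0) is exponentiated, so it
  # cannot overflow even when `big` itself is large in magnitude.
  stable = math.log(1. - math.exp(small - big)) + big
  return naive, stable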
_prob_base_note = """
For whole numbers `y`,
```
P[Y = y] := P[X <= low], if y == low,
:= P[X > high - 1], y == high,
:= 0, if y < low or y > high,
:= P[y - 1 < X <= y], all other y.
```
"""
_prob_note = _prob_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`. If the
base distribution has a `survival_function` method, results will be more
accurate for large values of `y`, and in this case the `survival_function` must
also be defined on `y - 1`.
"""
_log_prob_note = _prob_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`. If the
base distribution has a `log_survival_function` method results will be more
accurate for large values of `y`, and in this case the `log_survival_function`
must also be defined on `y - 1`.
"""
_cdf_base_note = """
For whole numbers `y`,
```
cdf(y) := P[Y <= y]
= 1, if y >= high,
= 0, if y < low,
= P[X <= y], otherwise.
```
Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`.
This dictates that fractional `y` are first floored to a whole number, and
then the above definition applies.
"""
_cdf_note = _cdf_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`.
"""
_log_cdf_note = _cdf_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`.
"""
_sf_base_note = """
For whole numbers `y`,
```
survival_function(y) := P[Y > y]
= 0, if y >= high,
= 1, if y < low,
= P[X > y], otherwise.
```
Since `Y` only has mass at whole numbers, fractional `y` are first rounded
up to a whole number with `ceiling(y)` by this method, and then the above
definition applies.
"""
_sf_note = _sf_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`.
"""
_log_sf_note = _sf_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`.
"""
class QuantizedDistribution(distributions.Distribution):
"""Distribution representing the quantization `Y = ceiling(X)`.
#### Definition in terms of sampling.
```
1. Draw X
2. Set Y <-- ceiling(X)
3. If Y < low, reset Y <-- low
4. If Y > high, reset Y <-- high
5. Return Y
```
#### Definition in terms of the probability mass function.
Given scalar random variable `X`, we define a discrete random variable `Y`
supported on the integers as follows:
```
P[Y = j] := P[X <= low], if j == low,
:= P[X > high - 1], j == high,
:= 0, if j < low or j > high,
:= P[j - 1 < X <= j], all other j.
```
Conceptually, without cutoffs, the quantization process partitions the real
line `R` into half open intervals, and identifies an integer `j` with the
right endpoints:
```
R = ... (-2, -1](-1, 0](0, 1](1, 2](2, 3](3, 4] ...
j = ... -1 0 1 2 3 4 ...
```
`P[Y = j]` is the mass of `X` within the `jth` interval.
If `low = 0`, and `high = 2`, then the intervals are redrawn
and `j` is re-assigned:
```
R = (-infty, 0](0, 1](1, infty)
j = 0 1 2
```
`P[Y = j]` is still the mass of `X` within the `jth` interval.
#### Caveats
Since evaluation of each `P[Y = j]` involves a cdf evaluation (rather than
a closed form function such as for a Poisson), computations such as mean and
entropy are better done with samples or approximations, and are not
implemented by this class.
"""
def __init__(self,
distribution,
low=None,
high=None,
validate_args=False,
name="QuantizedDistribution"):
"""Construct a Quantized Distribution representing `Y = ceiling(X)`.
Some properties are inherited from the distribution defining `X`. Example:
`allow_nan_stats` is determined for this `QuantizedDistribution` by reading
the `distribution`.
Args:
distribution: The base distribution class to transform. Typically an
instance of `Distribution`.
low: `Tensor` with same `dtype` as this distribution and shape
able to be added to samples. Should be a whole number. Default `None`.
If provided, base distribution's `prob` should be defined at
`low`.
high: `Tensor` with same `dtype` as this distribution and shape
able to be added to samples. Should be a whole number. Default `None`.
If provided, base distribution's `prob` should be defined at
`high - 1`.
`high` must be strictly greater than `low`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: If `distribution` is not a subclass of
`Distribution` or is not continuous.
NotImplementedError: If the base distribution does not implement `cdf`.
"""
parameters = locals()
values = (
list(distribution.parameters.values()) +
[low, high])
with ops.name_scope(name, values=values):
self._dist = distribution
if low is not None:
low = ops.convert_to_tensor(low, name="low")
if high is not None:
high = ops.convert_to_tensor(high, name="high")
check_ops.assert_same_float_dtype(
tensors=[self.distribution, low, high])
# We let QuantizedDistribution access _graph_parents since this class is
# more like a baseclass.
graph_parents = self._dist._graph_parents # pylint: disable=protected-access
checks = []
if validate_args and low is not None and high is not None:
message = "low must be strictly less than high."
checks.append(
check_ops.assert_less(
low, high, message=message))
self._validate_args = validate_args # self._check_integer uses this.
with ops.control_dependencies(checks if validate_args else []):
if low is not None:
self._low = self._check_integer(low)
graph_parents += [self._low]
else:
self._low = None
if high is not None:
self._high = self._check_integer(high)
graph_parents += [self._high]
else:
self._high = None
super(QuantizedDistribution, self).__init__(
dtype=self._dist.dtype,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=self._dist.allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=name)
def _batch_shape_tensor(self):
return self.distribution.batch_shape_tensor()
def _batch_shape(self):
return self.distribution.batch_shape
def _event_shape_tensor(self):
return self.distribution.event_shape_tensor()
def _event_shape(self):
return self.distribution.event_shape
def _sample_n(self, n, seed=None):
low = self._low
high = self._high
with ops.name_scope("transform"):
n = ops.convert_to_tensor(n, name="n")
x_samps = self.distribution.sample(n, seed=seed)
ones = array_ops.ones_like(x_samps)
# Snap values to the intervals (j - 1, j].
result_so_far = math_ops.ceil(x_samps)
if low is not None:
result_so_far = array_ops.where(result_so_far < low,
low * ones, result_so_far)
if high is not None:
result_so_far = array_ops.where(result_so_far > high,
high * ones, result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_log_prob_note)
def _log_prob(self, y):
if not hasattr(self.distribution, "_log_cdf"):
raise NotImplementedError(
"'log_prob' not implemented unless the base distribution implements "
"'log_cdf'")
y = self._check_integer(y)
try:
return self._log_prob_with_logsf_and_logcdf(y)
except NotImplementedError:
return self._log_prob_with_logcdf(y)
def _log_prob_with_logcdf(self, y):
return _logsum_expbig_minus_expsmall(self.log_cdf(y), self.log_cdf(y - 1))
def _log_prob_with_logsf_and_logcdf(self, y):
"""Compute log_prob(y) using log survival_function and cdf together."""
# There are two options that would be equal if we had infinite precision:
# Log[ sf(y - 1) - sf(y) ]
# = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ]
# Log[ cdf(y) - cdf(y - 1) ]
# = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ]
logsf_y = self.log_survival_function(y)
logsf_y_minus_1 = self.log_survival_function(y - 1)
logcdf_y = self.log_cdf(y)
logcdf_y_minus_1 = self.log_cdf(y - 1)
# Important: Here we use select in a way such that no input is inf, this
# prevents the troublesome case where the output of select can be finite,
# but the output of grad(select) will be NaN.
# In either case, we are doing Log[ exp{big} - exp{small} ]
# We want to use the sf items precisely when we are on the right side of the
# median, which occurs when logsf_y < logcdf_y.
big = array_ops.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y)
small = array_ops.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1)
return _logsum_expbig_minus_expsmall(big, small)
@distribution_util.AppendDocstring(_prob_note)
def _prob(self, y):
if not hasattr(self.distribution, "_cdf"):
raise NotImplementedError(
"'prob' not implemented unless the base distribution implements "
"'cdf'")
y = self._check_integer(y)
try:
return self._prob_with_sf_and_cdf(y)
except NotImplementedError:
return self._prob_with_cdf(y)
def _prob_with_cdf(self, y):
return self.cdf(y) - self.cdf(y - 1)
def _prob_with_sf_and_cdf(self, y):
# There are two options that would be equal if we had infinite precision:
# sf(y - 1) - sf(y)
# cdf(y) - cdf(y - 1)
sf_y = self.survival_function(y)
sf_y_minus_1 = self.survival_function(y - 1)
cdf_y = self.cdf(y)
cdf_y_minus_1 = self.cdf(y - 1)
# sf_prob has greater precision iff we're on the right side of the median.
return array_ops.where(
sf_y < cdf_y, # True iff we're on the right side of the median.
sf_y_minus_1 - sf_y,
cdf_y - cdf_y_minus_1)
@distribution_util.AppendDocstring(_log_cdf_note)
def _log_cdf(self, y):
low = self._low
high = self._high
# Recall the promise:
# cdf(y) := P[Y <= y]
# = 1, if y >= high,
# = 0, if y < low,
# = P[X <= y], otherwise.
# P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
# between.
j = math_ops.floor(y)
result_so_far = self.distribution.log_cdf(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if low is not None:
neg_inf = -np.inf * array_ops.ones_like(result_so_far)
result_so_far = array_ops.where(j < low, neg_inf, result_so_far)
if high is not None:
result_so_far = array_ops.where(j >= high,
array_ops.zeros_like(result_so_far),
result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_cdf_note)
def _cdf(self, y):
low = self._low
high = self._high
# Recall the promise:
# cdf(y) := P[Y <= y]
# = 1, if y >= high,
# = 0, if y < low,
# = P[X <= y], otherwise.
# P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
# between.
j = math_ops.floor(y)
# P[X <= j], used when low < X < high.
result_so_far = self.distribution.cdf(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if low is not None:
result_so_far = array_ops.where(j < low,
array_ops.zeros_like(result_so_far),
result_so_far)
if high is not None:
result_so_far = array_ops.where(j >= high,
array_ops.ones_like(result_so_far),
result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_log_sf_note)
def _log_survival_function(self, y):
low = self._low
high = self._high
# Recall the promise:
# survival_function(y) := P[Y > y]
# = 0, if y >= high,
# = 1, if y < low,
# = P[X > y], otherwise.
# P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
# between.
j = math_ops.ceil(y)
# P[X > j], used when low < X < high.
result_so_far = self.distribution.log_survival_function(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if low is not None:
result_so_far = array_ops.where(j < low,
array_ops.zeros_like(result_so_far),
result_so_far)
if high is not None:
neg_inf = -np.inf * array_ops.ones_like(result_so_far)
result_so_far = array_ops.where(j >= high, neg_inf, result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_sf_note)
def _survival_function(self, y):
low = self._low
high = self._high
# Recall the promise:
# survival_function(y) := P[Y > y]
# = 0, if y >= high,
# = 1, if y < low,
# = P[X > y], otherwise.
# P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
# between.
j = math_ops.ceil(y)
# P[X > j], used when low < X < high.
result_so_far = self.distribution.survival_function(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if low is not None:
result_so_far = array_ops.where(j < low,
array_ops.ones_like(result_so_far),
result_so_far)
if high is not None:
result_so_far = array_ops.where(j >= high,
array_ops.zeros_like(result_so_far),
result_so_far)
return result_so_far
def _check_integer(self, value):
with ops.name_scope("check_integer", values=[value]):
value = ops.convert_to_tensor(value, name="value")
if not self.validate_args:
return value
dependencies = [distribution_util.assert_integer_form(
value, message="value has non-integer components.")]
return control_flow_ops.with_dependencies(dependencies, value)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._dist
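# Editor's sketch (not part of the original module): minimal usage, assuming
# the core Normal distribution is importable at the path below in this tree.
def _demo_quantized_normal():
  """Quantizes a Normal onto the integers 0..10 and queries its pmf."""
  from tensorflow.python.ops.distributions import normal as normal_lib
  qdist = QuantizedDistribution(
      distribution=normal_lib.Normal(loc=5., scale=2.),
      low=0.,
      high=10.)
  # P[Y = 5] = P[4 < X <= 5]; mass below 0 piles onto Y = 0, above 10 onto 10.
  return qdist.prob(5.)  # a scalar Tensor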
| apache-2.0 | 6,139,454,608,078,837,000 | 33.143418 | 83 | 0.601358 | false |
andreparames/odoo | addons/account_bank_statement_extensions/report/bank_statement_balance_report.py | 378 | 2723 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class bank_statement_balance_report(report_sxw.rml_parse):
def set_context(self, objects, data, ids, report_type=None):
cr = self.cr
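        # Editor's note: for each journal, the subquery selects the date of
        # the most recent bank statement, so the report shows one line per
        # journal with its latest closing balance.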
cr.execute('SELECT s.name as s_name, s.date AS s_date, j.code as j_code, s.balance_end_real as s_balance ' \
'FROM account_bank_statement s ' \
'INNER JOIN account_journal j on s.journal_id = j.id ' \
'INNER JOIN ' \
'(SELECT journal_id, max(date) as max_date FROM account_bank_statement ' \
'GROUP BY journal_id) d ' \
'ON (s.journal_id = d.journal_id AND s.date = d.max_date) ' \
'ORDER BY j.code')
lines = cr.dictfetchall()
self.localcontext.update( {
'lines': lines,
})
super(bank_statement_balance_report, self).set_context(objects, data, ids, report_type=report_type)
def __init__(self, cr, uid, name, context):
if context is None:
context = {}
super(bank_statement_balance_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
})
self.context = context
class report_bankstatementbalance(osv.AbstractModel):
_name = 'report.account_bank_statement_extensions.report_bankstatementbalance'
_inherit = 'report.abstract_report'
_template = 'account_bank_statement_extensions.report_bankstatementbalance'
_wrapped_report_class = bank_statement_balance_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,009,245,642,738,804,000 | 41.546875 | 116 | 0.598972 | false |
softak/webfaction_demo | vendor-local/lib/python/django_any/contrib/auth.py | 3 | 1115 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User, Permission, Group
from django_any import any_model
def any_user(password=None, permissions=[], groups=[], **kwargs):
"""
Shortcut for creating Users.
`permissions` is a list of permission names in 'app_label.codename' form;
`groups` is a list of existing group names.
If not specified otherwise, creates an active, non-superuser, non-staff user.
"""
is_active = kwargs.pop('is_active', True)
is_superuser = kwargs.pop('is_superuser', False)
is_staff = kwargs.pop('is_staff', False)
user = any_model(User, is_active = is_active, is_superuser = is_superuser,
is_staff = is_staff, **kwargs)
for group_name in groups:
group = Group.objects.get(name=group_name)
user.groups.add(group)
for permission_name in permissions:
app_label, codename = permission_name.split('.')
permission = Permission.objects.get(
content_type__app_label=app_label,
codename=codename)
user.user_permissions.add(permission)
if password:
user.set_password(password)
user.save()
return user
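# Editor's sketch (not part of the original module): typical call sites,
# assuming the named permission and group already exist in the database.
def _demo_any_user():
    staff = any_user(password='secret', is_staff=True)
    moderator = any_user(permissions=['auth.change_user'],
                         groups=['moderators'])
    return staff, moderator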
| bsd-3-clause | -8,831,076,806,846,703,000 | 28.342105 | 78 | 0.637668 | false |
isandlaTech/cohorte-runtime | python/src/lib/python/jinja2/ext.py | 603 | 25078 | # -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
Jinja extensions allow adding custom tags, similar to the way Django custom
tags work. By default two example extensions exist: an i18n and a cache
extension.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.environment import Environment
from jinja2.runtime import concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup
from jinja2._compat import next, with_metaclass, string_types, iteritems
# the only really useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non-unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
rv.identifier = rv.__module__ + '.' + rv.__name__
return rv
class Extension(with_metaclass(ExtensionRegistry, object)):
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
storing the configuration values on the environment. Because this way the
environment ends up acting as central configuration storage the
attributes may clash which is why extensions have to ensure that the names
they choose for configuration are not too generic. ``prefix`` for example
is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
name as it includes the name of the extension (fragment cache).
"""
#: if this extension parses this is the list of tags it's listening to.
tags = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment):
self.environment = environment
def bind(self, environment):
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(self, source, name, filename=None):
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(self, stream):
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
In the `ext` folder of the Jinja2 source distribution there is a file
called `inlinegettext.py` which implements a filter that utilizes this
method.
"""
return stream
def parse(self, parser):
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(self, name, lineno=None):
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(self, name, args=None, kwargs=None, dyn_args=None,
dyn_kwargs=None, lineno=None):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
dyn_args, dyn_kwargs, lineno=lineno)
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
return __context.call(__context.resolve('gettext'), *args, **kwargs)
def _make_new_gettext(func):
@contextfunction
def gettext(__context, __string, **variables):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
variables.setdefault('num', __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return ngettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja2."""
tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} fooss{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
Extension.__init__(self, environment)
environment.globals['_'] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False
)
def _install(self, translations, newstyle=None):
gettext = getattr(translations, 'ugettext', None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, 'ungettext', None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
lambda x: x,
lambda s, p, n: (n != 1 and (p,) or (s,))[0],
newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
if newstyle is not None:
self.environment.newstyle_gettext = newstyle
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
self.environment.globals.update(
gettext=gettext,
ngettext=ngettext
)
def _uninstall(self, translations):
for key in 'gettext', 'ngettext':
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
if isinstance(source, string_types):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
plural_expr = None
plural_expr_assignment = None
variables = {}
while parser.stream.current.type != 'block_end':
if variables:
parser.stream.expect('comma')
# skip colon for python compatibility
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
# expressions
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
if isinstance(var, nodes.Call):
plural_expr = nodes.Name('_trans', 'load')
variables[name.value] = plural_expr
plural_expr_assignment = nodes.Assign(
nodes.Name('_trans', 'store'), var)
else:
plural_expr = var
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural = plural_names = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], 'load')
num_called_num = singular_names[0] == 'num'
# if we have a pluralize block, we parse that too
if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
if parser.stream.current.type != 'block_end':
name = parser.stream.expect('name')
if name.value not in variables:
parser.fail('unknown variable %r for pluralization' %
name.value, name.lineno,
exc=TemplateAssertionError)
plural_expr = variables[name.value]
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, 'load')
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail('pluralize without variables', lineno)
node = self._make_node(singular, plural, variables, plural_expr,
bool(referenced),
num_called_num and have_plural)
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf)
def _make_node(self, singular, plural, variables, plural_expr,
vars_referenced, num_called_num):
"""Generates a useful node from the data provided."""
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not self.environment.newstyle_gettext:
singular = singular.replace('%%', '%')
if plural:
plural = plural.replace('%%', '%')
# singular only:
if plural_expr is None:
gettext = nodes.Name('gettext', 'load')
node = nodes.Call(gettext, [nodes.Const(singular)],
[], None, None)
# singular and plural
else:
ngettext = nodes.Name('ngettext', 'load')
node = nodes.Call(ngettext, [
nodes.Const(singular),
nodes.Const(plural),
plural_expr
], [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if self.environment.newstyle_gettext:
for key, value in iteritems(variables):
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == 'num':
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(node, nodes.Dict([
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]))
return nodes.Output([node])
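# Editor's sketch (not part of the original module): exercising the i18n
# extension above with null translations; the template string is
# illustrative only.
def _demo_trans():
    env = Environment(extensions=['jinja2.ext.i18n'])
    env.install_null_translations()
    tmpl = env.from_string(
        '{% trans count=3 %}{{ count }} item{% pluralize %}'
        '{{ count }} items{% endtrans %}')
    return tmpl.render()  # -> u'3 items'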
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja2 that works like the print statement just
that it doesn't print the return value.
"""
tags = set(['do'])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = set(['break', 'continue'])
def parse(self, parser):
token = next(parser.stream)
if token.value == 'break':
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
class WithExtension(Extension):
"""Adds support for a django-like with block."""
tags = set(['with'])
def parse(self, parser):
node = nodes.Scope(lineno=next(parser.stream).lineno)
assignments = []
while parser.stream.current.type != 'block_end':
lineno = parser.stream.current.lineno
if assignments:
parser.stream.expect('comma')
target = parser.parse_assign_target()
parser.stream.expect('assign')
expr = parser.parse_expression()
assignments.append(nodes.Assign(target, expr, lineno=lineno))
node.body = assignments + \
list(parser.parse_statements(('name:endwith',),
drop_needle=True))
return node
class AutoEscapeExtension(Extension):
"""Changes auto escape rules for a scope."""
tags = set(['autoescape'])
def parse(self, parser):
node = nodes.ScopedEvalContextModifier(lineno=next(parser.stream).lineno)
node.options = [
nodes.Keyword('autoescape', parser.parse_expression())
]
node.body = parser.parse_statements(('name:endautoescape',),
drop_needle=True)
return nodes.Scope([node])
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior set the `babel_style` parameter to `False`
which causes only strings to be returned and parameters are always stored
in tuples. As a consequence invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string itself (a ``unicode`` object, or a tuple
of ``unicode`` objects for functions with multiple string arguments).
This extraction function operates on the AST and is therefore unable
to extract any comments. For comment support you have to use the babel
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
if not isinstance(node.node, nodes.Name) or \
node.node.name not in gettext_functions:
continue
strings = []
for arg in node.args:
if isinstance(arg, nodes.Const) and \
isinstance(arg.value, string_types):
strings.append(arg.value)
else:
strings.append(None)
for arg in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
strings = tuple(x for x in strings if x is not None)
if not strings:
continue
else:
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.lineno, node.node.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in \
reversed(self.tokens[self.offset:offset]):
if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
.. versionchanged:: 2.7
A `silent` option can now be provided. If set to `False` template
syntax errors are propagated instead of being ignored.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
return options.get(key, str(default)).lower() in \
('1', 'on', 'yes', 'true')
silent = getbool(options, 'silent', True)
environment = Environment(
options.get('block_start_string', BLOCK_START_STRING),
options.get('block_end_string', BLOCK_END_STRING),
options.get('variable_start_string', VARIABLE_START_STRING),
options.get('variable_end_string', VARIABLE_END_STRING),
options.get('comment_start_string', COMMENT_START_STRING),
options.get('comment_end_string', COMMENT_END_STRING),
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
getbool(options, 'trim_blocks', TRIM_BLOCKS),
getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
NEWLINE_SEQUENCE,
getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
frozenset(extensions),
cache_size=0,
auto_reload=False
)
if getbool(options, 'newstyle_gettext'):
environment.newstyle_gettext = True
source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError as e:
if not silent:
raise
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
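# Editor's sketch (not part of the original module): wiring one of the
# extensions above into an Environment; the template string is illustrative.
def _demo_loopcontrols():
    env = Environment(extensions=['jinja2.ext.loopcontrols'])
    tmpl = env.from_string(
        '{% for x in seq %}{% if x > 2 %}{% break %}{% endif %}'
        '{{ x }}{% endfor %}')
    return tmpl.render(seq=[1, 2, 3, 4])  # -> u'12'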
| apache-2.0 | 5,159,654,526,291,423,000 | 38.430818 | 81 | 0.606707 | false |
N-Parsons/exercism-python | exercises/rectangles/example.py | 2 | 2901 | import itertools
class corners(object):
def __init__(self, i, j):
# i, j are position of corner
self.i = i
self.j = j
def __str__(self):
return "[" + str(self.i) + ", " + str(self.j) + "]"
# return corner on the same line
def same_line(index, list):
for c in list:
if c.i == index:
return c
# return corner on the same column
def same_col(index, list):
for c in list:
if c.j == index:
return c
def search_corners(input):
corner_list = []
for i in range(0, len(input)):
for j in range(0, len(input[i])):
if (input[i][j] == "+"):
corner_list.append(corners(i, j))
return corner_list
# validate that 4 points form a rectangle by
# checking that every corner has the same
# per-axis offset from the centroid
def possible_rect(quartet):
mid_x = 0
mid_y = 0
# centroid
for c in quartet:
mid_x = mid_x + c.i / 4.0
mid_y = mid_y + c.j / 4.0
# reference distance using first corner
dx = abs(quartet[0].i - mid_x)
dy = abs(quartet[0].j - mid_y)
# Check all the same distance from centroid are equals
for i in range(1, len(quartet)):
if abs(quartet[i].i - mid_x) != dx or abs(quartet[i].j - mid_y) != dy:
return False
return True
# validate path between two corners
def path(c1, c2, input):
if c1.i == c2.i:
for j in range(min(c1.j + 1, c2.j + 1), max(c1.j, c2.j)):
if input[c1.i][j] != "-" and input[c1.i][j] != "+":
return False
return True
elif c1.j == c2.j:
for i in range(min(c1.i + 1, c2.i + 1), max(c1.i, c2.i)):
if input[i][c1.j] != "|" and input[i][c1.j] != "+":
return False
return True
# validate path of rectangle
def validate_rect(rect, input):
# validate connection at every corner
# with neighbours on the same line and col
for i in range(0, len(rect)):
line = same_line(rect[i].i, rect[0:i] + rect[i + 1:])
column = same_col(rect[i].j, rect[0:i] + rect[i + 1:])
if not path(rect[i], line, input) or not path(rect[i], column, input):
return False
return True
# count number of rectangles
# inside ASCII in input lines
def count(lines=""):
nb_rect = 0
# test empty str
if lines == "":
return nb_rect
corners = search_corners(lines)
# no corners in str
if len(corners) == 0:
return nb_rect
# now let the magic begin
# all combinations of 4 corners (python ftw)
q = list(itertools.combinations(corners, r=4))
rectangles = []
for el in q:
if (possible_rect(el)):
rectangles.append(el)
# validate path in found rectangles
for rect in rectangles:
if (validate_rect(rect, lines)):
nb_rect = nb_rect + 1
return nb_rect
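# Editor's sketch (not part of the original solution): a quick sanity check
# on a 2-cell grid, which contains three rectangles (two cells plus the
# outer frame).
if __name__ == '__main__':
    grid = ['+-+-+',
            '| | |',
            '+-+-+']
    assert count(grid) == 3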
| mit | 7,508,009,135,986,596,000 | 25.135135 | 78 | 0.554636 | false |
sippy/b2bua | sippy/UasStateTrying.py | 1 | 7753 | # Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from sippy.UaStateGeneric import UaStateGeneric
from sippy.CCEvents import CCEventRing, CCEventConnect, CCEventFail, CCEventRedirect, \
CCEventDisconnect, CCEventPreConnect
from sippy.SipContact import SipContact
from sippy.SipAddress import SipAddress
from sippy.Time.Timeout import TimeoutAbsMono
class UasStateTrying(UaStateGeneric):
sname = 'Trying(UAS)'
def recvEvent(self, event):
if isinstance(event, CCEventRing):
scode = event.getData()
if scode == None:
code, reason, body = (180, 'Ringing', None)
else:
code, reason, body = scode
if code == 100:
return None
if body != None and self.ua.on_local_sdp_change != None and body.needs_update:
self.ua.on_local_sdp_change(body, lambda x: self.ua.recvEvent(event))
return None
self.ua.lSDP = body
self.ua.sendUasResponse(code, reason, body)
if self.ua.no_progress_timer != None:
self.ua.no_progress_timer.cancel()
self.ua.no_progress_timer = None
if self.ua.expire_time != None:
self.ua.expire_timer = TimeoutAbsMono(self.ua.expires, self.ua.expire_mtime)
if self.ua.p1xx_ts == None:
self.ua.p1xx_ts = event.rtime
return (UasStateRinging, self.ua.ring_cbs, event.rtime, event.origin, code)
elif isinstance(event, CCEventConnect) or isinstance(event, CCEventPreConnect):
code, reason, body = event.getData()
if body != None and self.ua.on_local_sdp_change != None and body.needs_update:
self.ua.on_local_sdp_change(body, lambda x: self.ua.recvEvent(event))
return None
if event.extra_headers != None:
extra_headers = tuple(event.extra_headers)
else:
extra_headers = None
self.ua.lSDP = body
if self.ua.no_progress_timer != None:
self.ua.no_progress_timer.cancel()
self.ua.no_progress_timer = None
if isinstance(event, CCEventConnect):
self.ua.sendUasResponse(code, reason, body, (self.ua.lContact,), ack_wait = False, \
extra_headers = extra_headers)
if self.ua.expire_timer != None:
self.ua.expire_timer.cancel()
self.ua.expire_timer = None
self.ua.startCreditTimer(event.rtime)
self.ua.connect_ts = event.rtime
return (UaStateConnected, self.ua.conn_cbs, event.rtime, event.origin)
else:
self.ua.sendUasResponse(code, reason, body, (self.ua.lContact,), ack_wait = True, \
extra_headers = extra_headers)
return (UaStateConnected,)
elif isinstance(event, CCEventRedirect):
scode = event.getData()
contacts = None
if scode == None:
scode = (500, 'Failed', None, None)
elif scode[3] != None:
contacts = tuple(SipContact(address = x) for x in scode[3])
self.ua.sendUasResponse(scode[0], scode[1], scode[2], contacts)
if self.ua.expire_timer != None:
self.ua.expire_timer.cancel()
self.ua.expire_timer = None
if self.ua.no_progress_timer != None:
self.ua.no_progress_timer.cancel()
self.ua.no_progress_timer = None
self.ua.disconnect_ts = event.rtime
return (UaStateFailed, self.ua.fail_cbs, event.rtime, event.origin, scode[0])
elif isinstance(event, CCEventFail):
scode = event.getData()
if scode == None:
scode = (500, 'Failed')
extra_headers = []
if event.extra_headers != None:
extra_headers.extend(event.extra_headers)
if event.challenge != None:
extra_headers.append(event.challenge)
if len(extra_headers) == 0:
extra_headers = None
else:
extra_headers = tuple(extra_headers)
self.ua.sendUasResponse(scode[0], scode[1], reason_rfc3326 = event.reason, \
extra_headers = extra_headers)
if self.ua.expire_timer != None:
self.ua.expire_timer.cancel()
self.ua.expire_timer = None
if self.ua.no_progress_timer != None:
self.ua.no_progress_timer.cancel()
self.ua.no_progress_timer = None
self.ua.disconnect_ts = event.rtime
return (UaStateFailed, self.ua.fail_cbs, event.rtime, event.origin, scode[0])
elif isinstance(event, CCEventDisconnect):
#import sys, traceback
#traceback.print_stack(file = sys.stdout)
self.ua.sendUasResponse(500, 'Disconnected', reason_rfc3326 = event.reason)
if self.ua.expire_timer != None:
self.ua.expire_timer.cancel()
self.ua.expire_timer = None
if self.ua.no_progress_timer != None:
self.ua.no_progress_timer.cancel()
self.ua.no_progress_timer = None
self.ua.disconnect_ts = event.rtime
return (UaStateDisconnected, self.ua.disc_cbs, event.rtime, event.origin, self.ua.last_scode)
#print 'wrong event %s in the Trying state' % event
return None
def cancel(self, rtime, req):
self.ua.disconnect_ts = rtime
self.ua.changeState((UaStateDisconnected, self.ua.disc_cbs, rtime, self.ua.origin))
event = CCEventDisconnect(rtime = rtime, origin = self.ua.origin)
if req != None:
try:
event.reason = req.getHFBody('reason')
except:
pass
self.ua.emitEvent(event)
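# Editor's note (not part of the original module): transition summary for
# this state, derived from recvEvent() and cancel() above --
#   CCEventRing                   -> UasStateRinging
#   CCEventConnect                -> UaStateConnected (final response sent)
#   CCEventPreConnect             -> UaStateConnected (response sent, ACK awaited)
#   CCEventRedirect / CCEventFail -> UaStateFailed
#   CCEventDisconnect / cancel()  -> UaStateDisconnected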
if not 'UasStateRinging' in globals():
from sippy.UasStateRinging import UasStateRinging
if not 'UaStateFailed' in globals():
from sippy.UaStateFailed import UaStateFailed
if not 'UaStateConnected' in globals():
from sippy.UaStateConnected import UaStateConnected
if not 'UaStateDisconnected' in globals():
from sippy.UaStateDisconnected import UaStateDisconnected
| bsd-2-clause | -432,909,464,323,048,770 | 48.382166 | 105 | 0.617309 | false |
whr20120503/wy | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/mac_tool.py | 377 | 19309 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest)
else:
shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
'--output-format', 'human-readable-text', '--compile', dest, source]
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _CopyStringsFile(self, source, dest):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
except Exception:
fp.close()
return None
fp.close()
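    # Check for a byte-order mark: FE FF is UTF-16 (big-endian), FF FE is
    # UTF-16 (little-endian), and EF BB BF is UTF-8.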
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile('[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffices on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
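      # Editor's note, e.g. with PRODUCT_NAME='My App/Beta':
      #   ${PRODUCT_NAME}                   -> 'My App/Beta'
      #   ${PRODUCT_NAME:identifier}        -> 'My_App_Beta'
      #   ${PRODUCT_NAME:rfc1034identifier} -> 'My-App-Beta'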
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line):
print >>sys.stderr, line
return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. copy ResourceRules.plist from the user or the SDK into the bundle,
2. pick the provisioning profile that best match the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
3. copy Entitlements.plist from user or SDK next to the bundle,
4. code sign the bundle.
"""
resource_rules_path = self._InstallResourceRules(resource_rules)
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--resource-rules',
resource_rules_path, '--entitlements', entitlements_path,
os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, default to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
      A tuple containing two dictionaries: variable substitutions and values
      to override when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
    Checks all the installed provisioning profiles (or, if the user specified
    the PROVISIONING_PROFILE variable, consults only that one) and selects
    the most specific profile that corresponds to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
        to use; if empty or the file is missing, the best installed file
        will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (ie. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
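  # For illustration (hypothetical values): with two valid profiles whose
  # application-identifier patterns are 'TEAM1.*' and 'TEAM1.com.example.*',
  # the max() on pattern length above selects the second, more specific
  # profile for the bundle identifier 'com.example'.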
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
    except Exception:
      # plistlib could not parse the file; assume it is a binary plist.
      pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
      Copy of data where each reference to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return dict((k, self._ExpandVariables(data[k],
substitutions)) for k in data)
return data
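  # For illustration (hypothetical input): with substitutions
  # {'CFBundleIdentifier': 'com.example.app'}, the string
  # 'prefix.$(CFBundleIdentifier)' expands to 'prefix.com.example.app';
  # lists and dictionaries are expanded recursively, element by element.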
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit | -8,215,567,836,340,460,000 | 36.786693 | 80 | 0.673054 | false |
jun66j5/trac-ja | trac/tests/perm.py | 1 | 14135 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from trac import perm
from trac.core import *
from trac.resource import Resource
from trac.test import EnvironmentStub
class DefaultPermissionStoreTestCase(unittest.TestCase):
def setUp(self):
self.env = \
EnvironmentStub(enable=[perm.DefaultPermissionStore,
perm.DefaultPermissionGroupProvider])
self.store = perm.DefaultPermissionStore(self.env)
def tearDown(self):
self.env.reset_db()
def test_simple_actions(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('john', 'WIKI_MODIFY'),
('john', 'REPORT_ADMIN'),
('kate', 'TICKET_CREATE')])
self.assertEqual(['REPORT_ADMIN', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
self.assertEqual(['TICKET_CREATE'],
self.store.get_user_permissions('kate'))
def test_simple_group(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('dev', 'WIKI_MODIFY'),
('dev', 'REPORT_ADMIN'),
('john', 'dev')])
self.assertEqual(['REPORT_ADMIN', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
def test_nested_groups(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('dev', 'WIKI_MODIFY'),
('dev', 'REPORT_ADMIN'),
('admin', 'dev'),
('john', 'admin')])
self.assertEqual(['REPORT_ADMIN', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
def test_mixed_case_group(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('Dev', 'WIKI_MODIFY'),
('Dev', 'REPORT_ADMIN'),
('Admin', 'Dev'),
('john', 'Admin')])
self.assertEqual(['REPORT_ADMIN', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
def test_builtin_groups(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('authenticated', 'WIKI_MODIFY'),
('authenticated', 'REPORT_ADMIN'),
('anonymous', 'TICKET_CREATE')])
self.assertEqual(['REPORT_ADMIN', 'TICKET_CREATE', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
self.assertEqual(['TICKET_CREATE'],
self.store.get_user_permissions('anonymous'))
def test_get_all_permissions(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('dev', 'WIKI_MODIFY'),
('dev', 'REPORT_ADMIN'),
('john', 'dev')])
expected = [('dev', 'WIKI_MODIFY'),
('dev', 'REPORT_ADMIN'),
('john', 'dev')]
for res in self.store.get_all_permissions():
self.assertFalse(res not in expected)
class TestPermissionRequestor(Component):
implements(perm.IPermissionRequestor)
def get_permission_actions(self):
return ['TEST_CREATE', 'TEST_DELETE', 'TEST_MODIFY',
('TEST_CREATE', []),
('TEST_ADMIN', ['TEST_CREATE', 'TEST_DELETE']),
('TEST_ADMIN', ['TEST_MODIFY'])]
class PermissionErrorTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub()
def test_default_message(self):
permission_error = perm.PermissionError()
self.assertEqual(None, permission_error.action)
self.assertEqual(None, permission_error.resource)
self.assertEqual(None, permission_error.env)
self.assertEqual("Insufficient privileges to perform this operation.",
unicode(permission_error))
self.assertEqual("Forbidden", permission_error.title)
self.assertEqual(unicode(permission_error), permission_error.msg)
def test_message_specified(self):
message = "The message."
permission_error = perm.PermissionError(msg=message)
self.assertEqual(message, unicode(permission_error))
def test_message_from_action(self):
action = 'WIKI_VIEW'
permission_error = perm.PermissionError(action)
self.assertEqual(action, permission_error.action)
self.assertEqual(None, permission_error.resource)
self.assertEqual(None, permission_error.env)
self.assertEqual("WIKI_VIEW privileges are required to perform this "
"operation. You don't have the required "
"permissions.", unicode(permission_error))
def test_message_from_action_and_resource(self):
action = 'WIKI_VIEW'
resource = Resource('wiki', 'WikiStart')
permission_error = perm.PermissionError(action, resource, self.env)
self.assertEqual(action, permission_error.action)
self.assertEqual(resource, permission_error.resource)
self.assertEqual(self.env, permission_error.env)
self.assertEqual("WIKI_VIEW privileges are required to perform this "
"operation on WikiStart. You don't have the "
"required permissions.", unicode(permission_error))
class PermissionSystemTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=[perm.PermissionSystem,
perm.DefaultPermissionStore,
TestPermissionRequestor])
self.perm = perm.PermissionSystem(self.env)
def tearDown(self):
self.env.reset_db()
def test_all_permissions(self):
self.assertEqual({'EMAIL_VIEW': True, 'TRAC_ADMIN': True,
'TEST_CREATE': True, 'TEST_DELETE': True,
'TEST_MODIFY': True, 'TEST_ADMIN': True},
self.perm.get_user_permissions())
def test_simple_permissions(self):
self.perm.grant_permission('bob', 'TEST_CREATE')
self.perm.grant_permission('jane', 'TEST_DELETE')
self.perm.grant_permission('jane', 'TEST_MODIFY')
self.assertEqual({'TEST_CREATE': True},
self.perm.get_user_permissions('bob'))
self.assertEqual({'TEST_DELETE': True, 'TEST_MODIFY': True},
self.perm.get_user_permissions('jane'))
def test_meta_permissions(self):
self.perm.grant_permission('bob', 'TEST_CREATE')
self.perm.grant_permission('jane', 'TEST_ADMIN')
self.assertEqual({'TEST_CREATE': True},
self.perm.get_user_permissions('bob'))
self.assertEqual({'TEST_CREATE': True, 'TEST_DELETE': True,
'TEST_MODIFY': True, 'TEST_ADMIN': True},
self.perm.get_user_permissions('jane'))
def test_get_all_permissions(self):
self.perm.grant_permission('bob', 'TEST_CREATE')
self.perm.grant_permission('jane', 'TEST_ADMIN')
expected = [('bob', 'TEST_CREATE'),
('jane', 'TEST_ADMIN')]
for res in self.perm.get_all_permissions():
self.assertFalse(res not in expected)
def test_expand_actions_iter_7467(self):
# Check that expand_actions works with iterators (#7467)
perms = set(['EMAIL_VIEW', 'TRAC_ADMIN', 'TEST_DELETE', 'TEST_MODIFY',
'TEST_CREATE', 'TEST_ADMIN'])
self.assertEqual(perms, self.perm.expand_actions(['TRAC_ADMIN']))
self.assertEqual(perms, self.perm.expand_actions(iter(['TRAC_ADMIN'])))
class PermissionCacheTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=[perm.DefaultPermissionStore,
perm.DefaultPermissionPolicy,
TestPermissionRequestor])
self.env.config.set('trac', 'permission_policies',
'DefaultPermissionPolicy')
self.perm_system = perm.PermissionSystem(self.env)
# by-pass DefaultPermissionPolicy cache:
perm.DefaultPermissionPolicy.CACHE_EXPIRY = -1
self.perm_system.grant_permission('testuser', 'TEST_MODIFY')
self.perm_system.grant_permission('testuser', 'TEST_ADMIN')
self.perm = perm.PermissionCache(self.env, 'testuser')
def tearDown(self):
self.env.reset_db()
def test_contains(self):
self.assertTrue('TEST_MODIFY' in self.perm)
self.assertTrue('TEST_ADMIN' in self.perm)
self.assertFalse('TRAC_ADMIN' in self.perm)
def test_has_permission(self):
self.assertTrue(self.perm.has_permission('TEST_MODIFY'))
self.assertTrue(self.perm.has_permission('TEST_ADMIN'))
self.assertFalse(self.perm.has_permission('TRAC_ADMIN'))
def test_require(self):
self.perm.require('TEST_MODIFY')
self.perm.require('TEST_ADMIN')
self.assertRaises(perm.PermissionError,
self.perm.require, 'TRAC_ADMIN')
def test_assert_permission(self):
self.perm.assert_permission('TEST_MODIFY')
self.perm.assert_permission('TEST_ADMIN')
self.assertRaises(perm.PermissionError,
self.perm.assert_permission, 'TRAC_ADMIN')
def test_cache(self):
self.perm.assert_permission('TEST_MODIFY')
self.perm.assert_permission('TEST_ADMIN')
self.perm_system.revoke_permission('testuser', 'TEST_ADMIN')
# Using cached GRANT here
self.perm.assert_permission('TEST_ADMIN')
def test_cache_shared(self):
# we need to start with an empty cache here (#7201)
perm1 = perm.PermissionCache(self.env, 'testcache')
perm1 = perm1('ticket', 1)
perm2 = perm1('ticket', 1) # share internal cache
self.perm_system.grant_permission('testcache', 'TEST_ADMIN')
perm1.assert_permission('TEST_ADMIN')
self.perm_system.revoke_permission('testcache', 'TEST_ADMIN')
# Using cached GRANT here (from shared cache)
perm2.assert_permission('TEST_ADMIN')
class TestPermissionPolicy(Component):
implements(perm.IPermissionPolicy)
def __init__(self):
self.allowed = {}
self.results = {}
def grant(self, username, permissions):
self.allowed.setdefault(username, set()).update(permissions)
def revoke(self, username, permissions):
self.allowed.setdefault(username, set()).difference_update(permissions)
def check_permission(self, action, username, resource, perm):
result = action in self.allowed.get(username, set()) or None
self.results[(username, action)] = result
return result
class PermissionPolicyTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=[perm.DefaultPermissionStore,
perm.DefaultPermissionPolicy,
TestPermissionPolicy,
TestPermissionRequestor])
self.env.config.set('trac', 'permission_policies',
'TestPermissionPolicy')
self.policy = TestPermissionPolicy(self.env)
self.perm = perm.PermissionCache(self.env, 'testuser')
def tearDown(self):
self.env.reset_db()
def test_no_permissions(self):
self.assertRaises(perm.PermissionError,
self.perm.assert_permission, 'TEST_MODIFY')
self.assertRaises(perm.PermissionError,
self.perm.assert_permission, 'TEST_ADMIN')
self.assertEqual(self.policy.results,
{('testuser', 'TEST_MODIFY'): None,
('testuser', 'TEST_ADMIN'): None})
def test_grant_revoke_permissions(self):
self.policy.grant('testuser', ['TEST_MODIFY', 'TEST_ADMIN'])
self.assertEqual('TEST_MODIFY' in self.perm, True)
self.assertEqual('TEST_ADMIN' in self.perm, True)
self.assertEqual(self.policy.results,
{('testuser', 'TEST_MODIFY'): True,
('testuser', 'TEST_ADMIN'): True})
def test_policy_chaining(self):
self.env.config.set('trac', 'permission_policies',
'TestPermissionPolicy,DefaultPermissionPolicy')
self.policy.grant('testuser', ['TEST_MODIFY'])
system = perm.PermissionSystem(self.env)
system.grant_permission('testuser', 'TEST_ADMIN')
self.assertEqual(list(system.policies),
[self.policy,
perm.DefaultPermissionPolicy(self.env)])
self.assertEqual('TEST_MODIFY' in self.perm, True)
self.assertEqual('TEST_ADMIN' in self.perm, True)
self.assertEqual(self.policy.results,
{('testuser', 'TEST_MODIFY'): True,
('testuser', 'TEST_ADMIN'): None})
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DefaultPermissionStoreTestCase))
suite.addTest(unittest.makeSuite(PermissionErrorTestCase))
suite.addTest(unittest.makeSuite(PermissionSystemTestCase))
suite.addTest(unittest.makeSuite(PermissionCacheTestCase))
suite.addTest(unittest.makeSuite(PermissionPolicyTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| bsd-3-clause | -2,807,932,945,174,019,600 | 40.819527 | 79 | 0.600637 | false |
nguy/PyDisdrometer | pydsd/utility/expfit.py | 3 | 2022 | import numpy as np
from scipy.optimize import curve_fit
def expfit(x, y):
"""
    expfit calculates a power-law fit based upon Levenberg-Marquardt
    minimization. Fits are of the form y = a * x**b.
Parameters:
-----------
x: array_like
independent variable
y: array_like
dependent variable
Returns:
--------
popt : tuple
        Scale and exponent parameters a & b
pcov: tuple
Covariance of the fit
Notes:
------
There are some stability issues if bad data is passed into it.
"""
x_array = np.array(x)
y_array = np.array(y)
x_finite_index = np.isfinite(x_array)
y_finite_index = np.isfinite(y_array)
mask = np.logical_and(x_finite_index, y_finite_index)
expfunc = lambda x, a, b: a * np.power(x, b)
popt, pcov = curve_fit(expfunc, x_array[mask], y_array[mask])
return popt, pcov
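# Illustrative usage sketch (not part of the library API): a round-trip of
# expfit on synthetic, noise-free data; the parameter values are arbitrary.
def _example_expfit():
    x = np.linspace(1.0, 10.0, 50)
    y = 2.5 * np.power(x, 1.3)
    popt, _ = expfit(x, y)
    # popt should be close to (2.5, 1.3) for this input.
    return popt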
def expfit2(x, y):
"""
    expfit2 calculates a two-variable power-law fit based upon
    Levenberg-Marquardt minimization. Fits are of the form
    y = a * (x[0]**b) * (x[1]**c).
Parameters:
-----------
x: array_like
independent variables packed. x[0] is first independent variable tuple, x[1] the second.
y: array_like
dependent variable
Returns:
--------
popt : tuple
        Scale and exponent parameters a, b & c
pcov: tuple
Covariance of the fit
Notes:
------
There are some stability issues if bad data is passed into it.
"""
x1_array = np.array(x[0])
x2_array = np.array(x[1])
y_array = np.array(y)
x1_finite_index = np.isfinite(x1_array)
x2_finite_index = np.isfinite(x2_array)
y_finite_index = np.isfinite(y_array)
mask = np.logical_and(
x2_finite_index, np.logical_and(x1_finite_index, y_finite_index)
)
expfunc = lambda x, a, b, c: a * np.power(x[0], b) * np.power(x[1], c)
popt, pcov = curve_fit(expfunc, [x1_array[mask], x2_array[mask]], y_array[mask])
return popt, pcov
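# Illustrative usage sketch (not part of the library API): a round-trip of
# expfit2 on synthetic data with two independent variables.
def _example_expfit2():
    x1 = np.linspace(1.0, 10.0, 50)
    x2 = np.linspace(2.0, 20.0, 50)
    y = 0.5 * np.power(x1, 1.2) * np.power(x2, 0.8)
    popt, _ = expfit2([x1, x2], y)
    # popt should be close to (0.5, 1.2, 0.8) for this input.
    return popt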
| lgpl-2.1 | -2,990,485,505,601,125,000 | 24.594937 | 101 | 0.606825 | false |
ryepdx/shipping_api | report/summary_report.py | 1 | 2179 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class summary_report_print(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(summary_report_print, self).__init__(cr, uid, name, context)
self.localcontext.update({
'time': time,
'get_total': self.get_total,
'get_items': self.get_items,
})
def get_total(self):
ship_ids = self.pool.get('shipping.move').search(self.cr, self.uid, [('state', '=', 'ready_pick')])
return str(len(ship_ids))
def get_items(self):
ret = {}
ship_ids = self.pool.get('shipping.move').search(self.cr, self.uid, [('state', '=', 'ready_pick')])
if ship_ids:
for ship_id in self.pool.get('shipping.move').browse(self.cr, self.uid, ship_ids):
key = ship_id.service and ship_id.service.description or ''
ret[key] = 1 + ret.get(key, 0)
return ret.items()
report_sxw.report_sxw(
'report.summary_report_print',
'shipping.move',
'addons/shipping_api/report/summary_report.rml',
parser=summary_report_print
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 | -4,062,300,994,077,641,700 | 39.37037 | 107 | 0.598899 | false |
eric-stanley/RetroArch | gfx/glsym/glgen.py | 4 | 4663 | #!/usr/bin/env python3
"""
License statement applies to this file (glgen.py) only.
"""
"""
Permission is hereby granted, free of charge,
to any person obtaining a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import os
import re
banned_ext = [ 'AMD', 'APPLE', 'EXT', 'NV', 'NVX', 'ATI', '3DLABS', 'SUN', 'SGI', 'SGIX', 'SGIS', 'INTEL', '3DFX', 'IBM', 'MESA', 'GREMEDY', 'OML', 'PGI', 'I3D', 'INGL', 'MTX', 'QCOM', 'IMG', 'ANGLE', 'SUNX', 'INGR' ]
def noext(sym):
for ext in banned_ext:
if sym.endswith(ext):
return False
return True
def find_gl_symbols(lines):
typedefs = []
syms = []
for line in lines:
m = re.search(r'^typedef.+PFN(\S+)PROC.+$', line)
g = re.search(r'^.+(gl\S+)\W*\(.+\).*$', line)
if m and noext(m.group(1)):
typedefs.append(m.group(0).replace('PFN', 'RGLSYM').replace('GLDEBUGPROC', 'RGLGENGLDEBUGPROC'))
if g and noext(g.group(1)):
syms.append(g.group(1))
return (typedefs, syms)
def generate_defines(gl_syms):
res = []
for line in gl_syms:
res.append('#define {} __rglgen_{}'.format(line, line))
return res
def generate_declarations(gl_syms):
return ['RGLSYM' + x.upper() + 'PROC ' + '__rglgen_' + x + ';' for x in gl_syms]
def generate_macros(gl_syms):
return [' SYM(' + x.replace('gl', '') + '),' for x in gl_syms]
def dump(f, lines):
f.write('\n'.join(lines))
f.write('\n\n')
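# For illustration (hypothetical header lines): a line such as
#   'typedef void (APIENTRYP PFNGLCLEARPROC) (GLbitfield mask);'
# is recorded by find_gl_symbols() as
#   'typedef void (APIENTRYP RGLSYMGLCLEARPROC) (GLbitfield mask);'
# while a prototype line containing 'glClear' yields the symbol 'glClear',
# which generate_defines() maps to '#define glClear __rglgen_glClear'.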
if __name__ == '__main__':
if len(sys.argv) > 4:
for banned in sys.argv[4:]:
banned_ext.append(banned)
with open(sys.argv[1], 'r') as f:
lines = f.readlines()
typedefs, syms = find_gl_symbols(lines)
overrides = generate_defines(syms)
declarations = generate_declarations(syms)
externs = ['extern ' + x for x in declarations]
macros = generate_macros(syms)
with open(sys.argv[2], 'w') as f:
f.write('#ifndef RGLGEN_DECL_H__\n')
f.write('#define RGLGEN_DECL_H__\n')
f.write('#ifdef __cplusplus\n')
f.write('extern "C" {\n')
f.write('#endif\n')
f.write('#ifdef GL_APIENTRY\n')
f.write('typedef void (GL_APIENTRY *RGLGENGLDEBUGPROC)(GLenum, GLenum, GLuint, GLenum, GLsizei, const GLchar*, GLvoid*);\n')
f.write('#else\n')
f.write('#ifndef APIENTRY\n')
f.write('#define APIENTRY\n')
f.write('#endif\n')
f.write('#ifndef APIENTRYP\n')
f.write('#define APIENTRYP APIENTRY *\n')
f.write('#endif\n')
f.write('typedef void (APIENTRY *RGLGENGLDEBUGPROCARB)(GLenum, GLenum, GLuint, GLenum, GLsizei, const GLchar*, GLvoid*);\n')
f.write('typedef void (APIENTRY *RGLGENGLDEBUGPROC)(GLenum, GLenum, GLuint, GLenum, GLsizei, const GLchar*, GLvoid*);\n')
f.write('#endif\n')
f.write('#ifndef GL_OES_EGL_image\n')
f.write('typedef void *GLeglImageOES;\n')
f.write('#endif\n')
f.write('#if !defined(GL_OES_fixed_point) && !defined(HAVE_OPENGLES2)\n')
f.write('typedef GLint GLfixed;\n')
f.write('#endif\n')
dump(f, typedefs)
dump(f, overrides)
dump(f, externs)
f.write('struct rglgen_sym_map { const char *sym; void *ptr; };\n')
f.write('extern const struct rglgen_sym_map rglgen_symbol_map[];\n')
f.write('#ifdef __cplusplus\n')
f.write('}\n')
f.write('#endif\n')
f.write('#endif\n')
with open(sys.argv[3], 'w') as f:
f.write('#include "glsym.h"\n')
f.write('#include <stddef.h>\n')
f.write('#define SYM(x) { "gl" #x, &(gl##x) }\n')
f.write('const struct rglgen_sym_map rglgen_symbol_map[] = {\n')
dump(f, macros)
f.write(' { NULL, NULL },\n')
f.write('};\n')
dump(f, declarations)
| gpl-3.0 | 6,069,777,845,376,129,000 | 34.59542 | 217 | 0.62063 | false |
mdrumond/tensorflow | tensorflow/python/profiler/tfprof_logger_test.py | 52 | 2982 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TFProfLoggerTest(test.TestCase):
def _BuildSmallPlaceholderlModel(self):
a = array_ops.placeholder(dtypes.int32, [2, 2])
b = array_ops.placeholder(dtypes.int32, [2, 2])
y = math_ops.matmul(a, b)
return a, b, y
def _BuildSmallModel(self):
a = constant_op.constant([[1, 2], [3, 4]])
b = constant_op.constant([[1, 2], [3, 4]])
return math_ops.matmul(a, b)
# pylint: disable=pointless-string-statement
"""# TODO(xpan): This this out of core so it doesn't depend on contrib.
def testFillMissingShape(self):
a, b, y = self._BuildSmallPlaceholderlModel()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess = session.Session()
sess.run(y,
options=run_options,
run_metadata=run_metadata,
feed_dict={a: [[1, 2], [2, 3]],
b: [[1, 2], [2, 3]]})
graph2 = ops.Graph()
# Use copy_op_to_graph to remove shape information.
y2 = copy_elements.copy_op_to_graph(y, graph2, [])
self.assertEquals('<unknown>', str(y2.get_shape()))
tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
self.assertEquals('(2, 2)', str(y2.get_shape()))
def testFailedFillMissingShape(self):
y = self._BuildSmallModel()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess = session.Session()
sess.run(y, options=run_options, run_metadata=run_metadata)
graph2 = ops.Graph()
y2 = copy_elements.copy_op_to_graph(y, graph2, [])
self.assertEquals('<unknown>', str(y2.get_shape()))
# run_metadata has special name for MatMul, hence failed to fill shape.
tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
self.assertEquals('<unknown>', str(y2.get_shape()))
"""
if __name__ == '__main__':
test.main()
| apache-2.0 | 5,156,236,285,362,299,000 | 36.275 | 80 | 0.666331 | false |
urandu/rethinkdb | external/v8_3.30.33.16/tools/stats-viewer.py | 143 | 15033 | #!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A cross-platform execution counter viewer.
The stats viewer reads counters from a binary file and displays them
in a window, re-reading and re-displaying at regular intervals.
"""
import mmap
import optparse
import os
import re
import struct
import sys
import time
import Tkinter
# The interval, in milliseconds, between ui updates
UPDATE_INTERVAL_MS = 100
# Mapping from counter prefix to the formatting to be used for the counter
COUNTER_LABELS = {"t": "%i ms.", "c": "%i"}
# The magic numbers used to check if a file is not a counters file
COUNTERS_FILE_MAGIC_NUMBER = 0xDEADFACE
CHROME_COUNTERS_FILE_MAGIC_NUMBER = 0x13131313
class StatsViewer(object):
"""The main class that keeps the data used by the stats viewer."""
def __init__(self, data_name, name_filter):
"""Creates a new instance.
Args:
data_name: the name of the file containing the counters.
name_filter: The regexp filter to apply to counter names.
"""
self.data_name = data_name
self.name_filter = name_filter
# The handle created by mmap.mmap to the counters file. We need
# this to clean it up on exit.
self.shared_mmap = None
# A mapping from counter names to the ui element that displays
# them
self.ui_counters = {}
# The counter collection used to access the counters file
self.data = None
# The Tkinter root window object
self.root = None
def Run(self):
"""The main entry-point to running the stats viewer."""
try:
self.data = self.MountSharedData()
# OpenWindow blocks until the main window is closed
self.OpenWindow()
finally:
self.CleanUp()
def MountSharedData(self):
"""Mount the binary counters file as a memory-mapped file. If
something goes wrong print an informative message and exit the
program."""
if not os.path.exists(self.data_name):
maps_name = "/proc/%s/maps" % self.data_name
if not os.path.exists(maps_name):
print "\"%s\" is neither a counter file nor a PID." % self.data_name
sys.exit(1)
maps_file = open(maps_name, "r")
try:
        pid = self.data_name
        self.data_name = None
        for m in re.finditer(r"/dev/shm/\S*", maps_file.read()):
          if os.path.exists(m.group(0)):
            self.data_name = m.group(0)
            break
        if self.data_name is None:
          # self.data_name was cleared above; report the original PID.
          print "Can't find counter file in maps for PID %s." % pid
          sys.exit(1)
finally:
maps_file.close()
data_file = open(self.data_name, "r")
size = os.fstat(data_file.fileno()).st_size
fileno = data_file.fileno()
self.shared_mmap = mmap.mmap(fileno, size, access=mmap.ACCESS_READ)
data_access = SharedDataAccess(self.shared_mmap)
if data_access.IntAt(0) == COUNTERS_FILE_MAGIC_NUMBER:
return CounterCollection(data_access)
elif data_access.IntAt(0) == CHROME_COUNTERS_FILE_MAGIC_NUMBER:
return ChromeCounterCollection(data_access)
print "File %s is not stats data." % self.data_name
sys.exit(1)
def CleanUp(self):
"""Cleans up the memory mapped file if necessary."""
if self.shared_mmap:
self.shared_mmap.close()
def UpdateCounters(self):
"""Read the contents of the memory-mapped file and update the ui if
necessary. If the same counters are present in the file as before
we just update the existing labels. If any counters have been added
or removed we scrap the existing ui and draw a new one.
"""
changed = False
counters_in_use = self.data.CountersInUse()
if counters_in_use != len(self.ui_counters):
self.RefreshCounters()
changed = True
else:
for i in xrange(self.data.CountersInUse()):
counter = self.data.Counter(i)
name = counter.Name()
if name in self.ui_counters:
value = counter.Value()
ui_counter = self.ui_counters[name]
counter_changed = ui_counter.Set(value)
changed = (changed or counter_changed)
else:
self.RefreshCounters()
changed = True
break
if changed:
# The title of the window shows the last time the file was
# changed.
self.UpdateTime()
self.ScheduleUpdate()
def UpdateTime(self):
"""Update the title of the window with the current time."""
self.root.title("Stats Viewer [updated %s]" % time.strftime("%H:%M:%S"))
def ScheduleUpdate(self):
"""Schedules the next ui update."""
self.root.after(UPDATE_INTERVAL_MS, lambda: self.UpdateCounters())
def RefreshCounters(self):
"""Tear down and rebuild the controls in the main window."""
counters = self.ComputeCounters()
self.RebuildMainWindow(counters)
def ComputeCounters(self):
"""Group the counters by the suffix of their name.
Since the same code-level counter (for instance "X") can result in
several variables in the binary counters file that differ only by a
two-character prefix (for instance "c:X" and "t:X") counters are
grouped by suffix and then displayed with custom formatting
depending on their prefix.
Returns:
A mapping from suffixes to a list of counters with that suffix,
sorted by prefix.
"""
names = {}
for i in xrange(self.data.CountersInUse()):
counter = self.data.Counter(i)
name = counter.Name()
names[name] = counter
# By sorting the keys we ensure that the prefixes always come in the
# same order ("c:" before "t:") which looks more consistent in the
# ui.
sorted_keys = names.keys()
sorted_keys.sort()
# Group together the names whose suffix after a ':' are the same.
groups = {}
for name in sorted_keys:
counter = names[name]
if ":" in name:
name = name[name.find(":")+1:]
if not name in groups:
groups[name] = []
groups[name].append(counter)
return groups
def RebuildMainWindow(self, groups):
"""Tear down and rebuild the main window.
Args:
groups: the groups of counters to display
"""
# Remove elements in the current ui
self.ui_counters.clear()
for child in self.root.children.values():
child.destroy()
# Build new ui
index = 0
sorted_groups = groups.keys()
sorted_groups.sort()
for counter_name in sorted_groups:
counter_objs = groups[counter_name]
if self.name_filter.match(counter_name):
name = Tkinter.Label(self.root, width=50, anchor=Tkinter.W,
text=counter_name)
name.grid(row=index, column=0, padx=1, pady=1)
count = len(counter_objs)
for i in xrange(count):
counter = counter_objs[i]
name = counter.Name()
var = Tkinter.StringVar()
if self.name_filter.match(name):
value = Tkinter.Label(self.root, width=15, anchor=Tkinter.W,
textvariable=var)
value.grid(row=index, column=(1 + i), padx=1, pady=1)
# If we know how to interpret the prefix of this counter then
# add an appropriate formatting to the variable
if (":" in name) and (name[0] in COUNTER_LABELS):
format = COUNTER_LABELS[name[0]]
else:
format = "%i"
ui_counter = UiCounter(var, format)
self.ui_counters[name] = ui_counter
ui_counter.Set(counter.Value())
index += 1
self.root.update()
def OpenWindow(self):
"""Create and display the root window."""
self.root = Tkinter.Tk()
# Tkinter is no good at resizing so we disable it
self.root.resizable(width=False, height=False)
self.RefreshCounters()
self.ScheduleUpdate()
self.root.mainloop()
class UiCounter(object):
"""A counter in the ui."""
def __init__(self, var, format):
"""Creates a new ui counter.
Args:
var: the Tkinter string variable for updating the ui
format: the format string used to format this counter
"""
self.var = var
self.format = format
self.last_value = None
def Set(self, value):
"""Updates the ui for this counter.
Args:
value: The value to display
Returns:
True if the value had changed, otherwise False. The first call
always returns True.
"""
if value == self.last_value:
return False
else:
self.last_value = value
self.var.set(self.format % value)
return True
class SharedDataAccess(object):
"""A utility class for reading data from the memory-mapped binary
counters file."""
def __init__(self, data):
"""Create a new instance.
Args:
data: A handle to the memory-mapped file, as returned by mmap.mmap.
"""
self.data = data
def ByteAt(self, index):
"""Return the (unsigned) byte at the specified byte index."""
return ord(self.CharAt(index))
def IntAt(self, index):
"""Return the little-endian 32-byte int at the specified byte index."""
word_str = self.data[index:index+4]
result, = struct.unpack("I", word_str)
return result
def CharAt(self, index):
"""Return the ascii character at the specified byte index."""
return self.data[index]
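# For illustration (hypothetical bytes): on a little-endian host, a mapped
# file starting with '\xce\xfa\xad\xde' makes SharedDataAccess.IntAt(0)
# return 0xDEADFACE, the magic number of a V8 counters file.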
class Counter(object):
"""A pointer to a single counter withing a binary counters file."""
def __init__(self, data, offset):
"""Create a new instance.
Args:
data: the shared data access object containing the counter
offset: the byte offset of the start of this counter
"""
self.data = data
self.offset = offset
def Value(self):
"""Return the integer value of this counter."""
return self.data.IntAt(self.offset)
def Name(self):
"""Return the ascii name of this counter."""
result = ""
index = self.offset + 4
current = self.data.ByteAt(index)
while current:
result += chr(current)
index += 1
current = self.data.ByteAt(index)
return result
class CounterCollection(object):
"""An overlay over a counters file that provides access to the
individual counters contained in the file."""
def __init__(self, data):
"""Create a new instance.
Args:
data: the shared data access object
"""
self.data = data
self.max_counters = data.IntAt(4)
self.max_name_size = data.IntAt(8)
def CountersInUse(self):
"""Return the number of counters in active use."""
return self.data.IntAt(12)
def Counter(self, index):
"""Return the index'th counter."""
return Counter(self.data, 16 + index * self.CounterSize())
def CounterSize(self):
"""Return the size of a single counter."""
return 4 + self.max_name_size
class ChromeCounter(object):
"""A pointer to a single counter withing a binary counters file."""
def __init__(self, data, name_offset, value_offset):
"""Create a new instance.
Args:
data: the shared data access object containing the counter
name_offset: the byte offset of the start of this counter's name
value_offset: the byte offset of the start of this counter's value
"""
self.data = data
self.name_offset = name_offset
self.value_offset = value_offset
def Value(self):
"""Return the integer value of this counter."""
return self.data.IntAt(self.value_offset)
def Name(self):
"""Return the ascii name of this counter."""
result = ""
index = self.name_offset
current = self.data.ByteAt(index)
while current:
result += chr(current)
index += 1
current = self.data.ByteAt(index)
return result
class ChromeCounterCollection(object):
"""An overlay over a counters file that provides access to the
individual counters contained in the file."""
_HEADER_SIZE = 4 * 4
_COUNTER_NAME_SIZE = 64
_THREAD_NAME_SIZE = 32
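  # File layout implied by the offset arithmetic in __init__ below:
  #   header (16 bytes),
  #   max_threads * (thread name (32 bytes) + 2 ints),
  #   max_counters * counter name (64 bytes),
  #   max_counters * max_threads * 4-byte counter values.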
def __init__(self, data):
"""Create a new instance.
Args:
data: the shared data access object
"""
self.data = data
self.max_counters = data.IntAt(8)
self.max_threads = data.IntAt(12)
self.counter_names_offset = \
self._HEADER_SIZE + self.max_threads * (self._THREAD_NAME_SIZE + 2 * 4)
self.counter_values_offset = \
self.counter_names_offset + self.max_counters * self._COUNTER_NAME_SIZE
def CountersInUse(self):
"""Return the number of counters in active use."""
for i in xrange(self.max_counters):
name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE
if self.data.ByteAt(name_offset) == 0:
return i
return self.max_counters
def Counter(self, i):
"""Return the i'th counter."""
name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE
value_offset = self.counter_values_offset + i * self.max_threads * 4
return ChromeCounter(self.data, name_offset, value_offset)
def Main(data_file, name_filter):
"""Run the stats counter.
Args:
data_file: The counters file to monitor.
name_filter: The regexp filter to apply to counter names.
"""
StatsViewer(data_file, name_filter).Run()
if __name__ == "__main__":
parser = optparse.OptionParser("usage: %prog [--filter=re] "
"<stats data>|<test_shell pid>")
parser.add_option("--filter",
default=".*",
help=("regexp filter for counter names "
"[default: %default]"))
(options, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
sys.exit(1)
Main(args[0], re.compile(options.filter))
| agpl-3.0 | 3,964,033,559,789,936,000 | 30.849576 | 79 | 0.653762 | false |
tomkralidis/inasafe | safe/gui/tools/wizard_dialog.py | 1 | 179113 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid **GUI InaSAFE Wizard Dialog.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
.. todo:: Check raster is single band
"""
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '21/02/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import os
import logging
import re
import json
from collections import OrderedDict
from sqlite3 import OperationalError
from osgeo import gdal
from osgeo.gdalconst import GA_ReadOnly
import numpy
from qgis.core import (
QgsCoordinateTransform,
QgsBrowserModel,
QgsDataItem,
QgsVectorLayer,
QgsRasterLayer,
QgsDataSourceURI,
QgsMapLayerRegistry)
# noinspection PyPackageRequirements
from PyQt4 import QtGui, QtCore
# noinspection PyPackageRequirements
from PyQt4.QtCore import pyqtSignature, QSettings, QPyNullVariant
# noinspection PyPackageRequirements
from PyQt4.QtGui import (
QDialog,
QListWidgetItem,
QPixmap,
QSortFilterProxyModel)
# pylint: disable=F0401
from db_manager.db_plugins.postgis.connector import PostGisDBConnector
# pylint: enable=F0401
# pylint: disable=unused-import
# TODO: to get rid of the following import,
# TODO: we need to get rid of all those evals...TS
from safe import definitions
# pylint: enable=unused-import
from safe.definitions import (
global_default_attribute,
do_not_use_attribute,
continuous_hazard_unit,
exposure_unit,
raster_hazard_classification,
vector_hazard_classification,
layer_purpose_hazard,
layer_purpose_exposure,
layer_purpose_aggregation,
hazard_category_single_event,
layer_geometry_point,
layer_geometry_line,
layer_geometry_polygon,
layer_geometry_raster,
layer_mode_continuous,
layer_mode_classified)
from safe.impact_functions.impact_function_manager import ImpactFunctionManager
from safe.utilities.keyword_io import KeywordIO
from safe.utilities.analysis_handler import AnalysisHandler
from safe.utilities.gis import (
is_raster_layer,
is_point_layer,
is_polygon_layer,
layer_attribute_names)
from safe.utilities.utilities import get_error_message, compare_version
from safe.defaults import get_defaults
from safe.common.exceptions import (
HashNotFoundError,
NoKeywordsFoundError,
KeywordNotFoundError,
InvalidParameterError,
UnsupportedProviderError,
InsufficientOverlapError,
InaSAFEError)
from safe.common.resource_parameter import ResourceParameter
from safe.common.version import get_version
from safe_extras.parameters.group_parameter import GroupParameter
from safe.utilities.resources import get_ui_class, resources_path
from safe.impact_statistics.function_options_dialog import (
FunctionOptionsDialog)
from safe.utilities.unicode import get_unicode
from safe.utilities.i18n import tr
from safe.gui.tools.wizard_strings import (
category_question,
category_question_hazard,
category_question_exposure,
category_question_aggregation,
hazard_category_question,
layermode_raster_question,
layermode_vector_question,
unit_question,
allow_resampling_question,
field_question_subcategory_unit,
field_question_subcategory_classified,
field_question_aggregation,
classification_question,
classify_vector_question,
classify_raster_question,
select_function_constraints2_question,
select_function_question,
select_hazard_origin_question,
select_hazlayer_from_canvas_question,
select_hazlayer_from_browser_question,
select_exposure_origin_question,
select_explayer_from_canvas_question,
select_explayer_from_browser_question,
create_postGIS_connection_first)
# TODO(Ismail): We need a better way to import all of these strings
# pylint: disable=unused-import
from safe.gui.tools.wizard_strings import (
earthquake_mmi_question,
exposure_question,
flood_feet_depth_question,
flood_metres_depth_question,
flood_wetdry_question,
hazard_question,
population_density_question,
population_number_question,
road_road_type_question,
structure_building_type_question,
tephra_kgm2_question,
tsunami_feet_depth_question,
tsunami_metres_depth_question,
tsunami_wetdry_question,
volcano_volcano_categorical_question
)
# pylint: enable=unused-import
LOGGER = logging.getLogger('InaSAFE')
FORM_CLASS = get_ui_class('wizard_dialog_base.ui')
# Constants: tab numbers for steps
step_kw_category = 1
step_kw_subcategory = 2
step_kw_hazard_category = 3
step_kw_layermode = 4
step_kw_unit = 5
step_kw_classification = 6
step_kw_field = 7
step_kw_resample = 8
step_kw_classify = 9
step_kw_extrakeywords = 10
step_kw_aggregation = 11
step_kw_source = 12
step_kw_title = 13
step_fc_function_1 = 14
step_fc_function_2 = 15
step_fc_function_3 = 16
step_fc_hazlayer_origin = 17
step_fc_hazlayer_from_canvas = 18
step_fc_hazlayer_from_browser = 19
step_fc_explayer_origin = 20
step_fc_explayer_from_canvas = 21
step_fc_explayer_from_browser = 22
step_fc_disjoint_layers = 23
step_fc_agglayer_origin = 24
step_fc_agglayer_from_canvas = 25
step_fc_agglayer_from_browser = 26
step_fc_agglayer_disjoint = 27
step_fc_extent = 28
step_fc_extent_disjoint = 29
step_fc_params = 30
step_fc_summary = 31
step_fc_analysis = 32
# Aggregations' keywords
DEFAULTS = get_defaults()
female_ratio_attribute_key = DEFAULTS['FEMALE_RATIO_ATTR_KEY']
female_ratio_default_key = DEFAULTS['FEMALE_RATIO_KEY']
youth_ratio_attribute_key = DEFAULTS['YOUTH_RATIO_ATTR_KEY']
youth_ratio_default_key = DEFAULTS['YOUTH_RATIO_KEY']
adult_ratio_attribute_key = DEFAULTS['ADULT_RATIO_ATTR_KEY']
adult_ratio_default_key = DEFAULTS['ADULT_RATIO_KEY']
elderly_ratio_attribute_key = DEFAULTS['ELDERLY_RATIO_ATTR_KEY']
elderly_ratio_default_key = DEFAULTS['ELDERLY_RATIO_KEY']
# Data roles
RoleFunctions = QtCore.Qt.UserRole
RoleHazard = QtCore.Qt.UserRole + 1
RoleExposure = QtCore.Qt.UserRole + 2
RoleHazardConstraint = QtCore.Qt.UserRole + 3
RoleExposureConstraint = QtCore.Qt.UserRole + 4
def get_question_text(constant):
"""Find a constant by name and return its value.
:param constant: The name of the constant to look for.
:type constant: string
:returns: The value of the constant or red error message.
:rtype: string
"""
try:
# TODO Eval = bad
return eval(constant) # pylint: disable=eval-used
except NameError:
return '<b>MISSING CONSTANT: %s</b>' % constant
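# A minimal eval-free alternative (illustrative sketch only; the wizard
# still uses get_question_text above): look the constant up in this
# module's namespace instead of evaluating arbitrary code.
def get_question_text_safe(constant):
    """Return the value of a module-level constant by name, without eval."""
    try:
        return globals()[constant]
    except KeyError:
        return '<b>MISSING CONSTANT: %s</b>' % constant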
class LayerBrowserProxyModel(QSortFilterProxyModel):
"""Proxy model for hiding unsupported branches in the layer browser."""
def __init__(self, parent):
"""Constructor for the model.
:param parent: Parent widget of this model.
:type parent: QWidget
"""
QSortFilterProxyModel.__init__(self, parent)
def filterAcceptsRow(self, source_row, source_parent):
"""The filter method
.. note:: This filter hides top-level items of unsupported branches
and also leaf items containing xml files.
Enabled root items: QgsDirectoryItem, QgsFavouritesItem,
QgsPGRootItem.
Disabled root items: QgsMssqlRootItem, QgsSLRootItem,
QgsOWSRootItem, QgsWCSRootItem, QgsWFSRootItem, QgsWMSRootItem.
Disabled leaf items: QgsLayerItem and QgsOgrLayerItem with path
ending with '.xml'
:param source_row: Parent widget of the model
:type source_row: int
:param source_parent: Parent item index
:type source_parent: QModelIndex
:returns: Item validation result
:rtype: bool
"""
source_index = self.sourceModel().index(source_row, 0, source_parent)
item = self.sourceModel().dataItem(source_index)
if item.metaObject().className() in [
'QgsMssqlRootItem',
'QgsSLRootItem',
'QgsOWSRootItem',
'QgsWCSRootItem',
'QgsWFSRootItem',
'QgsWMSRootItem']:
return False
if (item.metaObject().className() in [
'QgsLayerItem',
'QgsOgrLayerItem'] and
item.path().endswith('.xml')):
return False
return True
class WizardDialog(QDialog, FORM_CLASS):
"""Dialog implementation class for the InaSAFE wizard."""
def __init__(self, parent=None, iface=None, dock=None):
"""Constructor for the dialog.
.. note:: In QtDesigner the advanced editor's predefined keywords
            list should be shown in English always, so when adding entries to
cboKeyword, be sure to choose :safe_qgis:`Properties<<` and untick
the :safe_qgis:`translatable` property.
:param parent: Parent widget of this dialog.
:type parent: QWidget
:param iface: QGIS QGisAppInterface instance.
:type iface: QGisAppInterface
:param dock: Dock widget instance that we can notify of changes to
the keywords. Optional.
:type dock: Dock
"""
QDialog.__init__(self, parent)
self.setupUi(self)
self.setWindowTitle('InaSAFE')
# Constants
self.keyword_creation_wizard_name = 'InaSAFE Keywords Creation Wizard'
self.ifcw_name = 'InaSAFE Impact Function Centric Wizard'
# Note the keys should remain untranslated as we need to write
# english to the keywords file.
# Save reference to the QGIS interface and parent
self.iface = iface
self.parent = parent
self.dock = dock
self.suppress_warning_dialog = False
self.set_tool_tip()
# Set icons
self.lblMainIcon.setPixmap(
QPixmap(resources_path('img', 'icons', 'icon-white.svg')))
self.lblIconDisjoint_1.setPixmap(
QPixmap(resources_path('img', 'wizard', 'icon-stop.svg')))
self.lblIconDisjoint_2.setPixmap(
QPixmap(resources_path('img', 'wizard', 'icon-stop.svg')))
self.lblIconDisjoint_3.setPixmap(
QPixmap(resources_path('img', 'wizard', 'icon-stop.svg')))
# Set models for browsers
browser_model = QgsBrowserModel()
proxy_model = LayerBrowserProxyModel(self)
proxy_model.setSourceModel(browser_model)
self.tvBrowserHazard.setModel(proxy_model)
browser_model = QgsBrowserModel()
proxy_model = LayerBrowserProxyModel(self)
proxy_model.setSourceModel(browser_model)
self.tvBrowserExposure.setModel(proxy_model)
browser_model = QgsBrowserModel()
proxy_model = LayerBrowserProxyModel(self)
proxy_model.setSourceModel(browser_model)
self.tvBrowserAggregation.setModel(proxy_model)
self.parameter_dialog = None
self.extent_dialog = None
self.keyword_io = KeywordIO()
self.twParams = None
self.swExtent = None
self.is_selected_layer_keywordless = False
self.parent_step = None
self.pbnBack.setEnabled(False)
self.pbnNext.setEnabled(False)
# Collect some serial widgets
self.extra_keywords_widgets = [
{'cbo': self.cboExtraKeyword1, 'lbl': self.lblExtraKeyword1},
{'cbo': self.cboExtraKeyword2, 'lbl': self.lblExtraKeyword2},
{'cbo': self.cboExtraKeyword3, 'lbl': self.lblExtraKeyword3},
{'cbo': self.cboExtraKeyword4, 'lbl': self.lblExtraKeyword4},
{'cbo': self.cboExtraKeyword5, 'lbl': self.lblExtraKeyword5},
{'cbo': self.cboExtraKeyword6, 'lbl': self.lblExtraKeyword6},
{'cbo': self.cboExtraKeyword7, 'lbl': self.lblExtraKeyword7},
{'cbo': self.cboExtraKeyword8, 'lbl': self.lblExtraKeyword8}
]
for ekw in self.extra_keywords_widgets:
ekw['key'] = None
ekw['slave_key'] = None
# noinspection PyUnresolvedReferences
self.tvBrowserHazard.selectionModel().selectionChanged.connect(
self.tvBrowserHazard_selection_changed)
self.tvBrowserExposure.selectionModel().selectionChanged.connect(
self.tvBrowserExposure_selection_changed)
self.tvBrowserAggregation.selectionModel().selectionChanged.connect(
self.tvBrowserAggregation_selection_changed)
self.treeClasses.itemChanged.connect(self.update_dragged_item_flags)
self.pbnCancel.released.connect(self.reject)
# string constants
self.global_default_string = global_default_attribute['name']
self.global_default_data = global_default_attribute['id']
self.do_not_use_string = do_not_use_attribute['name']
self.do_not_use_data = do_not_use_attribute['id']
self.defaults = get_defaults()
# Initialize attributes
self.impact_function_manager = ImpactFunctionManager()
self.existing_keywords = None
self.layer = None
self.hazard_layer = None
self.exposure_layer = None
self.aggregation_layer = None
self.if_params = None
self.analysis_handler = None
def set_mode_label_to_keywords_creation(self):
"""Set the mode label to the Keywords Creation/Update mode
"""
self.setWindowTitle(self.keyword_creation_wizard_name)
if self.get_existing_keyword('layer_purpose'):
mode_name = (self.tr(
'Keywords update wizard for layer <b>%s</b>'
) % self.layer.name())
else:
mode_name = (self.tr(
'Keywords creation wizard for layer <b>%s</b>'
) % self.layer.name())
self.lblSubtitle.setText(mode_name)
def set_mode_label_to_ifcw(self):
"""Set the mode label to the IFCW
"""
self.setWindowTitle(self.ifcw_name)
self.lblSubtitle.setText(self.tr(
'Use this wizard to run a guided impact assessment'))
def set_keywords_creation_mode(self, layer=None):
"""Set the Wizard to the Keywords Creation mode
:param layer: Layer to set the keywords for
:type layer: QgsMapLayer
"""
self.layer = layer or self.iface.mapCanvas().currentLayer()
try:
self.existing_keywords = self.keyword_io.read_keywords(self.layer)
# if 'layer_purpose' not in self.existing_keywords:
# self.existing_keywords = None
except (HashNotFoundError,
OperationalError,
NoKeywordsFoundError,
KeywordNotFoundError,
InvalidParameterError,
UnsupportedProviderError):
self.existing_keywords = None
self.set_mode_label_to_keywords_creation()
self.set_widgets_step_kw_category()
self.go_to_step(step_kw_category)
def set_function_centric_mode(self):
"""Set the Wizard to the Function Centric mode"""
self.set_mode_label_to_ifcw()
new_step = step_fc_function_1
self.set_widgets_step_fc_function_1()
self.pbnNext.setEnabled(self.is_ready_to_next_step(new_step))
self.go_to_step(new_step)
def update_MessageViewer_size(self):
"""Update maximumHeight size of the MessageViewer to fit its parent tab
This is a workaround for a bug that makes MessageViewer
flooding up to maximumHeight on Windows.
"""
self.wvResults.setMaximumHeight(self.pgF25Progress.height() - 90)
# pylint: disable=unused-argument
def resizeEvent(self, ev):
"""Trigger MessageViewer size update on window resize
.. note:: This is an automatic Qt slot
executed when the window size changes.
"""
pass
# self.update_MessageViewer_size()
# pylint: disable=unused-argument
def purposes_for_layer(self):
"""Return a list of valid purposes for the current layer.
:returns: A list where each value represents a valid purpose.
:rtype: list
"""
layer_geometry_id = self.get_layer_geometry_id()
return self.impact_function_manager.purposes_for_layer(
layer_geometry_id)
def subcategories_for_layer(self):
"""Return a list of valid subcategories for a layer.
Subcategory is hazard type or exposure type.
:returns: A list where each value represents a valid subcategory.
:rtype: list
"""
purpose = self.selected_category()
layer_geometry_id = self.get_layer_geometry_id()
if purpose == layer_purpose_hazard:
return self.impact_function_manager.hazards_for_layer(
layer_geometry_id)
elif purpose == layer_purpose_exposure:
return self.impact_function_manager.exposures_for_layer(
layer_geometry_id)
def hazard_categories_for_layer(self):
"""Return a list of valid hazard categories for a layer.
:returns: A list where each value represents a valid hazard category.
:rtype: list
"""
layer_geometry_id = self.get_layer_geometry_id()
if self.selected_category() != layer_purpose_hazard:
return []
hazard_type_id = self.selected_subcategory()['key']
return self.impact_function_manager.hazard_categories_for_layer(
layer_geometry_id, hazard_type_id)
def layermodes_for_layer(self):
"""Return a list of valid layer modes for a layer.
:returns: A list where each value represents a valid layer mode.
:rtype: list
"""
purpose = self.selected_category()
subcategory = self.selected_subcategory()
layer_geometry_id = self.get_layer_geometry_id()
if purpose == layer_purpose_hazard:
hazard_category = self.selected_hazard_category()
return self.impact_function_manager.available_hazard_layer_modes(
subcategory['key'], layer_geometry_id, hazard_category['key'])
elif purpose == layer_purpose_exposure:
return self.impact_function_manager.available_exposure_layer_modes(
subcategory['key'], layer_geometry_id)
def classifications_for_layer(self):
"""Return a list of valid classifications for a layer.
:returns: A list where each value represents a valid classification.
:rtype: list
"""
layer_geometry_id = self.get_layer_geometry_id()
layer_mode_id = self.selected_layermode()['key']
subcategory_id = self.selected_subcategory()['key']
if self.selected_category() == layer_purpose_hazard:
hazard_category_id = self.selected_hazard_category()['key']
if is_raster_layer(self.layer):
return self.impact_function_manager.\
raster_hazards_classifications_for_layer(
subcategory_id,
layer_geometry_id,
layer_mode_id,
hazard_category_id)
else:
return self.impact_function_manager\
.vector_hazards_classifications_for_layer(
subcategory_id,
layer_geometry_id,
layer_mode_id,
hazard_category_id)
else:
# There are no classifications for exposures defined yet
return []
def additional_keywords_for_the_layer(self):
"""Return a list of valid additional keywords for the current layer.
        :returns: A list where each value represents a valid additional
            keyword.
:rtype: list
"""
layer_geometry_key = self.get_layer_geometry_id()
layer_mode_key = self.selected_layermode()['key']
if self.selected_category() == layer_purpose_hazard:
hazard_category_key = self.selected_hazard_category()['key']
hazard_key = self.selected_subcategory()['key']
return self.impact_function_manager.hazard_additional_keywords(
layer_mode_key, layer_geometry_key,
hazard_category_key, hazard_key)
else:
exposure_key = self.selected_subcategory()['key']
return self.impact_function_manager.exposure_additional_keywords(
layer_mode_key, layer_geometry_key, exposure_key)
def field_keyword_for_the_layer(self):
"""Return the proper keyword for field for the current layer.
        Expected values are: 'field', 'structure_class_field',
        'road_class_field'.
:returns: the field keyword
:rtype: string
"""
if self.selected_category() == layer_purpose_aggregation:
# purpose: aggregation
return 'aggregation attribute'
elif self.selected_category() == layer_purpose_hazard:
# purpose: hazard
if (self.selected_layermode() == layer_mode_classified and
is_point_layer(self.layer)):
# No field for classified point hazards
return ''
else:
# purpose: exposure
layer_mode_key = self.selected_layermode()['key']
layer_geometry_key = self.get_layer_geometry_id()
exposure_key = self.selected_subcategory()['key']
exposure_class_fields = self.impact_function_manager.\
exposure_class_fields(
layer_mode_key, layer_geometry_key, exposure_key)
if exposure_class_fields and len(exposure_class_fields) == 1:
return exposure_class_fields[0]['key']
# Fallback to default
return 'field'
# ===========================
# STEP_KW_CATEGORY
# ===========================
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_lstCategories_itemSelectionChanged(self):
"""Update purpose description label.
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
# Clear all further steps in order to properly calculate the prev step
self.lstHazardCategories.clear()
self.lstSubcategories.clear()
self.lstLayerModes.clear()
self.lstUnits.clear()
self.lstFields.clear()
self.lstClassifications.clear()
# Set widgets
category = self.selected_category()
# Exit if no selection
if not category:
return
# Set description label
self.lblDescribeCategory.setText(category["description"])
self.lblIconCategory.setPixmap(QPixmap(
resources_path('img', 'wizard', 'keyword-category-%s.svg'
% (category['key'] or 'notset'))))
# Enable the next button
self.pbnNext.setEnabled(True)
def selected_category(self):
"""Obtain the layer purpose selected by user.
:returns: Metadata of the selected layer purpose.
:rtype: dict, None
"""
item = self.lstCategories.currentItem()
try:
# pylint: disable=eval-used
return eval(item.data(QtCore.Qt.UserRole))
# pylint: enable=eval-used
except (AttributeError, NameError):
return None
def set_widgets_step_kw_category(self):
"""Set widgets on the layer purpose tab."""
# Clear all further steps in order to properly calculate the prev step
self.lstHazardCategories.clear()
self.lstSubcategories.clear()
self.lstLayerModes.clear()
self.lstUnits.clear()
self.lstFields.clear()
self.lstClassifications.clear()
# Set widgets
self.lstCategories.clear()
self.lblDescribeCategory.setText('')
self.lblIconCategory.setPixmap(QPixmap())
self.lblSelectCategory.setText(
category_question % self.layer.name())
categories = self.purposes_for_layer()
if self.get_layer_geometry_id() == 'polygon':
categories += ['aggregation']
for category in categories:
if not isinstance(category, dict):
# pylint: disable=eval-used
category = eval('definitions.layer_purpose_%s' % category)
# pylint: enable=eval-used
item = QListWidgetItem(category['name'], self.lstCategories)
item.setData(QtCore.Qt.UserRole, unicode(category))
self.lstCategories.addItem(item)
# Check if layer keywords are already assigned
category_keyword = self.get_existing_keyword('layer_purpose')
# Check if it's KW mode embedded in IFCW mode
if self.parent_step:
if self.parent_step in [step_fc_hazlayer_from_canvas,
step_fc_hazlayer_from_browser]:
category_keyword = layer_purpose_hazard['key']
elif self.parent_step in [step_fc_explayer_from_canvas,
step_fc_explayer_from_browser]:
category_keyword = layer_purpose_exposure['key']
else:
category_keyword = layer_purpose_aggregation['key']
# Set values based on existing keywords or parent mode
if category_keyword:
categories = []
for index in xrange(self.lstCategories.count()):
item = self.lstCategories.item(index)
# pylint: disable=eval-used
category = eval(item.data(QtCore.Qt.UserRole))
# pylint: enable=eval-used
categories.append(category['key'])
if category_keyword in categories:
self.lstCategories.setCurrentRow(
categories.index(category_keyword))
self.auto_select_one_item(self.lstCategories)
# ===========================
# STEP_KW_SUBCATEGORY
# ===========================
# noinspection PyPep8Naming
def on_lstSubcategories_itemSelectionChanged(self):
"""Update subcategory description label.
.. note:: This is an automatic Qt slot
executed when the subcategory selection changes.
"""
# Clear all further steps in order to properly calculate the prev step
self.lstHazardCategories.clear()
self.lstLayerModes.clear()
self.lstUnits.clear()
self.lstFields.clear()
self.lstClassifications.clear()
# Set widgets
subcategory = self.selected_subcategory()
# Exit if no selection
if not subcategory:
return
# Set description label
self.lblDescribeSubcategory.setText(subcategory['description'])
icon_path = resources_path('img', 'wizard',
'keyword-subcategory-%s.svg'
% (subcategory['key'] or 'notset'))
if not os.path.exists(icon_path):
category = self.selected_category()
icon_path = resources_path('img', 'wizard',
'keyword-category-%s.svg'
% (category['key']))
self.lblIconSubcategory.setPixmap(QPixmap(icon_path))
# Enable the next button
self.pbnNext.setEnabled(True)
def selected_subcategory(self):
"""Obtain the subcategory selected by user.
:returns: Metadata of the selected subcategory.
:rtype: dict, None
"""
item = self.lstSubcategories.currentItem()
try:
# pylint: disable=eval-used
return eval(item.data(QtCore.Qt.UserRole))
# pylint: enable=eval-used
except (AttributeError, NameError):
return None
def set_widgets_step_kw_subcategory(self):
"""Set widgets on the Subcategory tab."""
# Clear all further steps in order to properly calculate the prev step
self.lstHazardCategories.clear()
self.lstLayerModes.clear()
self.lstUnits.clear()
self.lstFields.clear()
self.lstClassifications.clear()
# Set widgets
category = self.selected_category()
self.lstSubcategories.clear()
self.lblDescribeSubcategory.setText('')
self.lblIconSubcategory.setPixmap(QPixmap())
self.lblSelectSubcategory.setText(
get_question_text('%s_question' % category['key']))
for i in self.subcategories_for_layer():
item = QListWidgetItem(i['name'], self.lstSubcategories)
item.setData(QtCore.Qt.UserRole, unicode(i))
self.lstSubcategories.addItem(item)
# Check if layer keywords are already assigned
key = self.selected_category()['key']
keyword = self.get_existing_keyword(key)
# Check if it's KW mode embedded in IFCW
if self.parent_step:
h, e, _hc, _ec = self.selected_impact_function_constraints()
if self.parent_step in [step_fc_hazlayer_from_canvas,
step_fc_hazlayer_from_browser]:
keyword = h['key']
elif self.parent_step in [step_fc_explayer_from_canvas,
step_fc_explayer_from_browser]:
keyword = e['key']
# Set values based on existing keywords or parent mode
if keyword:
subcategories = []
for index in xrange(self.lstSubcategories.count()):
item = self.lstSubcategories.item(index)
# pylint: disable=eval-used
subcategory = eval(item.data(QtCore.Qt.UserRole))
# pylint: enable=eval-used
subcategories.append(subcategory['key'])
if keyword in subcategories:
self.lstSubcategories.setCurrentRow(
subcategories.index(keyword))
self.auto_select_one_item(self.lstSubcategories)
# ===========================
# STEP_KW_HAZARD_CATEGORY
# ===========================
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_lstHazardCategories_itemSelectionChanged(self):
"""Update hazard category description label.
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
# Clear all further steps in order to properly calculate the prev step
self.lstLayerModes.clear()
self.lstUnits.clear()
self.lstFields.clear()
self.lstClassifications.clear()
# Set widgets
hazard_category = self.selected_hazard_category()
# Exit if no selection
if not hazard_category:
return
# Set description label
self.lblDescribeHazardCategory.setText(hazard_category["description"])
# Enable the next button
self.pbnNext.setEnabled(True)
def selected_hazard_category(self):
"""Obtain the hazard category selected by user.
:returns: Metadata of the selected hazard category.
:rtype: dict, None
"""
item = self.lstHazardCategories.currentItem()
try:
# pylint: disable=eval-used
return eval(item.data(QtCore.Qt.UserRole))
# pylint: enable=eval-used
except (AttributeError, NameError):
return None
def set_widgets_step_kw_hazard_category(self):
"""Set widgets on the Hazard Category tab."""
# Clear all further steps in order to properly calculate the prev step
self.lstLayerModes.clear()
self.lstUnits.clear()
self.lstFields.clear()
self.lstClassifications.clear()
# Set widgets
self.lstHazardCategories.clear()
self.lblDescribeHazardCategory.setText('')
self.lblSelectHazardCategory.setText(
hazard_category_question)
hazard_categories = self.hazard_categories_for_layer()
for hazard_category in hazard_categories:
if not isinstance(hazard_category, dict):
# pylint: disable=eval-used
hazard_category = eval('definitions.hazard_category_%s'
% hazard_category)
# pylint: enable=eval-used
item = QListWidgetItem(hazard_category['name'],
self.lstHazardCategories)
item.setData(QtCore.Qt.UserRole, unicode(hazard_category))
self.lstHazardCategories.addItem(item)
# Set values based on existing keywords (if already assigned)
category_keyword = self.get_existing_keyword('hazard_category')
if category_keyword:
categories = []
for index in xrange(self.lstHazardCategories.count()):
item = self.lstHazardCategories.item(index)
# pylint: disable=eval-used
hazard_category = eval(item.data(QtCore.Qt.UserRole))
# pylint: enable=eval-used
categories.append(hazard_category['key'])
if category_keyword in categories:
self.lstHazardCategories.setCurrentRow(
categories.index(category_keyword))
self.auto_select_one_item(self.lstHazardCategories)
# ===========================
# STEP_KW_LAYERMODE
# ===========================
# noinspection PyPep8Naming
def on_lstLayerModes_itemSelectionChanged(self):
"""Update layer mode description label and unit widgets.
.. note:: This is an automatic Qt slot
executed when the subcategory selection changes.
"""
# Clear all further steps in order to properly calculate the prev step
self.lstUnits.clear()
self.lstFields.clear()
self.lstClassifications.clear()
# Set widgets
layer_mode = self.selected_layermode()
# Exit if no selection
if not layer_mode:
self.lblDescribeLayerMode.setText('')
return
# Set description label
self.lblDescribeLayerMode.setText(layer_mode['description'])
# Enable the next button
self.pbnNext.setEnabled(True)
def selected_layermode(self):
"""Obtain the layer mode selected by user.
:returns: selected layer mode.
:rtype: string, None
"""
item = self.lstLayerModes.currentItem()
try:
# pylint: disable=eval-used
return eval(item.data(QtCore.Qt.UserRole))
# pylint: enable=eval-used
except (AttributeError, NameError):
return None
def set_widgets_step_kw_layermode(self):
"""Set widgets on the LayerMode tab."""
# Clear all further steps in order to properly calculate the prev step
self.lstUnits.clear()
self.lstFields.clear()
self.lstClassifications.clear()
# Set widgets
category = self.selected_category()
subcategory = self.selected_subcategory()
layer_mode_question = (
layermode_raster_question
if is_raster_layer(self.layer)
else layermode_vector_question)
self.lblSelectLayerMode.setText(
layer_mode_question % (subcategory['name'], category['name']))
self.lblDescribeLayerMode.setText('')
self.lstLayerModes.clear()
self.lstUnits.clear()
self.lstFields.clear()
layer_modes = self.layermodes_for_layer()
for layer_mode in layer_modes:
item = QListWidgetItem(layer_mode['name'], self.lstLayerModes)
item.setData(QtCore.Qt.UserRole, unicode(layer_mode))
            self.lstLayerModes.addItem(item)
# Set value to existing keyword or default value
layermode_keys = [m['key'] for m in layer_modes]
layermode_keyword = self.get_existing_keyword('layer_mode')
if layermode_keyword in layermode_keys:
indx = layermode_keys.index(layermode_keyword)
elif layer_mode_continuous['key'] in layermode_keys:
# Set default value
indx = layermode_keys.index(layer_mode_continuous['key'])
else:
indx = -1
self.lstLayerModes.setCurrentRow(indx)
self.auto_select_one_item(self.lstLayerModes)
# ===========================
# STEP_KW_UNIT
# ===========================
# noinspection PyPep8Naming
def on_lstUnits_itemSelectionChanged(self):
"""Update unit description label and field widgets.
.. note:: This is an automatic Qt slot
executed when the unit selection changes.
"""
# Clear all further steps in order to properly calculate the prev step
self.lstFields.clear()
self.lstClassifications.clear()
# Set widgets
unit = self.selected_unit()
# Exit if no selection
if not unit:
return
self.lblDescribeUnit.setText(unit['description'])
# Enable the next button
self.pbnNext.setEnabled(True)
def selected_unit(self):
"""Obtain the unit selected by user.
:returns: Metadata of the selected unit.
:rtype: dict, None
"""
item = self.lstUnits.currentItem()
try:
# pylint: disable=eval-used
return eval(item.data(QtCore.Qt.UserRole))
# pylint: enable=eval-used
except (AttributeError, NameError):
return None
def set_widgets_step_kw_unit(self):
"""Set widgets on the Unit tab."""
# Clear all further steps in order to properly calculate the prev step
self.lstFields.clear()
self.lstClassifications.clear()
# Set widgets
category = self.selected_category()
subcategory = self.selected_subcategory()
self.lblSelectUnit.setText(
unit_question % (subcategory['name'], category['name']))
self.lblDescribeUnit.setText('')
self.lstUnits.clear()
subcat = self.selected_subcategory()['key']
laygeo = self.get_layer_geometry_id()
laymod = self.selected_layermode()['key']
if category == layer_purpose_hazard:
hazcat = self.selected_hazard_category()['key']
units_for_layer = self.impact_function_manager.\
continuous_hazards_units_for_layer(
subcat, laygeo, laymod, hazcat)
else:
units_for_layer = self.impact_function_manager\
.exposure_units_for_layer(
subcat, laygeo, laymod)
for unit_for_layer in units_for_layer:
# if (self.get_layer_geometry_id() == 'raster' and
# 'constraint' in unit_for_layer and
# unit_for_layer['constraint'] == 'categorical'):
# continue
# else:
item = QListWidgetItem(unit_for_layer['name'], self.lstUnits)
item.setData(QtCore.Qt.UserRole, unicode(unit_for_layer))
self.lstUnits.addItem(item)
# Set values based on existing keywords (if already assigned)
if self.selected_category() == layer_purpose_hazard:
key = continuous_hazard_unit['key']
else:
key = exposure_unit['key']
unit_id = self.get_existing_keyword(key)
# unit_id = definitions.old_to_new_unit_id(unit_id)
if unit_id:
units = []
for index in xrange(self.lstUnits.count()):
item = self.lstUnits.item(index)
# pylint: disable=eval-used
unit = eval(item.data(QtCore.Qt.UserRole))
# pylint: enable=eval-used
units.append(unit['key'])
if unit_id in units:
self.lstUnits.setCurrentRow(units.index(unit_id))
self.auto_select_one_item(self.lstUnits)
# ===========================
# STEP_KW_CLASSIFICATION
# ===========================
def on_lstClassifications_itemSelectionChanged(self):
"""Update classification description label and unlock the Next button.
.. note:: This is an automatic Qt slot
executed when the field selection changes.
"""
self.lstFields.clear()
self.treeClasses.clear()
classification = self.selected_classification()
# Exit if no selection
if not classification:
return
# Set description label
self.lblDescribeClassification.setText(classification["description"])
# Enable the next button
self.pbnNext.setEnabled(True)
def selected_classification(self):
"""Obtain the classification selected by user.
:returns: Metadata of the selected classification.
:rtype: dict, None
"""
item = self.lstClassifications.currentItem()
try:
# pylint: disable=eval-used
return eval(item.data(QtCore.Qt.UserRole))
# pylint: enable=eval-used
except (AttributeError, NameError):
return None
def set_widgets_step_kw_classification(self):
"""Set widgets on the Classification tab."""
self.lstFields.clear()
self.treeClasses.clear()
category = self.selected_category()['name']
subcategory = self.selected_subcategory()['name']
self.lstClassifications.clear()
self.lblDescribeClassification.setText('')
self.lblSelectClassification.setText(
classification_question % (subcategory, category))
classifications = self.classifications_for_layer()
for classification in classifications:
if not isinstance(classification, dict):
# pylint: disable=eval-used
classification = eval('definitions.%s' % classification)
# pylint: enable=eval-used
item = QListWidgetItem(classification['name'],
self.lstClassifications)
item.setData(QtCore.Qt.UserRole, unicode(classification))
self.lstClassifications.addItem(item)
# Set values based on existing keywords (if already assigned)
geom = 'raster' if is_raster_layer(self.layer) else 'vector'
key = '%s_%s_classification' % (geom, self.selected_category()['key'])
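        # e.g. 'raster_hazard_classification' for a classified raster hazard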
classification_keyword = self.get_existing_keyword(key)
if classification_keyword:
classifications = []
for index in xrange(self.lstClassifications.count()):
item = self.lstClassifications.item(index)
# pylint: disable=eval-used
classification = eval(item.data(QtCore.Qt.UserRole))
# pylint: enable=eval-used
classifications.append(classification['key'])
if classification_keyword in classifications:
self.lstClassifications.setCurrentRow(
classifications.index(classification_keyword))
self.auto_select_one_item(self.lstClassifications)
# ===========================
# STEP_KW_FIELD
# ===========================
# noinspection PyPep8Naming
def on_lstFields_itemSelectionChanged(self):
"""Update field description label and unlock the Next button.
.. note:: This is an automatic Qt slot
executed when the field selection changes.
"""
self.treeClasses.clear()
field = self.selected_field()
# Exit if no selection
if not field:
return
fields = self.layer.dataProvider().fields()
field_type = fields.field(field).typeName()
field_index = fields.indexFromName(self.selected_field())
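        # Show at most 48 unique values in the description and append an
        # ellipsis when the field contains more than that.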
unique_values = self.layer.uniqueValues(field_index)[0:48]
unique_values_str = [i is not None and unicode(i) or 'NULL'
for i in unique_values]
if unique_values != self.layer.uniqueValues(field_index):
unique_values_str += ['...']
desc = '<br/>%s: %s<br/><br/>' % (self.tr('Field type'), field_type)
desc += self.tr('Unique values: %s') % ', '.join(unique_values_str)
self.lblDescribeField.setText(desc)
# Enable the next button
self.pbnNext.setEnabled(True)
def selected_field(self):
"""Obtain the field selected by user.
:returns: Keyword of the selected field.
:rtype: string, None
"""
item = self.lstFields.currentItem()
if item:
return item.text()
else:
return None
def set_widgets_step_kw_field(self):
"""Set widgets on the Field tab."""
self.treeClasses.clear()
category = self.selected_category()
subcategory = self.selected_subcategory()
unit = self.selected_unit()
if category == layer_purpose_aggregation:
question_text = field_question_aggregation
elif self.selected_layermode() == layer_mode_continuous and unit:
# unique values, continuous or categorical data
subcategory_unit_relation = get_question_text(
'%s_%s_question' % (subcategory['key'], unit['key']))
question_text = field_question_subcategory_unit % (
category['name'],
subcategory['name'],
unit['name'],
subcategory_unit_relation)
else:
question_text = field_question_subcategory_classified % (
subcategory['name'])
self.lblSelectField.setText(question_text)
self.lstFields.clear()
default_item = None
for field in self.layer.dataProvider().fields():
field_name = field.name()
item = QListWidgetItem(field_name, self.lstFields)
item.setData(QtCore.Qt.UserRole, field_name)
# Select the item if it match the unit's default_attribute
if unit and 'default_attribute' in unit \
and field_name == unit['default_attribute']:
default_item = item
# For continuous data, gray out id, gid, fid and text fields
if self.selected_layermode() == layer_mode_continuous and unit:
field_type = field.type()
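                # Numeric QVariant types (Int, Double, ...) have ids below
                # 10; QString is 10, so > 9 flags a non-numeric field.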
if field_type > 9 or re.match(
'.{0,2}id$', field_name, re.I):
item.setFlags(item.flags() & ~QtCore.Qt.ItemIsEnabled)
if default_item:
self.lstFields.setCurrentItem(default_item)
self.lblDescribeField.clear()
# Set values based on existing keywords (if already assigned)
field_keyword = self.field_keyword_for_the_layer()
field = self.get_existing_keyword(field_keyword)
if field:
fields = []
for index in xrange(self.lstFields.count()):
fields.append(str(self.lstFields.item(index).text()))
if field in fields:
self.lstFields.setCurrentRow(fields.index(field))
self.auto_select_one_item(self.lstFields)
# ===========================
# STEP_KW_RESAMPLE
# ===========================
def selected_allowresampling(self):
"""Obtain the allow_resampling state selected by user.
        .. note:: Returns None if not set or not relevant.
:returns: Value of the allow_resampling or None for not-set.
:rtype: boolean or None
"""
if not is_raster_layer(self.layer):
return None
if self.selected_category() != layer_purpose_exposure:
return None
# Only return false if checked, otherwise None for not-set.
if self.chkAllowResample.isChecked():
return False
else:
return None
def set_widgets_step_kw_resample(self):
"""Set widgets on the Resample tab."""
category = self.selected_category()
subcategory = self.selected_subcategory()
layer_mode = self.selected_layermode()
self.lblSelectAllowResample.setText(
allow_resampling_question % (
subcategory['name'], category['name'], layer_mode['name']))
# Set value based on existing keyword (if already assigned)
if self.get_existing_keyword('allow_resampling') is False:
self.chkAllowResample.setChecked(True)
# ===========================
# STEP_KW_CLASSIFY
# ===========================
# noinspection PyMethodMayBeStatic
def update_dragged_item_flags(self, item, column):
"""Fix the drop flag after the item is dropped.
Check if it looks like an item dragged from QListWidget
to QTreeWidget and disable the drop flag.
        For some reason the flag is set when dragging.
        :param item: The tree widget item that was dropped.
        :param column: The item column (unused).
.. note:: This is a slot executed when the item change.
"""
# Treat var as unused
_ = column
if int(item.flags() & QtCore.Qt.ItemIsDropEnabled) \
and int(item.flags() & QtCore.Qt.ItemIsDragEnabled):
item.setFlags(item.flags() & ~QtCore.Qt.ItemIsDropEnabled)
def selected_mapping(self):
"""Obtain the value-to-class mapping set by user.
:returns: The complete mapping as a dict of lists.
:rtype: dict
"""
value_map = {}
tree_clone = self.treeClasses.invisibleRootItem().clone()
for tree_branch in tree_clone.takeChildren():
value_list = []
for tree_leaf in tree_branch.takeChildren():
value_list += [tree_leaf.data(0, QtCore.Qt.UserRole)]
if value_list:
value_map[tree_branch.text(0)] = value_list
return value_map
def set_widgets_step_kw_classify(self):
"""Set widgets on the Classify tab."""
category = self.selected_category()
subcategory = self.selected_subcategory()
classification = self.selected_classification()
default_classes = classification['classes']
if is_raster_layer(self.layer):
self.lblClassify.setText(classify_raster_question % (
subcategory['name'], category['name'], classification['name']))
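            # Read the whole first band into memory to list its unique
            # values; this can be memory-hungry for very large rasters.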
ds = gdal.Open(self.layer.source(), GA_ReadOnly)
unique_values = numpy.unique(numpy.array(
ds.GetRasterBand(1).ReadAsArray()))
field_type = 0
else:
field = self.selected_field()
field_index = self.layer.dataProvider().fields().indexFromName(
self.selected_field())
field_type = self.layer.dataProvider().fields()[field_index].type()
self.lblClassify.setText(classify_vector_question % (
subcategory['name'], category['name'],
classification['name'], field.upper()))
unique_values = self.layer.uniqueValues(field_index)
        # Assign unique values to classes (according to defaults)
unassigned_values = list()
assigned_values = dict()
for default_class in default_classes:
assigned_values[default_class['name']] = list()
for unique_value in unique_values:
if unique_value is None or isinstance(
unique_value, QPyNullVariant):
# Don't classify features with NULL value
continue
value_as_string = unicode(unique_value)
assigned = False
for default_class in default_classes:
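                # String fields (QVariant type id > 9): match the value
                # against the class's default string labels, ignoring case.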
condition_1 = (
field_type > 9 and
value_as_string.upper() in [
c.upper() for c in default_class['string_defaults']])
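                # Numeric fields: match when the value falls inside the
                # class's default numeric range.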
condition_2 = (
field_type < 10 and (
default_class['numeric_default_min'] <= unique_value <=
default_class['numeric_default_max']))
if condition_1 or condition_2:
assigned_values[default_class['name']] += [unique_value]
assigned = True
if not assigned:
# add to unassigned values list otherwise
unassigned_values += [unique_value]
self.populate_classified_values(
unassigned_values, assigned_values, default_classes)
# Overwrite assigned values according to existing keyword (if present).
# Note the default_classes and unique_values are already loaded!
value_map = self.get_existing_keyword('value_map')
# Do not continue if there is no value_map in existing keywords
if value_map is None:
return
# Do not continue if user selected different field
field_keyword = self.field_keyword_for_the_layer()
field = self.get_existing_keyword(field_keyword)
if not is_raster_layer(self.layer) and field != self.selected_field():
return
unassigned_values = list()
assigned_values = dict()
for default_class in default_classes:
assigned_values[default_class['name']] = list()
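        # Keywords read from some sources store value_map as a JSON string
        # rather than a dict, so parse it defensively.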
if isinstance(value_map, str):
try:
value_map = json.loads(value_map)
except ValueError:
return
for unique_value in unique_values:
if unique_value is None or isinstance(
unique_value, QPyNullVariant):
# Don't classify features with NULL value
continue
# check in value map
assigned = False
for key, value_list in value_map.iteritems():
if unique_value in value_list and key in assigned_values:
assigned_values[key] += [unique_value]
assigned = True
if not assigned:
unassigned_values += [unique_value]
self.populate_classified_values(
unassigned_values, assigned_values, default_classes)
def populate_classified_values(
self, unassigned_values, assigned_values, default_classes):
"""Populate lstUniqueValues and treeClasses.from the parameters.
:param unassigned_values: List of values that haven't been assigned
to a class. It will be put in self.lstUniqueValues.
:type unassigned_values: list
:param assigned_values: Dictionary with class as the key and list of
value as the value of the dictionary. It will be put in
self.treeClasses.
:type assigned_values: dict
:param default_classes: Default classes from unit.
:type default_classes: list
"""
# Populate the unique values list
self.lstUniqueValues.clear()
for value in unassigned_values:
value_as_string = value is not None and unicode(value) or 'NULL'
list_item = QtGui.QListWidgetItem(self.lstUniqueValues)
list_item.setFlags(QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsSelectable |
QtCore.Qt.ItemIsDragEnabled)
list_item.setData(QtCore.Qt.UserRole, value)
list_item.setText(value_as_string)
self.lstUniqueValues.addItem(list_item)
# Populate assigned values tree
self.treeClasses.clear()
bold_font = QtGui.QFont()
bold_font.setItalic(True)
bold_font.setBold(True)
bold_font.setWeight(75)
self.treeClasses.invisibleRootItem().setFlags(
QtCore.Qt.ItemIsEnabled)
for default_class in default_classes:
# Create branch for class
tree_branch = QtGui.QTreeWidgetItem(self.treeClasses)
tree_branch.setFlags(QtCore.Qt.ItemIsDropEnabled |
QtCore.Qt.ItemIsEnabled)
tree_branch.setExpanded(True)
tree_branch.setFont(0, bold_font)
tree_branch.setText(0, default_class['name'])
if 'description' in default_class:
tree_branch.setToolTip(0, default_class['description'])
# Assign known values
for value in assigned_values[default_class['name']]:
string_value = value is not None and unicode(value) or 'NULL'
tree_leaf = QtGui.QTreeWidgetItem(tree_branch)
tree_leaf.setFlags(QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsSelectable |
QtCore.Qt.ItemIsDragEnabled)
tree_leaf.setData(0, QtCore.Qt.UserRole, value)
tree_leaf.setText(0, string_value)
# ===========================
# STEP_KW_EXTRAKEYWORDS
# ===========================
# noinspection PyPep8Naming
def on_cboExtraKeyword1_currentIndexChanged(self, indx):
"""This is an automatic Qt slot executed when the
1st extra keyword combobox selection changes.
:param indx: The new index.
:type indx: int or str
"""
if isinstance(indx, int) and indx > -1:
self.extra_keyword_changed(self.extra_keywords_widgets[0])
# noinspection PyPep8Naming
def on_cboExtraKeyword2_currentIndexChanged(self, indx):
"""This is an automatic Qt slot executed when the
2nd extra keyword combobox selection changes.
:param indx: The new index.
:type indx: int or str
"""
if isinstance(indx, int) and indx > -1:
self.extra_keyword_changed(self.extra_keywords_widgets[1])
# noinspection PyPep8Naming
def on_cboExtraKeyword3_currentIndexChanged(self, indx):
"""This is an automatic Qt slot executed when the
3rd extra keyword combobox selection changes.
:param indx: The new index.
:type indx: int or str
"""
if isinstance(indx, int) and indx > -1:
self.extra_keyword_changed(self.extra_keywords_widgets[2])
# noinspection PyPep8Naming
def on_cboExtraKeyword4_currentIndexChanged(self, indx):
"""This is an automatic Qt slot executed when the
4th extra keyword combobox selection changes.
:param indx: The new index.
:type indx: int or str
"""
if isinstance(indx, int) and indx > -1:
self.extra_keyword_changed(self.extra_keywords_widgets[3])
# noinspection PyPep8Naming
def on_cboExtraKeyword5_currentIndexChanged(self, indx):
"""This is an automatic Qt slot executed when the
5th extra keyword combobox selection changes.
:param indx: The new index.
:type indx: int or str
"""
if isinstance(indx, int) and indx > -1:
self.extra_keyword_changed(self.extra_keywords_widgets[4])
# noinspection PyPep8Naming
def on_cboExtraKeyword6_currentIndexChanged(self, indx):
"""This is an automatic Qt slot executed when the
6th extra keyword combobox selection changes.
:param indx: The new index.
:type indx: int or str
"""
if isinstance(indx, int) and indx > -1:
self.extra_keyword_changed(self.extra_keywords_widgets[5])
# noinspection PyPep8Naming
def on_cboExtraKeyword7_currentIndexChanged(self, indx):
"""This is an automatic Qt slot executed when the
7th extra keyword combobox selection changes.
:param indx: The new index.
:type indx: int or str
"""
if isinstance(indx, int) and indx > -1:
self.extra_keyword_changed(self.extra_keywords_widgets[6])
# noinspection PyPep8Naming
def on_cboExtraKeyword8_currentIndexChanged(self, indx):
"""This is an automatic Qt slot executed when the
8th extra keyword combobox selection changes.
:param indx: The new index.
:type indx: int or str
"""
if isinstance(indx, int) and indx > -1:
self.extra_keyword_changed(self.extra_keywords_widgets[7])
def extra_keyword_changed(self, widget):
"""Populate slave widget if exists and enable the Next button
if all extra keywords are set.
:param widget: Metadata of the widget where the event happened.
:type widget: dict
"""
if 'slave_key' in widget and widget['slave_key']:
for w in self.extra_keywords_widgets:
if w['key'] == widget['slave_key']:
field_name = widget['cbo'].itemData(
widget['cbo'].currentIndex(), QtCore.Qt.UserRole)
self.populate_value_widget_from_field(w['cbo'], field_name)
self.pbnNext.setEnabled(self.are_all_extra_keywords_selected())
def selected_extra_keywords(self):
"""Obtain the extra keywords selected by user.
:returns: Metadata of the extra keywords.
:rtype: dict, None
"""
extra_keywords = {}
for ekw in self.extra_keywords_widgets:
if ekw['key'] is not None and ekw['cbo'].currentIndex() != -1:
key = ekw['key']
val = ekw['cbo'].itemData(ekw['cbo'].currentIndex(),
QtCore.Qt.UserRole)
extra_keywords[key] = val
return extra_keywords
def are_all_extra_keywords_selected(self):
"""Ensure all all additional keyword are set by user
:returns: True if all additional keyword widgets are set
:rtype: boolean
"""
for ekw in self.extra_keywords_widgets:
if ekw['key'] is not None and ekw['cbo'].currentIndex() == -1:
return False
return True
def populate_value_widget_from_field(self, widget, field_name):
"""Populate the slave widget with unique values of the field
selected in the master widget.
:param widget: The widget to be populated
:type widget: QComboBox
:param field_name: Name of the field to take the values from
:type field_name: str
"""
fields = self.layer.dataProvider().fields()
field_index = fields.indexFromName(field_name)
widget.clear()
for v in self.layer.uniqueValues(field_index):
widget.addItem(unicode(v), unicode(v))
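        # Leave the combo blank so the user has to make an explicit choice.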
widget.setCurrentIndex(-1)
def set_widgets_step_kw_extrakeywords(self):
"""Set widgets on the Extra Keywords tab."""
# Hide all widgets
for ekw in self.extra_keywords_widgets:
ekw['cbo'].clear()
ekw['cbo'].hide()
ekw['lbl'].hide()
ekw['key'] = None
ekw['master_key'] = None
# Set and show used widgets
extra_keywords = self.additional_keywords_for_the_layer()
for i in range(len(extra_keywords)):
extra_keyword = extra_keywords[i]
extra_keywords_widget = self.extra_keywords_widgets[i]
extra_keywords_widget['key'] = extra_keyword['key']
extra_keywords_widget['lbl'].setText(extra_keyword['description'])
if extra_keyword['type'] == 'value':
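                # A 'value' keyword takes its choices from the field selected
                # in the immediately preceding widget.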
field_widget = self.extra_keywords_widgets[i - 1]['cbo']
field_name = field_widget.itemData(
field_widget.currentIndex(), QtCore.Qt.UserRole)
self.populate_value_widget_from_field(
extra_keywords_widget['cbo'], field_name)
else:
for field in self.layer.dataProvider().fields():
field_name = field.name()
field_type = field.typeName()
extra_keywords_widget['cbo'].addItem('%s (%s)' % (
field_name, field_type), field_name)
# If there is a master keyword, attach this widget as a slave
# to the master widget. It's used for values of a given field.
if ('master_keyword' in extra_keyword and
extra_keyword['master_keyword']):
master_key = extra_keyword['master_keyword']['key']
for master_candidate in self.extra_keywords_widgets:
if master_candidate['key'] == master_key:
master_candidate['slave_key'] = extra_keyword['key']
# Show the widget
extra_keywords_widget['cbo'].setCurrentIndex(-1)
extra_keywords_widget['lbl'].show()
extra_keywords_widget['cbo'].show()
# Set values based on existing keywords (if already assigned)
for ekw in self.extra_keywords_widgets:
if not ekw['key']:
continue
value = self.get_existing_keyword(ekw['key'])
indx = ekw['cbo'].findData(value, QtCore.Qt.UserRole)
if indx != -1:
ekw['cbo'].setCurrentIndex(indx)
# ===========================
# STEP_KW_AGGREGATION
# ===========================
# noinspection PyPep8Naming,PyMethodMayBeStatic
def on_cboFemaleRatioAttribute_currentIndexChanged(self):
"""Automatic slot executed when the female ratio attribute is changed.
When the user changes the female ratio attribute
(cboFemaleRatioAttribute), it will change the enabled value of
        dsbFemaleRatioDefault. If the value is 'Use default', enable
        dsbFemaleRatioDefault. Otherwise, disable it.
"""
value = self.cboFemaleRatioAttribute.currentText()
if value == self.global_default_string:
self.dsbFemaleRatioDefault.setEnabled(True)
else:
self.dsbFemaleRatioDefault.setEnabled(False)
# noinspection PyPep8Naming,PyMethodMayBeStatic
def on_cboYouthRatioAttribute_currentIndexChanged(self):
"""Automatic slot executed when the youth ratio attribute is changed.
When the user changes the youth ratio attribute
(cboYouthRatioAttribute), it will change the enabled value of
        dsbYouthRatioDefault. If the value is 'Use default', enable
        dsbYouthRatioDefault. Otherwise, disable it.
"""
value = self.cboYouthRatioAttribute.currentText()
if value == self.global_default_string:
self.dsbYouthRatioDefault.setEnabled(True)
else:
self.dsbYouthRatioDefault.setEnabled(False)
# noinspection PyPep8Naming,PyMethodMayBeStatic
def on_cboAdultRatioAttribute_currentIndexChanged(self):
"""Automatic slot executed when the adult ratio attribute is changed.
When the user changes the adult ratio attribute
(cboAdultRatioAttribute), it will change the enabled value of
        dsbAdultRatioDefault. If the value is 'Use default', enable
        dsbAdultRatioDefault. Otherwise, disable it.
"""
value = self.cboAdultRatioAttribute.currentText()
if value == self.global_default_string:
self.dsbAdultRatioDefault.setEnabled(True)
else:
self.dsbAdultRatioDefault.setEnabled(False)
# noinspection PyPep8Naming,PyMethodMayBeStatic
def on_cboElderlyRatioAttribute_currentIndexChanged(self):
"""Automatic slot executed when the adult ratio attribute is changed.
When the user changes the elderly ratio attribute
(cboElderlyRatioAttribute), it will change the enabled value of
        dsbElderlyRatioDefault. If the value is 'Use default', enable
        dsbElderlyRatioDefault. Otherwise, disable it.
"""
value = self.cboElderlyRatioAttribute.currentText()
if value == self.global_default_string:
self.dsbElderlyRatioDefault.setEnabled(True)
else:
self.dsbElderlyRatioDefault.setEnabled(False)
def get_aggregation_attributes(self):
"""Obtain the value of aggregation attributes set by user.
:returns: The key and value of aggregation attributes.
:rtype: dict
"""
aggregation_attributes = dict()
current_index = self.cboFemaleRatioAttribute.currentIndex()
data = self.cboFemaleRatioAttribute.itemData(current_index)
aggregation_attributes[female_ratio_attribute_key] = data
value = self.dsbFemaleRatioDefault.value()
aggregation_attributes[female_ratio_default_key] = value
current_index = self.cboYouthRatioAttribute.currentIndex()
data = self.cboYouthRatioAttribute.itemData(current_index)
aggregation_attributes[youth_ratio_attribute_key] = data
value = self.dsbYouthRatioDefault.value()
aggregation_attributes[youth_ratio_default_key] = value
current_index = self.cboAdultRatioAttribute.currentIndex()
data = self.cboAdultRatioAttribute.itemData(current_index)
aggregation_attributes[adult_ratio_attribute_key] = data
value = self.dsbAdultRatioDefault.value()
aggregation_attributes[adult_ratio_default_key] = value
current_index = self.cboElderlyRatioAttribute.currentIndex()
data = self.cboElderlyRatioAttribute.itemData(current_index)
aggregation_attributes[elderly_ratio_attribute_key] = data
value = self.dsbElderlyRatioDefault.value()
aggregation_attributes[elderly_ratio_default_key] = value
return aggregation_attributes
def age_ratios_are_valid(self):
"""Return true if the sum of age ratios is good, otherwise False.
Good means their sum does not exceed 1.
:returns: Tuple of boolean and float. Boolean represent good or not
good, while float represent the summation of age ratio. If some
ratio do not use global default, the summation is set to 0.
:rtype: tuple
"""
youth_ratio_index = self.cboYouthRatioAttribute.currentIndex()
adult_ratio_index = self.cboAdultRatioAttribute.currentIndex()
elderly_ratio_index = self.cboElderlyRatioAttribute.currentIndex()
ratio_indexes = [
youth_ratio_index, adult_ratio_index, elderly_ratio_index]
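        # Index 0 is the 'Use default' entry, so the sum is only checked when
        # all three ratios come from the global defaults.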
if ratio_indexes.count(0) == len(ratio_indexes):
youth_ratio_default = self.dsbYouthRatioDefault.value()
adult_ratio_default = self.dsbAdultRatioDefault.value()
elderly_ratio_default = self.dsbElderlyRatioDefault.value()
sum_ratio_default = youth_ratio_default + adult_ratio_default
sum_ratio_default += elderly_ratio_default
if sum_ratio_default > 1:
return False, sum_ratio_default
else:
return True, sum_ratio_default
return True, 0
# noinspection PyUnresolvedReferences,PyStatementEffect
def populate_cbo_aggregation_attribute(
self, ratio_attribute_key, cbo_ratio_attribute):
"""Populate the combo box cbo_ratio_attribute for ratio_attribute_key.
:param ratio_attribute_key: A ratio attribute key that saved in
keywords.
:type ratio_attribute_key: str
:param cbo_ratio_attribute: A combo box that wants to be populated.
:type cbo_ratio_attribute: QComboBox
"""
cbo_ratio_attribute.clear()
ratio_attribute = self.get_existing_keyword(ratio_attribute_key)
fields, attribute_position = layer_attribute_names(
self.layer, [QtCore.QVariant.Double], ratio_attribute)
cbo_ratio_attribute.addItem(
self.global_default_string, self.global_default_data)
cbo_ratio_attribute.addItem(
self.do_not_use_string, self.do_not_use_data)
for field in fields:
cbo_ratio_attribute.addItem(field, field)
# For backward compatibility, still use Use default
if (ratio_attribute == self.global_default_data or
ratio_attribute == self.tr('Use default')):
cbo_ratio_attribute.setCurrentIndex(0)
elif ratio_attribute == self.do_not_use_data:
cbo_ratio_attribute.setCurrentIndex(1)
elif ratio_attribute is None or attribute_position is None:
# current_keyword was not found in the attribute table.
# Use default
cbo_ratio_attribute.setCurrentIndex(0)
else:
# + 2 is because we add use defaults and don't use
cbo_ratio_attribute.setCurrentIndex(attribute_position + 2)
def set_widgets_step_kw_aggregation(self):
"""Set widgets on the aggregation tab."""
# Set values based on existing keywords (if already assigned)
self.defaults = get_defaults()
female_ratio_default = self.get_existing_keyword(
female_ratio_default_key)
if female_ratio_default:
self.dsbFemaleRatioDefault.setValue(
float(female_ratio_default))
else:
self.dsbFemaleRatioDefault.setValue(self.defaults['FEMALE_RATIO'])
youth_ratio_default = self.get_existing_keyword(
youth_ratio_default_key)
if youth_ratio_default:
self.dsbYouthRatioDefault.setValue(float(youth_ratio_default))
else:
self.dsbYouthRatioDefault.setValue(self.defaults['YOUTH_RATIO'])
adult_ratio_default = self.get_existing_keyword(
adult_ratio_default_key)
if adult_ratio_default:
self.dsbAdultRatioDefault.setValue(float(adult_ratio_default))
else:
self.dsbAdultRatioDefault.setValue(self.defaults['ADULT_RATIO'])
elderly_ratio_default = self.get_existing_keyword(
elderly_ratio_default_key)
if elderly_ratio_default:
self.dsbElderlyRatioDefault.setValue(float(elderly_ratio_default))
else:
self.dsbElderlyRatioDefault.setValue(
self.defaults['ELDERLY_RATIO'])
ratio_attribute_keys = [
female_ratio_attribute_key,
youth_ratio_attribute_key,
adult_ratio_attribute_key,
elderly_ratio_attribute_key]
cbo_ratio_attributes = [
self.cboFemaleRatioAttribute,
self.cboYouthRatioAttribute,
self.cboAdultRatioAttribute,
self.cboElderlyRatioAttribute]
for i in range(len(cbo_ratio_attributes)):
self.populate_cbo_aggregation_attribute(
ratio_attribute_keys[i], cbo_ratio_attributes[i])
# ===========================
# STEP_KW_SOURCE
# ===========================
def set_widgets_step_kw_source(self):
"""Set widgets on the Source tab."""
# Just set values based on existing keywords
source = self.get_existing_keyword('source')
if source or source == 0:
self.leSource.setText(get_unicode(source))
source_scale = self.get_existing_keyword('scale')
if source_scale or source_scale == 0:
self.leSource_scale.setText(get_unicode(source_scale))
source_date = self.get_existing_keyword('date')
if source_date or source_date == 0:
self.leSource_date.setText(get_unicode(source_date))
source_url = self.get_existing_keyword('url')
if source_url or source_url == 0:
self.leSource_url.setText(get_unicode(source_url))
source_license = self.get_existing_keyword('license')
if source_license or source_license == 0:
self.leSource_license.setText(get_unicode(source_license))
# ===========================
# STEP_KW_TITLE
# ===========================
# noinspection PyPep8Naming
def on_leTitle_textChanged(self):
"""Unlock the Next button
.. note:: This is an automatic Qt slot
executed when the title value changes.
"""
self.pbnNext.setEnabled(bool(self.leTitle.text()))
def set_widgets_step_kw_title(self):
"""Set widgets on the Title tab."""
# Just set values based on existing keywords
if self.layer:
title = self.layer.name()
self.leTitle.setText(title)
# ===========================
# STEP_FC_FUNCTION_1
# ===========================
def selected_functions_1(self):
"""Obtain functions available for hazard an exposure selected by user.
:returns: List of the available functions metadata.
:rtype: list, None
"""
selection = self.tblFunctions1.selectedItems()
if len(selection) != 1:
return []
try:
return selection[0].data(RoleFunctions)
except (AttributeError, NameError):
return None
def selected_impact_function_constraints(self):
"""Obtain impact function constraints selected by user.
:returns: Tuple of metadata of hazard, exposure,
hazard layer constraints and exposure layer constraints
:rtype: tuple
"""
selection = self.tblFunctions1.selectedItems()
if len(selection) != 1:
return None, None, None, None
h = selection[0].data(RoleHazard)
e = selection[0].data(RoleExposure)
selection = self.tblFunctions2.selectedItems()
if len(selection) != 1:
return h, e, None, None
hc = selection[0].data(RoleHazardConstraint)
ec = selection[0].data(RoleExposureConstraint)
return h, e, hc, ec
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_tblFunctions1_itemSelectionChanged(self):
"""Choose selected hazard x exposure combination.
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
functions = self.selected_functions_1()
if not functions:
self.lblAvailableFunctions1.clear()
else:
txt = self.tr('Available functions:') + ' ' + ', '.join(
[f['name'] for f in functions])
self.lblAvailableFunctions1.setText(txt)
# Clear the selection on the 2nd matrix
self.tblFunctions2.clearContents()
self.lblAvailableFunctions2.clear()
self.pbnNext.setEnabled(True)
# Put a dot to the selected cell - note there is no way
# to center an icon without using a custom ItemDelegate
        selection = self.tblFunctions1.selectedItems()
        selected_item = selection[0] if len(selection) == 1 else None
        for row in range(self.tblFunctions1.rowCount()):
            for col in range(self.tblFunctions1.columnCount()):
                item = self.tblFunctions1.item(row, col)
                item.setText(u'\u2022' if item == selected_item else '')
# pylint: disable=W0613
# noinspection PyPep8Naming
def on_tblFunctions1_cellDoubleClicked(self, row, column):
"""Choose selected hazard x exposure combination and go ahead.
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
self.pbnNext.click()
# pylint: enable=W0613
def populate_function_table_1(self):
"""Populate the tblFunctions1 table with available functions."""
# The hazard category radio buttons are now removed -
# make this parameter of IFM.available_hazards() optional
hazard_category = hazard_category_single_event
hazards = self.impact_function_manager\
.available_hazards(hazard_category['key'])
        # Remove the 'generic' hazard. Build a new list instead of removing
        # items while iterating, which would skip elements.
        hazards = [h for h in hazards if h['key'] != 'generic']
exposures = self.impact_function_manager.available_exposures()
self.lblAvailableFunctions1.clear()
self.tblFunctions1.clear()
self.tblFunctions1.setColumnCount(len(hazards))
self.tblFunctions1.setRowCount(len(exposures))
for i in range(len(hazards)):
h = hazards[i]
item = QtGui.QTableWidgetItem()
item.setIcon(QtGui.QIcon(
resources_path('img', 'wizard', 'keyword-subcategory-%s.svg'
% (h['key'] or 'notset'))))
item.setText(h['name'].capitalize())
self.tblFunctions1.setHorizontalHeaderItem(i, item)
for i in range(len(exposures)):
e = exposures[i]
item = QtGui.QTableWidgetItem()
item.setIcon(QtGui.QIcon(
resources_path('img', 'wizard', 'keyword-subcategory-%s.svg'
% (e['key'] or 'notset'))))
item.setText(e['name'].capitalize())
self.tblFunctions1.setVerticalHeaderItem(i, item)
big_font = QtGui.QFont()
big_font.setPointSize(80)
for h in hazards:
for e in exposures:
item = QtGui.QTableWidgetItem()
functions = \
self.impact_function_manager.functions_for_constraint(
h['key'], e['key'])
                if len(functions):
                    background_colour = QtGui.QColor(120, 255, 120)
                else:
                    background_colour = QtGui.QColor(220, 220, 220)
                    # Cells with no matching functions are grayed out and
                    # unselectable (mirrors tblFunctions2 below)
                    item.setFlags(item.flags() & ~QtCore.Qt.ItemIsEnabled)
                    item.setFlags(
                        item.flags() & ~QtCore.Qt.ItemIsSelectable)
item.setBackground(QtGui.QBrush(background_colour))
item.setFont(big_font)
item.setTextAlignment(QtCore.Qt.AlignCenter |
QtCore.Qt.AlignHCenter)
item.setData(RoleFunctions, functions)
item.setData(RoleHazard, h)
item.setData(RoleExposure, e)
self.tblFunctions1.setItem(
exposures.index(e), hazards.index(h), item)
self.pbnNext.setEnabled(False)
def set_widgets_step_fc_function_1(self):
"""Set widgets on the Impact Functions Table 1 tab."""
self.tblFunctions1.horizontalHeader().setResizeMode(
QtGui.QHeaderView.Stretch)
self.tblFunctions1.verticalHeader().setResizeMode(
QtGui.QHeaderView.Stretch)
self.populate_function_table_1()
# ===========================
# STEP_FC_FUNCTION_2
# ===========================
def selected_functions_2(self):
"""Obtain functions available for hazard and exposure selected by user.
:returns: List of the available functions metadata.
:rtype: list, None
"""
selection = self.tblFunctions2.selectedItems()
if len(selection) != 1:
return []
return selection[0].data(RoleFunctions)
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_tblFunctions2_itemSelectionChanged(self):
"""Choose selected hazard x exposure constraints combination.
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
functions = self.selected_functions_2()
if not functions:
self.lblAvailableFunctions2.clear()
else:
text = self.tr('Available functions:') + ' ' + ', '.join(
[f['name'] for f in functions])
self.lblAvailableFunctions2.setText(text)
self.pbnNext.setEnabled(True)
# Put a dot to the selected cell - note there is no way
# to center an icon without using a custom ItemDelegate
        selection = self.tblFunctions2.selectedItems()
        selected_item = selection[0] if len(selection) == 1 else None
        for row in range(self.tblFunctions2.rowCount()):
            for col in range(self.tblFunctions2.columnCount()):
                item = self.tblFunctions2.item(row, col)
                item.setText(u'\u2022' if item == selected_item else '')
# pylint: disable=W0613
# noinspection PyPep8Naming,PyUnusedLocal
def on_tblFunctions2_cellDoubleClicked(self, row, column):
"""Click handler for selecting hazard and exposure constraints.
:param row: The row that the user clicked on.
:type row: int
:param column: The column that the user clicked on.
:type column: int
.. note:: This is an automatic Qt slot executed when the category
selection changes.
"""
self.pbnNext.click()
# pylint: enable=W0613
def set_widgets_step_fc_function_2(self):
"""Set widgets on the Impact Functions Table 2 tab."""
self.tblFunctions2.clear()
h, e, _hc, _ec = self.selected_impact_function_constraints()
hazard_layer_geometries = [
layer_geometry_raster,
layer_geometry_point,
layer_geometry_line,
layer_geometry_polygon]
exposure_layer_geometries = [
layer_geometry_raster,
layer_geometry_point,
layer_geometry_line,
layer_geometry_polygon]
self.lblSelectFunction2.setText(
select_function_constraints2_question % (h['name'], e['name']))
self.tblFunctions2.setColumnCount(len(hazard_layer_geometries))
self.tblFunctions2.setRowCount(len(exposure_layer_geometries))
# self.tblFunctions2.setHorizontalHeaderLabels(
# [i['layer_geometry'].capitalize() if i['layer_mode'] != 'raster'
# else ('%s %s' % (i['layer_geometry'],
# i['layer_mode'])).capitalize()
# for i in hazard_layer_geometries])
# for i in range(len(exposure_layer_geometries)):
# constr = exposure_layer_geometries[i]
# item = QtGui.QTableWidgetItem()
# if constr['layer_mode'] == 'raster':
# text = '%s\n%s' % (constr['layer_geometry'],
# constr['layer_mode'])
# else:
# text = constr['layer_geometry']
# item.setText(text.capitalize())
# item.setTextAlignment(QtCore.Qt.AlignCenter)
# self.tblFunctions2.setVerticalHeaderItem(i, item)
self.tblFunctions2.setHorizontalHeaderLabels(
[i['name'].capitalize() for i in hazard_layer_geometries])
for i in range(len(exposure_layer_geometries)):
item = QtGui.QTableWidgetItem()
item.setText(exposure_layer_geometries[i]['name'].capitalize())
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tblFunctions2.setVerticalHeaderItem(i, item)
self.tblFunctions2.horizontalHeader().setResizeMode(
QtGui.QHeaderView.Stretch)
self.tblFunctions2.verticalHeader().setResizeMode(
QtGui.QHeaderView.Stretch)
big_font = QtGui.QFont()
big_font.setPointSize(80)
active_items = []
for col in range(len(hazard_layer_geometries)):
for row in range(len(exposure_layer_geometries)):
hc = hazard_layer_geometries[col]
ec = exposure_layer_geometries[row]
functions = self.impact_function_manager\
.functions_for_constraint(
h['key'], e['key'], hc['key'], ec['key'])
item = QtGui.QTableWidgetItem()
if len(functions):
bgcolor = QtGui.QColor(120, 255, 120)
active_items += [item]
else:
bgcolor = QtGui.QColor(220, 220, 220)
item.setFlags(item.flags() & ~QtCore.Qt.ItemIsEnabled)
item.setFlags(item.flags() & ~QtCore.Qt.ItemIsSelectable)
item.setBackground(QtGui.QBrush(bgcolor))
item.setFont(big_font)
item.setTextAlignment(QtCore.Qt.AlignCenter |
QtCore.Qt.AlignHCenter)
item.setData(RoleFunctions, functions)
item.setData(RoleHazard, h)
item.setData(RoleExposure, e)
item.setData(RoleHazardConstraint, hc)
item.setData(RoleExposureConstraint, ec)
self.tblFunctions2.setItem(row, col, item)
# Automatically select one item...
if len(active_items) == 1:
active_items[0].setSelected(True)
# set focus, as the inactive selection style is gray
self.tblFunctions2.setFocus()
# ===========================
# STEP_FC_FUNCTION_3
# ===========================
# noinspection PyPep8Naming
def on_lstFunctions_itemSelectionChanged(self):
"""Update function description label
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
imfunc = self.selected_function()
# Exit if no selection
if not imfunc:
self.lblDescribeFunction.clear()
self.pbnNext.setEnabled(False)
# Set the branch description if selected
branch = self.selected_function_group()
if branch and "description" in branch.keys():
self.lblDescribeFunction.setText(branch['description'])
return
# Set description label
description = '<table border="0">'
if "name" in imfunc.keys():
description += '<tr><td><b>%s</b>: </td><td>%s</td></tr>' % (
self.tr('Function'), imfunc['name'])
if "overview" in imfunc.keys():
description += '<tr><td><b>%s</b>: </td><td>%s</td></tr>' % (
self.tr('Overview'), imfunc['overview'])
description += '</table>'
self.lblDescribeFunction.setText(description)
# Enable the next button if anything selected
self.pbnNext.setEnabled(bool(self.selected_function()))
def selected_function(self):
"""Obtain the impact function selected by user.
:returns: metadata of the selected function.
:rtype: dict, None
"""
item = self.lstFunctions.currentItem()
if not item:
return None
data = item.data(QtCore.Qt.UserRole)
if data:
return data
else:
return None
def set_widgets_step_fc_function_3(self):
"""Set widgets on the Impact Functions tab."""
self.lstFunctions.clear()
self.lblDescribeFunction.setText('')
h, e, hc, ec = self.selected_impact_function_constraints()
functions = self.impact_function_manager.functions_for_constraint(
h['key'], e['key'], hc['key'], ec['key'])
self.lblSelectFunction.setText(
select_function_question % (
hc['name'], h['name'], ec['name'], e['name']))
for f in functions:
item = QtGui.QListWidgetItem(self.lstFunctions)
item.setText(f['name'])
item.setData(QtCore.Qt.UserRole, f)
self.auto_select_one_item(self.lstFunctions)
# Set hazard and exposure icons on next steps
icon_path = resources_path('img', 'wizard',
'keyword-subcategory-%s.svg'
% (h['key'] or 'notset'))
self.lblIconFunctionHazard.setPixmap(QPixmap(icon_path))
self.lblIconIFCWHazardOrigin.setPixmap(QPixmap(icon_path))
self.lblIconIFCWHazardFromCanvas.setPixmap(QPixmap(icon_path))
self.lblIconIFCWHazardFromBrowser.setPixmap(QPixmap(icon_path))
icon_path = resources_path('img', 'wizard',
'keyword-subcategory-%s.svg'
% (e['key'] or 'notset'))
self.lblIconFunctionExposure.setPixmap(QPixmap(icon_path))
self.lblIconIFCWExposureOrigin.setPixmap(QPixmap(icon_path))
self.lblIconIFCWExposureFromCanvas.setPixmap(QPixmap(icon_path))
self.lblIconIFCWExposureFromBrowser.setPixmap(QPixmap(icon_path))
icon_path = resources_path('img', 'wizard',
'keyword-category-aggregation.svg')
# Temporarily hide aggregation icon until we have one suitable
# (as requested in a comment to PR #2060)
icon_path = None
self.lblIconIFCWAggregationOrigin.setPixmap(QPixmap(icon_path))
self.lblIconIFCWAggregationFromCanvas.setPixmap(QPixmap(icon_path))
self.lblIconIFCWAggregationFromBrowser.setPixmap(QPixmap(icon_path))
# ===========================
# STEP_FC_HAZLAYER_ORIGIN
# ===========================
# noinspection PyPep8Naming
def on_rbHazLayerFromCanvas_toggled(self):
"""Unlock the Next button
.. note:: This is an automatic Qt slot
executed when the radiobutton is activated.
"""
self.pbnNext.setEnabled(True)
# noinspection PyPep8Naming
def on_rbHazLayerFromBrowser_toggled(self):
"""Unlock the Next button
.. note:: This is an automatic Qt slot
executed when the radiobutton is activated.
"""
self.pbnNext.setEnabled(True)
def is_layer_compatible(self, layer, layer_purpose, keywords=None):
"""Validate if a given layer is compatible for selected IF
as a given layer_purpose
:param layer: The layer to be validated
:type layer: QgsVectorLayer | QgsRasterLayer
:param layer_purpose: The layer_purpose the layer is validated for
:type layer_purpose: string
:param keywords: The layer keywords
:type keywords: None, dict
:returns: True if layer is appropriate for the selected role
:rtype: boolean
"""
# Get allowed subcategory and layer_geometry from IF constraints
h, e, hc, ec = self.selected_impact_function_constraints()
if layer_purpose == 'hazard':
subcategory = h['key']
layer_geometry = hc['key']
elif layer_purpose == 'exposure':
subcategory = e['key']
layer_geometry = ec['key']
else:
# For aggregation layers, use a simplified test and return
if (keywords and 'layer_purpose' in keywords and
keywords['layer_purpose'] == layer_purpose):
return True
if not keywords and is_polygon_layer(layer):
return True
return False
# Compare layer properties with explicitly set constraints
# Reject if layer geometry doesn't match
if layer_geometry != self.get_layer_geometry_id(layer):
return False
# If no keywords, there's nothing more we can check.
# The same if the keywords version doesn't match
if not keywords or 'keyword_version' not in keywords:
return True
keyword_version = str(keywords['keyword_version'])
if compare_version(keyword_version, get_version()) != 0:
return True
# Compare layer keywords with explicitly set constraints
# Reject if layer purpose missing or doesn't match
if ('layer_purpose' not in keywords or
keywords['layer_purpose'] != layer_purpose):
return False
# Reject if layer subcategory doesn't match
if (layer_purpose in keywords and
keywords[layer_purpose] != subcategory):
return False
# Compare layer keywords with the chosen function's constraints
imfunc = self.selected_function()
lay_req = imfunc['layer_requirements'][layer_purpose]
# Reject if layer mode doesn't match
if ('layer_mode' in keywords and
lay_req['layer_mode']['key'] != keywords['layer_mode']):
return False
# Reject if classification doesn't match
classification_key = '%s_%s_classification' % (
'raster' if is_raster_layer(layer) else 'vector',
layer_purpose)
classification_keys = classification_key + 's'
if (lay_req['layer_mode'] == layer_mode_classified and
classification_key in keywords and
classification_keys in lay_req):
allowed_classifications = [
c['key'] for c in lay_req[classification_keys]]
if keywords[classification_key] not in allowed_classifications:
return False
# Reject if unit doesn't match
unit_key = ('continuous_hazard_unit'
if layer_purpose == layer_purpose_hazard['key']
else 'exposure_unit')
unit_keys = unit_key + 's'
if (lay_req['layer_mode'] == layer_mode_continuous and
unit_key in keywords and
unit_keys in lay_req):
allowed_units = [
c['key'] for c in lay_req[unit_keys]]
if keywords[unit_key] not in allowed_units:
return False
# Finally return True
return True
def get_compatible_layers_from_canvas(self, category):
"""Collect compatible layers from map canvas.
.. note:: Returns layers with keywords and layermode matching
the category and compatible with the selected impact function.
Also returns layers without keywords with layermode
compatible with the selected impact function.
:param category: The category to filter for.
:type category: string
:returns: Metadata of found layers.
:rtype: list of dicts
"""
# Collect compatible layers
layers = []
for layer in self.iface.mapCanvas().layers():
try:
keywords = self.keyword_io.read_keywords(layer)
if ('layer_purpose' not in keywords
and 'impact_summary' not in keywords):
keywords = None
except (HashNotFoundError,
OperationalError,
NoKeywordsFoundError,
KeywordNotFoundError,
InvalidParameterError,
UnsupportedProviderError):
keywords = None
if self.is_layer_compatible(layer, category, keywords):
layers += [
{'id': layer.id(),
'name': layer.name(),
'keywords': keywords}]
# Move layers without keywords to the end
l1 = [l for l in layers if l['keywords']]
l2 = [l for l in layers if not l['keywords']]
layers = l1 + l2
return layers
def list_compatible_layers_from_canvas(self, category, list_widget):
"""Fill given list widget with compatible layers.
.. note:: Uses get_compatible_layers_from_canvas() to filter layers
:param category: The category to filter for.
:type category: string
:param list_widget: The list widget to be filled with layers.
:type list_widget: QListWidget
:returns: Metadata of found layers.
:rtype: list of dicts
"""
italic_font = QtGui.QFont()
italic_font.setItalic(True)
# Add compatible layers
list_widget.clear()
for layer in self.get_compatible_layers_from_canvas(category):
item = QListWidgetItem(layer['name'], list_widget)
item.setData(QtCore.Qt.UserRole, layer['id'])
if not layer['keywords']:
item.setFont(italic_font)
list_widget.addItem(item)
def set_widgets_step_fc_hazlayer_origin(self):
"""Set widgets on the Hazard Layer Origin Type tab."""
        # First, list available layers in order to check if there are
        # any available layers. Note: this will be repeated in
        # set_widgets_step_fc_hazlayer_from_canvas because we need
        # to list them again after coming back from the Keyword Wizard.
self.list_compatible_layers_from_canvas(
'hazard', self.lstCanvasHazLayers)
if self.lstCanvasHazLayers.count():
self.rbHazLayerFromCanvas.setText(tr(
'I would like to use a hazard layer already loaded in QGIS\n'
'(launches the %s for hazard if needed)'
) % self.keyword_creation_wizard_name)
self.rbHazLayerFromCanvas.setEnabled(True)
self.rbHazLayerFromCanvas.click()
else:
self.rbHazLayerFromCanvas.setText(tr(
'I would like to use a hazard layer already loaded in QGIS\n'
'(no suitable layers found)'))
self.rbHazLayerFromCanvas.setEnabled(False)
self.rbHazLayerFromBrowser.click()
# Set the memo labels on this and next (hazard) steps
(hazard,
_,
hazard_constraints,
_) = self.selected_impact_function_constraints()
layer_geometry = hazard_constraints['name']
text = (select_hazard_origin_question % (
layer_geometry,
hazard['name'],
self.selected_function()['name']))
self.lblSelectHazLayerOriginType.setText(text)
text = (select_hazlayer_from_canvas_question % (
layer_geometry,
hazard['name'],
self.selected_function()['name']))
self.lblSelectHazardLayer.setText(text)
text = (select_hazlayer_from_browser_question % (
layer_geometry,
hazard['name'],
self.selected_function()['name']))
self.lblSelectBrowserHazLayer.setText(text)
# ===========================
# STEP_FC_HAZLAYER_FROM_CANVAS
# ===========================
def get_layer_description_from_canvas(self, layer, purpose):
"""Obtain the description of a canvas layer selected by user.
:param layer: The QGIS layer.
:type layer: QgsMapLayer
:param category: The category of the layer to get the description.
:type category: string
:returns: description of the selected layer.
:rtype: string
"""
if not layer:
return ""
try:
keywords = self.keyword_io.read_keywords(layer)
if 'layer_purpose' not in keywords:
keywords = None
except (HashNotFoundError,
OperationalError,
NoKeywordsFoundError,
KeywordNotFoundError,
InvalidParameterError,
UnsupportedProviderError):
keywords = None
# set the current layer (e.g. for the keyword creation sub-thread)
self.layer = layer
if purpose == 'hazard':
self.hazard_layer = layer
elif purpose == 'exposure':
self.exposure_layer = layer
else:
self.aggregation_layer = layer
# Check if the layer is keywordless
if keywords and 'keyword_version' in keywords:
kw_ver = str(keywords['keyword_version'])
self.is_selected_layer_keywordless = bool(
compare_version(kw_ver, get_version()) != 0)
else:
self.is_selected_layer_keywordless = True
desc = self.layer_description_html(layer, keywords)
return desc
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_lstCanvasHazLayers_itemSelectionChanged(self):
"""Update layer description label
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
self.hazard_layer = self.selected_canvas_hazlayer()
lblText = self.get_layer_description_from_canvas(self.hazard_layer,
'hazard')
self.lblDescribeCanvasHazLayer.setText(lblText)
self.pbnNext.setEnabled(True)
def selected_canvas_hazlayer(self):
"""Obtain the canvas layer selected by user.
:returns: The currently selected map layer in the list.
:rtype: QgsMapLayer
"""
if self.lstCanvasHazLayers.selectedItems():
item = self.lstCanvasHazLayers.currentItem()
else:
return None
try:
layer_id = item.data(QtCore.Qt.UserRole)
except (AttributeError, NameError):
layer_id = None
layer = QgsMapLayerRegistry.instance().mapLayer(layer_id)
return layer
def set_widgets_step_fc_hazlayer_from_canvas(self):
"""Set widgets on the Hazard Layer From TOC tab"""
# The list is already populated in the previous step, but now we
# need to do it again in case we're back from the Keyword Wizard.
# First, preserve self.layer before clearing the list
last_layer = self.layer and self.layer.id() or None
self.lblDescribeCanvasHazLayer.clear()
self.list_compatible_layers_from_canvas(
'hazard', self.lstCanvasHazLayers)
self.auto_select_one_item(self.lstCanvasHazLayers)
# Try to select the last_layer, if found:
if last_layer:
layers = []
for indx in xrange(self.lstCanvasHazLayers.count()):
item = self.lstCanvasHazLayers.item(indx)
layers += [item.data(QtCore.Qt.UserRole)]
if last_layer in layers:
self.lstCanvasHazLayers.setCurrentRow(layers.index(last_layer))
# ===========================
# STEP_FC_HAZLAYER_FROM_BROWSER
# ===========================
def pg_path_to_uri(self, path):
"""Convert layer path from QgsBrowserModel to full QgsDataSourceURI
:param path: The layer path from QgsBrowserModel
:type path: string
:returns: layer uri
:rtype: QgsDataSourceURI
"""
conn_name = path.split('/')[1]
schema = path.split('/')[2]
table = path.split('/')[3]
settings = QSettings()
key = "/PostgreSQL/connections/" + conn_name
service = settings.value(key + "/service")
host = settings.value(key + "/host")
port = settings.value(key + "/port")
if not port:
port = "5432"
db = settings.value(key + "/database")
use_estimated_metadata = settings.value(
key + "/estimatedMetadata", False, type=bool)
sslmode = settings.value(
key + "/sslmode", QgsDataSourceURI.SSLprefer, type=int)
username = ""
password = ""
if settings.value(key + "/saveUsername") == "true":
username = settings.value(key + "/username")
if settings.value(key + "/savePassword") == "true":
password = settings.value(key + "/password")
# Old save setting
if settings.contains(key + "/save"):
username = settings.value(key + "/username")
if settings.value(key + "/save") == "true":
password = settings.value(key + "/password")
uri = QgsDataSourceURI()
if service:
uri.setConnection(service, db, username, password, sslmode)
else:
uri.setConnection(host, port, db, username, password, sslmode)
uri.setUseEstimatedMetadata(use_estimated_metadata)
# Obtain the geometry column name
connector = PostGisDBConnector(uri)
tbls = connector.getVectorTables(schema)
tbls = [tbl for tbl in tbls if tbl[1] == table]
        # TODO: In the future, possibly also look for raster tables via
        # connector.getRasterTables(schema) if no vector table matches.
if not tbls:
return None
tbl = tbls[0]
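        # tbl[8] is assumed to hold the geometry column name in the
        # row layout returned by getVectorTables()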
geom_col = tbl[8]
uri.setDataSource(schema, table, geom_col)
return uri
def layer_description_html(self, layer, keywords=None):
"""Form a html description of a given layer based on the layer
parameters and keywords if provided
:param layer: The layer to get the description
:type layer: QgsMapLayer
:param keywords: The layer keywords
:type keywords: None, dict
:returns: The html description in tabular format,
ready to use in a label or tool tip.
:rtype: str
"""
if keywords and 'keyword_version' in keywords:
keyword_version = str(keywords['keyword_version'])
else:
keyword_version = None
if (keywords and keyword_version and
compare_version(keyword_version, get_version()) == 0):
# The layer has valid keywords
purpose = keywords.get('layer_purpose')
if purpose == layer_purpose_hazard['key']:
subcategory = '<tr><td><b>%s</b>: </td><td>%s</td></tr>' % (
self.tr('Hazard'), keywords.get(purpose))
unit = keywords.get('continuous_hazard_unit')
elif purpose == layer_purpose_exposure['key']:
subcategory = '<tr><td><b>%s</b>: </td><td>%s</td></tr>' % (
self.tr('Exposure'), keywords.get(purpose))
unit = keywords.get('exposure_unit')
else:
subcategory = ''
unit = None
if keywords.get('layer_mode') == layer_mode_classified['key']:
unit = self.tr('classified data')
if unit:
unit = '<tr><td><b>%s</b>: </td><td>%s</td></tr>' % (
self.tr('Unit'), unit)
desc = """
<table border="0" width="100%%">
<tr><td><b>%s</b>: </td><td>%s</td></tr>
<tr><td><b>%s</b>: </td><td>%s</td></tr>
%s
%s
<tr><td><b>%s</b>: </td><td>%s</td></tr>
</table>
""" % (self.tr('Title'), keywords.get('title'),
self.tr('Purpose'), keywords.get('layer_purpose'),
subcategory,
unit,
self.tr('Source'), keywords.get('source'))
elif keywords:
# The layer has keywords, but the version is wrong
            desc = self.tr(
                'Your layer\'s keywords version (%s) does not match '
                'your InaSAFE version (%s). If you wish to use it as an '
                'exposure, hazard, or aggregation layer in an analysis, '
                'please update the keywords. Click Next if you want to '
                'assign keywords now.') % (keyword_version or 'No Version',
                                           get_version())
else:
# The layer is keywordless
if is_point_layer(layer):
geom_type = 'point'
elif is_polygon_layer(layer):
geom_type = 'polygon'
else:
geom_type = 'line'
# hide password in the layer source
source = re.sub(
r'password=\'.*\'', r'password=*****', layer.source())
desc = """
%s<br/><br/>
<b>%s</b>: %s<br/>
<b>%s</b>: %s<br/><br/>
%s
""" % (self.tr('This layer has no valid keywords assigned'),
self.tr('SOURCE'), source,
self.tr('TYPE'), is_raster_layer(layer) and 'raster' or
'vector (%s)' % geom_type,
self.tr('In the next step you will be able' +
' to assign keywords to this layer.'))
return desc
def unsuitable_layer_description_html(self, layer, layer_purpose,
keywords=None):
"""Form a html description of a given non-matching layer based on
the currently selected impact function requirements vs layer\'s
parameters and keywords if provided, as
:param layer: The layer to be validated
:type layer: QgsVectorLayer | QgsRasterLayer
:param layer_purpose: The layer_purpose the layer is validated for
:type layer_purpose: string
:param keywords: The layer keywords
:type keywords: None, dict
:returns: The html description in tabular format,
ready to use in a label or tool tip.
:rtype: str
"""
def emphasize(str1, str2):
            '''Compare two strings and emphasize both if they differ.'''
if str1 != str2:
str1 = '<i>%s</i>' % str1
str2 = '<i>%s</i>' % str2
return (str1, str2)
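        # Example: emphasize('point', 'polygon') returns
        # ('<i>point</i>', '<i>polygon</i>'); equal strings pass through
        # unchanged.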
# Get allowed subcategory and layer_geometry from IF constraints
h, e, hc, ec = self.selected_impact_function_constraints()
imfunc = self.selected_function()
lay_req = imfunc['layer_requirements'][layer_purpose]
if layer_purpose == 'hazard':
layer_purpose_key_name = layer_purpose_hazard['name']
req_subcategory = h['key']
req_geometry = hc['key']
elif layer_purpose == 'exposure':
layer_purpose_key_name = layer_purpose_exposure['name']
req_subcategory = e['key']
req_geometry = ec['key']
else:
layer_purpose_key_name = layer_purpose_aggregation['name']
req_subcategory = ''
# For aggregation layers, only accept polygons
req_geometry = 'polygon'
req_layer_mode = lay_req['layer_mode']['key']
lay_geometry = self.get_layer_geometry_id(layer)
lay_purpose = ' -'
lay_subcategory = ' -'
lay_layer_mode = ' -'
if keywords:
if 'layer_purpose' in keywords:
lay_purpose = keywords['layer_purpose']
if layer_purpose in keywords:
lay_subcategory = keywords[layer_purpose]
if 'layer_mode' in keywords:
lay_layer_mode = keywords['layer_mode']
lay_geometry, req_geometry = emphasize(lay_geometry, req_geometry)
lay_purpose, layer_purpose = emphasize(lay_purpose, layer_purpose)
lay_subcategory, req_subcategory = emphasize(lay_subcategory,
req_subcategory)
lay_layer_mode, req_layer_mode = emphasize(lay_layer_mode,
req_layer_mode)
# Classification
classification_row = ''
if (lay_req['layer_mode'] == layer_mode_classified and
layer_purpose == 'hazard'):
# Determine the keyword key for the classification
classification_obj = (raster_hazard_classification
if is_raster_layer(layer)
else vector_hazard_classification)
classification_key = classification_obj['key']
classification_key_name = classification_obj['name']
classification_keys = classification_key + 's'
if classification_keys in lay_req:
allowed_classifications = [
c['key'] for c in lay_req[classification_keys]]
req_classifications = ', '.join(allowed_classifications)
lay_classification = ' -'
if classification_key in keywords:
lay_classification = keywords[classification_key]
if lay_classification not in allowed_classifications:
                    # We already know we want to emphasize them and the test
                    # inside the function will always pass.
lay_classification, req_classifications = emphasize(
lay_classification, req_classifications)
classification_row = (('<tr><td><b>%s</b></td>' +
'<td>%s</td><td>%s</td></tr>')
% (classification_key_name,
lay_classification,
req_classifications))
# Unit
units_row = ''
if lay_req['layer_mode'] == layer_mode_continuous:
# Determine the keyword key for the unit
unit_obj = (continuous_hazard_unit
if layer_purpose == layer_purpose_hazard['key']
else exposure_unit)
unit_key = unit_obj['key']
unit_key_name = unit_obj['name']
unit_keys = unit_key + 's'
if unit_keys in lay_req:
allowed_units = [c['key'] for c in lay_req[unit_keys]]
req_units = ', '.join(allowed_units)
lay_unit = ' -'
if unit_key in keywords:
lay_unit = keywords[unit_key]
if lay_unit not in allowed_units:
                    # We already know we want to emphasize them and the test
                    # inside the function will always pass.
lay_unit, req_units = emphasize(lay_unit, req_units)
units_row = (('<tr><td><b>%s</b></td>' +
'<td>%s</td><td>%s</td></tr>')
% (unit_key_name, lay_unit, req_units))
html = '''
<table border="0" width="100%%" cellpadding="2">
<tr><td width="33%%"></td>
<td width="33%%"><b>%s</b></td>
<td width="33%%"><b>%s</b></td>
</tr>
<tr><td><b>%s</b></td><td>%s</td><td>%s</td></tr>
<tr><td><b>%s</b></td><td>%s</td><td>%s</td></tr>
<tr><td><b>%s</b></td><td>%s</td><td>%s</td></tr>
<tr><td><b>%s</b></td><td>%s</td><td>%s</td></tr>
%s
%s
</table>
''' % (self.tr('Layer'), self.tr('Required'),
self.tr('Geometry'), lay_geometry, req_geometry,
self.tr('Purpose'), lay_purpose, layer_purpose,
layer_purpose_key_name, lay_subcategory, req_subcategory,
self.tr('Layer mode'), lay_layer_mode, req_layer_mode,
classification_row,
units_row)
return html
def get_layer_description_from_browser(self, category):
"""Obtain the description of the browser layer selected by user.
:param category: The category of the layer to get the description.
:type category: string
:returns: Tuple of boolean and string. Boolean is true if layer is
validated as compatible for current role (impact function and
category) and false otherwise. String contains a description
of the selected layer or an error message.
:rtype: tuple
"""
if category == 'hazard':
browser = self.tvBrowserHazard
elif category == 'exposure':
browser = self.tvBrowserExposure
elif category == 'aggregation':
browser = self.tvBrowserAggregation
else:
raise InaSAFEError
index = browser.selectionModel().currentIndex()
if not index:
return False, ''
# Map the proxy model index to the source model index
index = browser.model().mapToSource(index)
item = browser.model().sourceModel().dataItem(index)
if not item:
return False, ''
item_class_name = item.metaObject().className()
        # Only browser items representing layers are acceptable
        if not item.type() == QgsDataItem.Layer:
if item_class_name == 'QgsPGRootItem' and not item.children():
return False, create_postGIS_connection_first
else:
return False, ''
if item_class_name not in [
'QgsOgrLayerItem', 'QgsLayerItem', 'QgsPGLayerItem']:
return False, ''
path = item.path()
if item_class_name in ['QgsOgrLayerItem',
'QgsLayerItem'] and not os.path.exists(path):
return False, ''
# try to create the layer
if item_class_name == 'QgsOgrLayerItem':
layer = QgsVectorLayer(path, '', 'ogr')
elif item_class_name == 'QgsPGLayerItem':
uri = self.pg_path_to_uri(path)
if uri:
layer = QgsVectorLayer(uri.uri(), uri.table(), 'postgres')
else:
layer = None
else:
layer = QgsRasterLayer(path, '', 'gdal')
if not layer or not layer.isValid():
return False, self.tr('Not a valid layer.')
try:
keywords = self.keyword_io.read_keywords(layer)
if ('layer_purpose' not in keywords
and 'impact_summary' not in keywords):
keywords = None
except (HashNotFoundError,
OperationalError,
NoKeywordsFoundError,
KeywordNotFoundError,
InvalidParameterError,
UnsupportedProviderError):
keywords = None
# set the layer name for further use in the step_fc_summary
if keywords:
layer.setLayerName(keywords.get('title'))
if not self.is_layer_compatible(layer, category, keywords):
label_text = '%s<br/>%s' % (self.tr('This layer\'s keywords ' +
'or type are not suitable:'),
self.unsuitable_layer_description_html(
layer, category, keywords))
return False, label_text
# set the current layer (e.g. for the keyword creation sub-thread
# or for adding the layer to mapCanvas)
self.layer = layer
if category == 'hazard':
self.hazard_layer = layer
elif category == 'exposure':
self.exposure_layer = layer
else:
self.aggregation_layer = layer
# Check if the layer is keywordless
if keywords and 'keyword_version' in keywords:
kw_ver = str(keywords['keyword_version'])
self.is_selected_layer_keywordless = bool(
compare_version(kw_ver, get_version()) != 0)
else:
self.is_selected_layer_keywordless = True
desc = self.layer_description_html(layer, keywords)
return True, desc
# noinspection PyPep8Naming
def tvBrowserHazard_selection_changed(self):
"""Update layer description label"""
(is_compatible, desc) = self.get_layer_description_from_browser(
'hazard')
self.lblDescribeBrowserHazLayer.setText(desc)
self.lblDescribeBrowserHazLayer.setEnabled(is_compatible)
self.pbnNext.setEnabled(is_compatible)
def set_widgets_step_fc_hazlayer_from_browser(self):
"""Set widgets on the Hazard Layer From Browser tab"""
self.tvBrowserHazard_selection_changed()
# ===========================
# STEP_FC_EXPLAYER_ORIGIN
# ===========================
# noinspection PyPep8Naming
def on_rbExpLayerFromCanvas_toggled(self):
"""Unlock the Next button
.. note:: This is an automatic Qt slot
executed when the radiobutton is activated.
"""
self.pbnNext.setEnabled(True)
# noinspection PyPep8Naming
def on_rbExpLayerFromBrowser_toggled(self):
"""Unlock the Next button
.. note:: This is an automatic Qt slot
executed when the radiobutton is activated.
"""
self.pbnNext.setEnabled(True)
def set_widgets_step_fc_explayer_origin(self):
"""Set widgets on the Exposure Layer Origin Type tab"""
        # First, list available layers in order to check if there are
        # any available layers. Note: this will be repeated in
        # set_widgets_step_fc_explayer_from_canvas because we need
        # to list them again after coming back from the Keyword Wizard.
self.list_compatible_layers_from_canvas(
'exposure', self.lstCanvasExpLayers)
if self.lstCanvasExpLayers.count():
self.rbExpLayerFromCanvas.setText(tr(
'I would like to use an exposure layer already loaded in QGIS'
'\n'
'(launches the %s for exposure if needed)'
) % self.keyword_creation_wizard_name)
self.rbExpLayerFromCanvas.setEnabled(True)
self.rbExpLayerFromCanvas.click()
else:
self.rbExpLayerFromCanvas.setText(tr(
'I would like to use an exposure layer already loaded in QGIS'
'\n'
'(no suitable layers found)'))
self.rbExpLayerFromCanvas.setEnabled(False)
self.rbExpLayerFromBrowser.click()
# Set the memo labels on this and next (exposure) steps
(_,
exposure,
_,
exposure_constraints) = self.selected_impact_function_constraints()
layer_geometry = exposure_constraints['name']
text = (select_exposure_origin_question % (
layer_geometry,
exposure['name'],
self.selected_function()['name']))
self.lblSelectExpLayerOriginType.setText(text)
text = (select_explayer_from_canvas_question % (
layer_geometry,
exposure['name'],
self.selected_function()['name']))
self.lblSelectExposureLayer.setText(text)
text = (select_explayer_from_browser_question % (
layer_geometry,
exposure['name'],
self.selected_function()['name']))
self.lblSelectBrowserExpLayer.setText(text)
# ===========================
# STEP_FC_EXPLAYER_FROM_CANVAS
# ===========================
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_lstCanvasExpLayers_itemSelectionChanged(self):
"""Update layer description label
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
self.exposure_layer = self.selected_canvas_explayer()
lblText = self.get_layer_description_from_canvas(self.exposure_layer,
'exposure')
self.lblDescribeCanvasExpLayer.setText(lblText)
self.pbnNext.setEnabled(True)
def selected_canvas_explayer(self):
"""Obtain the canvas exposure layer selected by user.
:returns: The currently selected map layer in the list.
:rtype: QgsMapLayer
"""
if self.lstCanvasExpLayers.selectedItems():
item = self.lstCanvasExpLayers.currentItem()
else:
return None
try:
layer_id = item.data(QtCore.Qt.UserRole)
except (AttributeError, NameError):
layer_id = None
layer = QgsMapLayerRegistry.instance().mapLayer(layer_id)
return layer
def set_widgets_step_fc_explayer_from_canvas(self):
"""Set widgets on the Exposure Layer From Canvas tab"""
# The list is already populated in the previous step, but now we
# need to do it again in case we're back from the Keyword Wizard.
# First, preserve self.layer before clearing the list
last_layer = self.layer and self.layer.id() or None
self.lblDescribeCanvasExpLayer.clear()
self.list_compatible_layers_from_canvas(
'exposure', self.lstCanvasExpLayers)
self.auto_select_one_item(self.lstCanvasExpLayers)
# Try to select the last_layer, if found:
if last_layer:
layers = []
for indx in xrange(self.lstCanvasExpLayers.count()):
item = self.lstCanvasExpLayers.item(indx)
layers += [item.data(QtCore.Qt.UserRole)]
if last_layer in layers:
self.lstCanvasExpLayers.setCurrentRow(layers.index(last_layer))
# ===========================
# STEP_FC_EXPLAYER_FROM_BROWSER
# ===========================
def tvBrowserExposure_selection_changed(self):
"""Update layer description label"""
(is_compatible, desc) = self.get_layer_description_from_browser(
'exposure')
self.lblDescribeBrowserExpLayer.setText(desc)
self.pbnNext.setEnabled(is_compatible)
def set_widgets_step_fc_explayer_from_browser(self):
"""Set widgets on the Exposure Layer From Browser tab"""
self.tvBrowserExposure_selection_changed()
# ===========================
# STEP_FC_DISJOINT_LAYERS
# ===========================
def layers_intersect(self, layer_a, layer_b):
"""Check if extents of two layers intersect.
:param layer_a: One of the two layers to test overlapping
:type layer_a: QgsMapLayer
:param layer_b: The second of the two layers to test overlapping
:type layer_b: QgsMapLayer
:returns: true if the layers intersect, false if they are disjoint
:rtype: boolean
"""
extent_a = layer_a.extent()
extent_b = layer_b.extent()
if layer_a.crs() != layer_b.crs():
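            # Reproject extent_b into layer_a's CRS (ReverseTransform maps
            # from the destination CRS back to the source), so that both
            # extents are comparable.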
coord_transform = QgsCoordinateTransform(
layer_a.crs(), layer_b.crs())
extent_b = (coord_transform.transform(
extent_b, QgsCoordinateTransform.ReverseTransform))
return extent_a.intersects(extent_b)
def set_widgets_step_fc_disjoint_layers(self):
"""Set widgets on the Disjoint Layers tab"""
pass
# ===========================
# STEP_FC_AGGLAYER_ORIGIN
# ===========================
# noinspection PyPep8Naming
def on_rbAggLayerFromCanvas_toggled(self):
"""Unlock the Next button
.. note:: This is an automatic Qt slot
executed when the radiobutton is activated.
"""
self.pbnNext.setEnabled(True)
# noinspection PyPep8Naming
def on_rbAggLayerFromBrowser_toggled(self):
"""Unlock the Next button
.. note:: This is an automatic Qt slot
executed when the radiobutton is activated.
"""
self.pbnNext.setEnabled(True)
# noinspection PyPep8Naming
def on_rbAggLayerNoAggregation_toggled(self):
"""Unlock the Next button
.. note:: This is an automatic Qt slot
executed when the radiobutton is activated.
"""
self.pbnNext.setEnabled(True)
def set_widgets_step_fc_agglayer_origin(self):
"""Set widgets on the Aggregation Layer Origin Type tab"""
        # First, list available layers in order to check if there are
        # any available layers. Note: this will be repeated in
        # set_widgets_step_fc_agglayer_from_canvas because we need
        # to list them again after coming back from the Keyword Wizard.
self.list_compatible_layers_from_canvas(
'aggregation', self.lstCanvasAggLayers)
if self.lstCanvasAggLayers.count():
self.rbAggLayerFromCanvas.setText(tr(
'I would like to use an aggregation layer already loaded in '
'QGIS\n'
'(launches the %s for aggregation if needed)'
) % self.keyword_creation_wizard_name)
self.rbAggLayerFromCanvas.setEnabled(True)
self.rbAggLayerFromCanvas.click()
else:
self.rbAggLayerFromCanvas.setText(tr(
'I would like to use an aggregation layer already loaded in '
'QGIS\n'
'(no suitable layers found)'))
self.rbAggLayerFromCanvas.setEnabled(False)
self.rbAggLayerFromBrowser.click()
# ===========================
# STEP_FC_AGGLAYER_FROM_CANVAS
# ===========================
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_lstCanvasAggLayers_itemSelectionChanged(self):
"""Update layer description label
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
self.aggregation_layer = self.selected_canvas_agglayer()
lblText = self.get_layer_description_from_canvas(
self.aggregation_layer, 'aggregation')
self.lblDescribeCanvasAggLayer.setText(lblText)
self.pbnNext.setEnabled(True)
def selected_canvas_agglayer(self):
"""Obtain the canvas aggregation layer selected by user.
:returns: The currently selected map layer in the list.
:rtype: QgsMapLayer
"""
if self.lstCanvasAggLayers.selectedItems():
item = self.lstCanvasAggLayers.currentItem()
else:
return None
try:
layer_id = item.data(QtCore.Qt.UserRole)
except (AttributeError, NameError):
layer_id = None
layer = QgsMapLayerRegistry.instance().mapLayer(layer_id)
return layer
def set_widgets_step_fc_agglayer_from_canvas(self):
"""Set widgets on the Aggregation Layer from Canvas tab"""
# The list is already populated in the previous step, but now we
# need to do it again in case we're back from the Keyword Wizard.
# First, preserve self.layer before clearing the list
last_layer = self.layer and self.layer.id() or None
self.lblDescribeCanvasAggLayer.clear()
self.list_compatible_layers_from_canvas(
'aggregation', self.lstCanvasAggLayers)
self.auto_select_one_item(self.lstCanvasAggLayers)
# Try to select the last_layer, if found:
if last_layer:
layers = []
for indx in xrange(self.lstCanvasAggLayers.count()):
item = self.lstCanvasAggLayers.item(indx)
layers += [item.data(QtCore.Qt.UserRole)]
if last_layer in layers:
self.lstCanvasAggLayers.setCurrentRow(layers.index(last_layer))
# ===========================
# STEP_FC_AGGLAYER_FROM_BROWSER
# ===========================
# noinspection PyPep8Naming
def tvBrowserAggregation_selection_changed(self):
"""Update layer description label"""
(is_compatible, desc) = self.get_layer_description_from_browser(
'aggregation')
self.lblDescribeBrowserAggLayer.setText(desc)
self.pbnNext.setEnabled(is_compatible)
def set_widgets_step_fc_agglayer_from_browser(self):
"""Set widgets on the Aggregation Layer From Browser tab"""
self.tvBrowserAggregation_selection_changed()
# ===========================
# STEP_FC_AGGLAYER_DISJOINT
# ===========================
def set_widgets_step_fc_agglayer_disjoint(self):
"""Set widgets on the Aggregation Layer Disjoint tab"""
pass
# ===========================
# STEP_FC_EXTENT
# ===========================
# noinspection PyPep8Naming
def on_rbExtentUser_toggled(self):
"""Unlock the Next button
.. note:: This is an automatic Qt slot
executed when the radiobutton is activated.
"""
self.pbnNext.setEnabled(True)
# noinspection PyPep8Naming
def on_rbExtentLayer_toggled(self):
"""Unlock the Next button
.. note:: This is an automatic Qt slot
executed when the radiobutton is activated.
"""
self.pbnNext.setEnabled(True)
# noinspection PyPep8Naming
def on_rbExtentScreen_toggled(self):
"""Unlock the Next button
.. note:: This is an automatic Qt slot
executed when the radiobutton is activated.
"""
self.pbnNext.setEnabled(True)
def start_capture_coordinates(self):
"""Enter the coordinate capture mode"""
self.hide()
def stop_capture_coordinates(self):
"""Exit the coordinate capture mode"""
self.extent_dialog._populate_coordinates()
self.extent_dialog.canvas.setMapTool(
self.extent_dialog.previous_map_tool)
self.show()
def set_widgets_step_fc_extent(self):
"""Set widgets on the Extent tab"""
# import here only so that it is AFTER i18n set up
from safe.gui.tools.extent_selector_dialog import ExtentSelectorDialog
self.extent_dialog = ExtentSelectorDialog(
self.iface,
self.iface.mainWindow(),
extent=self.dock.extent.user_extent,
crs=self.dock.extent.user_extent_crs)
self.extent_dialog.tool.rectangle_created.disconnect(
self.extent_dialog.stop_capture)
self.extent_dialog.clear_extent.connect(
self.dock.extent.clear_user_analysis_extent)
self.extent_dialog.extent_defined.connect(
self.dock.define_user_analysis_extent)
self.extent_dialog.capture_button.clicked.connect(
self.start_capture_coordinates)
self.extent_dialog.tool.rectangle_created.connect(
self.stop_capture_coordinates)
        self.extent_dialog.label.setText(self.tr(
            'Please specify the extent of your analysis:'))
if self.swExtent:
self.swExtent.hide()
self.swExtent = self.extent_dialog.stacked_widget
self.layoutAnalysisExtent.addWidget(self.swExtent)
def write_extent(self):
""" After the extent selection,
save the extent and disconnect signals
"""
self.extent_dialog.accept()
self.extent_dialog.clear_extent.disconnect(
self.dock.extent.clear_user_analysis_extent)
self.extent_dialog.extent_defined.disconnect(
self.dock.define_user_analysis_extent)
self.extent_dialog.capture_button.clicked.disconnect(
self.start_capture_coordinates)
self.extent_dialog.tool.rectangle_created.disconnect(
self.stop_capture_coordinates)
# ===========================
# STEP_FC_EXTENT_DISJOINT
# ===========================
def validate_extent(self):
"""Check if the selected extent intersects source data.
:returns: true if extent intersects both layers, false if is disjoint
:rtype: boolean
"""
self.analysis_handler = AnalysisHandler(self)
self.analysis_handler.init_analysis()
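        # setup_analysis() is expected to raise InsufficientOverlapError
        # when the requested extent does not intersect both input layers.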
try:
self.analysis_handler.analysis.setup_analysis()
except InsufficientOverlapError:
self.analysis_handler = None
return False
self.analysis_handler = None
return True
def set_widgets_step_fc_extent_disjoint(self):
"""Set widgets on the Extent Disjoint tab"""
pass
# ===========================
# STEP_FC_PARAMS
# ===========================
def set_widgets_step_fc_params(self):
"""Set widgets on the Params tab"""
# TODO Put the params to metadata! Now we need to import the IF class.
# Notes: Why don't we store impact_function to class attribute?
impact_function_id = self.selected_function()['id']
impact_function = self.impact_function_manager.get(
impact_function_id)
if not impact_function:
return
self.if_params = None
if hasattr(impact_function, 'parameters'):
self.if_params = impact_function.parameters
        text = self.tr(
            'Please set the impact function parameters.<br/>Parameters for '
            'impact function "%s" that can be modified are:') % (
                impact_function_id)
self.lblSelectIFParameters.setText(text)
self.parameter_dialog = FunctionOptionsDialog(self)
self.parameter_dialog.set_dialog_info(impact_function_id)
self.parameter_dialog.build_form(self.if_params)
if self.twParams:
self.twParams.hide()
self.twParams = self.parameter_dialog.tabWidget
self.layoutIFParams.addWidget(self.twParams)
# ===========================
# STEP_FC_SUMMARY
# ===========================
def set_widgets_step_fc_summary(self):
"""Set widgets on the Summary tab"""
def format_postprocessor(pp):
""" make nested OrderedDicts more flat"""
if isinstance(pp, OrderedDict):
result = []
for v in pp:
if isinstance(pp[v], OrderedDict):
# omit the v key and unpack the dict directly
result += [u'%s: %s' % (unicode(k), unicode(pp[v][k]))
for k in pp[v]]
else:
result += [u'%s: %s' % (unicode(v), unicode(pp[v]))]
return u', '.join(result)
elif isinstance(pp, list):
result = []
for i in pp:
name = i.serialize()['name']
val = i.serialize()['value']
if isinstance(val, bool):
val = val and self.tr('Enabled') or self.tr('Disabled')
if isinstance(i, GroupParameter):
                    # val is a list of *Parameter instances
jresult = []
for j in val:
jname = j.serialize()['name']
jval = j.serialize()['value']
if isinstance(jval, bool):
jval = (jval and self.tr('Enabled') or
self.tr('Disabled'))
else:
jval = unicode(jval)
jresult += [u'%s: %s' % (jname, jval)]
val = u', '.join(jresult)
else:
val = unicode(val)
if pp.index(i) == 0:
result += [val]
else:
result += [u'%s: %s' % (name, val)]
return u', '.join(result)
else:
return unicode(pp)
self.if_params = self.parameter_dialog.parse_input(
self.parameter_dialog.values)
        # (IS) Set the current impact function to use the parameters
        # given by the user.
        # TODO: do this more cleanly, e.g. move it to the analysis or
        # the impact calculator.
impact_function_id = self.selected_function()['id']
impact_function = self.impact_function_manager.get(
impact_function_id)
if not impact_function:
return
impact_function.parameters = self.if_params
params = []
for p in self.if_params:
if isinstance(self.if_params[p], OrderedDict):
subparams = [
u'<tr><td>%s </td><td>%s</td></tr>' % (
unicode(pp),
format_postprocessor(self.if_params[p][pp]))
for pp in self.if_params[p]
]
if subparams:
subparams = ''.join(subparams)
subparams = '<table border="0">%s</table>' % subparams
elif isinstance(self.if_params[p], list) and p == 'minimum needs':
subparams = ''
for need in self.if_params[p]:
# concatenate all ResourceParameter
name = unicode(need.serialize()['name'])
val = unicode(need.serialize()['value'])
if isinstance(need, ResourceParameter):
if need.unit and need.unit.abbreviation:
val += need.unit.abbreviation
subparams += u'<tr><td>%s </td><td>%s</td></tr>' % (
name, val)
if subparams:
subparams = '<table border="0">%s</table>' % subparams
else:
subparams = 'Not applicable'
elif isinstance(self.if_params[p], list):
subparams = ', '.join([unicode(i) for i in self.if_params[p]])
else:
subparams = unicode(self.if_params[p].serialize()['value'])
params += [(p, subparams)]
if self.aggregation_layer:
aggr = self.aggregation_layer.name()
else:
aggr = self.tr('no aggregation')
html = self.tr('Please ensure the following information '
'is correct and press Run.')
# TODO: update this to use InaSAFE message API rather...
html += '<br/><table cellspacing="4">'
html += ('<tr>'
' <td><b>%s</b></td><td width="10"></td><td>%s</td>'
'</tr><tr>'
' <td colspan="3"></td>'
'</tr><tr>'
' <td><b>%s</b></td><td></td><td>%s</td>'
'</tr><tr>'
' <td><b>%s</b></td><td></td><td>%s</td>'
'</tr><tr>'
' <td><b>%s</b></td><td></td><td>%s</td>'
'</tr><tr>'
' <td colspan="3"></td>'
'</tr>' % (
                self.tr('impact function').capitalize().replace(
                    ' ', '&nbsp;'),
                self.selected_function()['name'],
                self.tr('hazard layer').capitalize().replace(
                    ' ', '&nbsp;'),
                self.hazard_layer.name(),
                self.tr('exposure layer').capitalize().replace(
                    ' ', '&nbsp;'),
                self.exposure_layer.name(),
                self.tr('aggregation layer').capitalize().replace(
                    ' ', '&nbsp;'), aggr))
def humanize(my_string):
"""Humanize string.
:param my_string: A not human friendly string
:type my_string: str
:returns: A human friendly string
:rtype: str
"""
my_string = my_string.replace('_', ' ')
my_string = my_string.capitalize()
return my_string
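        # e.g. humanize('impact_function') returns 'Impact function'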
for p in params:
html += (
'<tr>'
' <td><b>%s</b></td><td></td><td>%s</td>'
'</tr>' % (humanize(p[0]), p[1]))
html += '</table>'
self.lblSummary.setText(html)
# ===========================
# STEP_FC_ANALYSIS
# ===========================
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_pbnReportWeb_released(self):
"""Handle the Open Report in Web Browser button release.
.. note:: This is an automatic Qt slot
executed when the Next button is released.
"""
self.wvResults.open_current_in_browser()
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_pbnReportPDF_released(self):
"""Handle the Generate PDF button release.
.. note:: This is an automatic Qt slot
executed when the Next button is released.
"""
self.analysis_handler.print_map('pdf')
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_pbnReportComposer_released(self):
"""Handle the Open Report in Web Broseer button release.
.. note:: This is an automatic Qt slot
executed when the Next button is released.
"""
self.analysis_handler.print_map('composer')
def setup_and_run_analysis(self):
"""Execute analysis after the tab is displayed"""
# noinspection PyTypeChecker
self.analysis_handler = AnalysisHandler(self)
self.analysis_handler.setup_and_run_analysis()
def set_widgets_step_fc_analysis(self):
"""Set widgets on the Progress tab"""
self.pbProgress.setValue(0)
self.wvResults.setHtml('')
self.pbnReportWeb.hide()
self.pbnReportPDF.hide()
self.pbnReportComposer.hide()
self.lblAnalysisStatus.setText(self.tr('Running analysis...'))
# ===========================
# STEPS NAVIGATION
# ===========================
def go_to_step(self, step):
"""Set the stacked widget to the given step, set up the buttons,
and run all operations that should start immediately after
entering the new step.
:param step: The step number to be moved to.
:type step: int
"""
self.stackedWidget.setCurrentIndex(step - 1)
self.lblStep.clear()
        # Disable the Next button unless new data already entered
        self.pbnNext.setEnabled(self.is_ready_to_next_step(step))
        # Disable the Back button on the first step of each mode, unless
        # we are in a keyword creation sub-thread spawned from the IFCW
        self.pbnBack.setEnabled(
            step not in [step_kw_category, step_fc_function_1] or
            self.parent_step is not None)
# Set Next button label
if (step in [step_kw_title, step_fc_analysis] and
self.parent_step is None):
self.pbnNext.setText(self.tr('Finish'))
elif step == step_fc_summary:
self.pbnNext.setText(self.tr('Run'))
else:
self.pbnNext.setText(self.tr('Next'))
# Run analysis after switching to the new step
if step == step_fc_analysis:
# self.update_MessageViewer_size()
self.setup_and_run_analysis()
# Set lblSelectCategory label if entering the kw mode
# from the ifcw mode
if step == step_kw_category and self.parent_step:
if self.parent_step in [step_fc_hazlayer_from_canvas,
step_fc_hazlayer_from_browser]:
text_label = category_question_hazard
elif self.parent_step in [step_fc_explayer_from_canvas,
step_fc_explayer_from_browser]:
text_label = category_question_exposure
else:
text_label = category_question_aggregation
self.lblSelectCategory.setText(text_label)
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_pbnNext_released(self):
"""Handle the Next button release.
.. note:: This is an automatic Qt slot
executed when the Next button is released.
"""
current_step = self.get_current_step()
# Save keywords if it's the end of the keyword creation mode
if current_step == step_kw_title:
self.save_current_keywords()
if current_step == step_kw_aggregation:
good_age_ratio, sum_age_ratios = self.age_ratios_are_valid()
if not good_age_ratio:
                message = self.tr(
                    'The sum of the age ratio defaults is %s, which is '
                    'more than 1. Please adjust the age ratio defaults '
                    'so that their sum does not exceed 1.') % sum_age_ratios
if not self.suppress_warning_dialog:
# noinspection PyCallByClass,PyTypeChecker,PyArgumentList
QtGui.QMessageBox.warning(
self, self.tr('InaSAFE'), message)
return
# After any step involving Browser, add selected layer to map canvas
if current_step in [step_fc_hazlayer_from_browser,
step_fc_explayer_from_browser,
step_fc_agglayer_from_browser]:
if not QgsMapLayerRegistry.instance().mapLayersByName(
self.layer.name()):
QgsMapLayerRegistry.instance().addMapLayers([self.layer])
# After the extent selection, save the extent and disconnect signals
if current_step == step_fc_extent:
self.write_extent()
# Determine the new step to be switched
new_step = self.compute_next_step(current_step)
# Prepare the next tab
if new_step == step_kw_category:
self.set_widgets_step_kw_category()
if new_step == step_kw_subcategory:
self.set_widgets_step_kw_subcategory()
if new_step == step_kw_hazard_category:
self.set_widgets_step_kw_hazard_category()
elif new_step == step_kw_layermode:
self.set_widgets_step_kw_layermode()
elif new_step == step_kw_unit:
self.set_widgets_step_kw_unit()
elif new_step == step_kw_classification:
self.set_widgets_step_kw_classification()
elif new_step == step_kw_field:
self.set_widgets_step_kw_field()
elif new_step == step_kw_resample:
self.set_widgets_step_kw_resample()
elif new_step == step_kw_classify:
self.set_widgets_step_kw_classify()
elif new_step == step_kw_extrakeywords:
self.set_widgets_step_kw_extrakeywords()
elif new_step == step_kw_aggregation:
self.set_widgets_step_kw_aggregation()
elif new_step == step_kw_source:
self.set_widgets_step_kw_source()
elif new_step == step_kw_title:
self.set_widgets_step_kw_title()
elif new_step == step_fc_function_1:
self.set_widgets_step_fc_function_1()
elif new_step == step_fc_function_2:
self.set_widgets_step_fc_function_2()
elif new_step == step_fc_function_3:
self.set_widgets_step_fc_function_3()
elif new_step == step_fc_hazlayer_origin:
self.set_widgets_step_fc_hazlayer_origin()
elif new_step == step_fc_hazlayer_from_canvas:
self.set_widgets_step_fc_hazlayer_from_canvas()
elif new_step == step_fc_hazlayer_from_browser:
self.set_widgets_step_fc_hazlayer_from_browser()
elif new_step == step_fc_explayer_origin:
self.set_widgets_step_fc_explayer_origin()
elif new_step == step_fc_explayer_from_canvas:
self.set_widgets_step_fc_explayer_from_canvas()
elif new_step == step_fc_explayer_from_browser:
self.set_widgets_step_fc_explayer_from_browser()
elif new_step == step_fc_disjoint_layers:
self.set_widgets_step_fc_disjoint_layers()
elif new_step == step_fc_agglayer_origin:
self.set_widgets_step_fc_agglayer_origin()
elif new_step == step_fc_agglayer_from_canvas:
self.set_widgets_step_fc_agglayer_from_canvas()
elif new_step == step_fc_agglayer_from_browser:
self.set_widgets_step_fc_agglayer_from_browser()
elif new_step == step_fc_agglayer_disjoint:
self.set_widgets_step_fc_agglayer_disjoint()
elif new_step == step_fc_extent:
self.set_widgets_step_fc_extent()
elif new_step == step_fc_extent_disjoint:
self.set_widgets_step_fc_extent_disjoint()
elif new_step == step_fc_params:
self.set_widgets_step_fc_params()
elif new_step == step_fc_summary:
self.set_widgets_step_fc_summary()
elif new_step == step_fc_analysis:
self.set_widgets_step_fc_analysis()
elif new_step is None:
# Wizard complete
self.accept()
return
else:
# unknown step
pass
self.go_to_step(new_step)
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_pbnBack_released(self):
"""Handle the Back button release.
.. note:: This is an automatic Qt slot
executed when the Back button is released.
"""
current_step = self.get_current_step()
new_step = self.compute_previous_step(current_step)
# set focus to table widgets, as the inactive selection style is gray
if new_step == step_fc_function_1:
self.tblFunctions1.setFocus()
if new_step == step_fc_function_2:
self.tblFunctions2.setFocus()
# Re-connect disconnected signals when coming back to the Extent step
if new_step == step_fc_extent:
self.set_widgets_step_fc_extent()
# Set Next button label
self.pbnNext.setText(self.tr('Next'))
self.pbnNext.setEnabled(True)
self.go_to_step(new_step)
def is_ready_to_next_step(self, step):
"""Check if the step we enter is initially complete. If so, there is
no reason to block the Next button.
:param step: The present step number.
:type step: int
:returns: True if new step may be enabled.
:rtype: bool
"""
if step == step_kw_category:
return bool(self.selected_category())
if step == step_kw_subcategory:
return bool(self.selected_subcategory())
if step == step_kw_hazard_category:
return bool(self.selected_hazard_category())
if step == step_kw_layermode:
return bool(self.selected_layermode())
if step == step_kw_unit:
return bool(self.selected_unit())
if step == step_kw_classification:
return bool(self.selected_classification())
if step == step_kw_field:
return bool(self.selected_field() or not self.lstFields.count())
if step == step_kw_resample:
return True
if step == step_kw_classify:
            # Allow the user to leave all values unclassified
return True
if step == step_kw_extrakeywords:
return self.are_all_extra_keywords_selected()
if step == step_kw_aggregation:
# Not required
return True
if step == step_kw_source:
# The source_* keywords are not required
return True
if step == step_kw_title:
return bool(self.leTitle.text())
if step == step_fc_function_1:
return bool(self.tblFunctions1.selectedItems())
if step == step_fc_function_2:
return bool(self.tblFunctions2.selectedItems())
if step == step_fc_function_3:
return bool(self.selected_function())
if step == step_fc_hazlayer_origin:
return (bool(self.rbHazLayerFromCanvas.isChecked() or
self.rbHazLayerFromBrowser.isChecked()))
if step == step_fc_hazlayer_from_canvas:
return bool(self.selected_canvas_hazlayer())
if step == step_fc_hazlayer_from_browser:
return self.get_layer_description_from_browser('hazard')[0]
if step == step_fc_explayer_origin:
return (bool(self.rbExpLayerFromCanvas.isChecked() or
self.rbExpLayerFromBrowser.isChecked()))
if step == step_fc_explayer_from_canvas:
return bool(self.selected_canvas_explayer())
if step == step_fc_explayer_from_browser:
return self.get_layer_description_from_browser('exposure')[0]
if step == step_fc_disjoint_layers:
# Never go further if layers disjoint
return False
if step == step_fc_agglayer_origin:
return (bool(self.rbAggLayerFromCanvas.isChecked() or
self.rbAggLayerFromBrowser.isChecked() or
self.rbAggLayerNoAggregation.isChecked()))
if step == step_fc_agglayer_from_canvas:
return bool(self.selected_canvas_agglayer())
if step == step_fc_agglayer_from_browser:
return self.get_layer_description_from_browser('aggregation')[0]
if step == step_fc_agglayer_disjoint:
# Never go further if layers disjoint
return False
if step == step_fc_extent:
return True
if step == step_fc_params:
return True
if step == step_fc_summary:
return True
if step == step_fc_analysis:
return True
return True
def compute_next_step(self, current_step):
"""Determine the next step to be switched to.
:param current_step: The present step number.
:type current_step: int
:returns: The next step number or None if finished.
:rtype: int
"""
if current_step == step_kw_category:
if self.selected_category() == layer_purpose_aggregation:
new_step = step_kw_field
else:
new_step = step_kw_subcategory
elif current_step == step_kw_subcategory:
if self.selected_category() == layer_purpose_hazard:
new_step = step_kw_hazard_category
else:
new_step = step_kw_layermode
elif current_step == step_kw_hazard_category:
new_step = step_kw_layermode
elif current_step == step_kw_layermode:
if self.selected_layermode() == layer_mode_classified:
if is_point_layer(self.layer) \
and self.selected_category() == layer_purpose_hazard:
                    # Skip FIELD and CLASSIFICATION for point volcanoes
new_step = step_kw_extrakeywords
elif self.classifications_for_layer():
new_step = step_kw_classification
elif is_raster_layer(self.layer):
new_step = step_kw_extrakeywords
else:
new_step = step_kw_field
else:
# CONTINUOUS DATA, ALL GEOMETRIES
new_step = step_kw_unit
elif current_step == step_kw_unit:
if is_raster_layer(self.layer):
if self.selected_category() == layer_purpose_exposure:
# Only go to resample for continuous raster exposures
new_step = step_kw_resample
else:
new_step = step_kw_extrakeywords
else:
# Currently not used, as we don't have continuous vectors
new_step = step_kw_field
elif current_step == step_kw_classification:
if is_raster_layer(self.layer):
new_step = step_kw_classify
else:
new_step = step_kw_field
elif current_step == step_kw_field:
if self.selected_category() == layer_purpose_aggregation:
new_step = step_kw_aggregation
elif self.selected_layermode() == layer_mode_classified and \
self.classifications_for_layer():
new_step = step_kw_classify
else:
new_step = step_kw_extrakeywords
elif current_step == step_kw_resample:
new_step = step_kw_extrakeywords
elif current_step == step_kw_classify:
new_step = step_kw_extrakeywords
elif current_step == step_kw_extrakeywords:
new_step = step_kw_source
elif current_step == step_kw_aggregation:
new_step = step_kw_source
elif current_step == step_kw_source:
new_step = step_kw_title
elif current_step == step_kw_title:
if self.parent_step:
# Come back to the parent thread
new_step = self.parent_step
self.parent_step = None
self.is_selected_layer_keywordless = False
self.set_mode_label_to_ifcw()
else:
# Wizard complete
new_step = None
elif current_step == step_fc_hazlayer_origin:
if self.rbHazLayerFromCanvas.isChecked():
new_step = step_fc_hazlayer_from_canvas
else:
new_step = step_fc_hazlayer_from_browser
elif current_step in [step_fc_hazlayer_from_canvas,
step_fc_hazlayer_from_browser]:
if self.is_selected_layer_keywordless:
# insert keyword creation thread here
self.parent_step = current_step
self.existing_keywords = None
self.set_mode_label_to_keywords_creation()
new_step = step_kw_category
else:
new_step = step_fc_explayer_origin
elif current_step == step_fc_explayer_origin:
if self.rbExpLayerFromCanvas.isChecked():
new_step = step_fc_explayer_from_canvas
else:
new_step = step_fc_explayer_from_browser
elif current_step in [step_fc_explayer_from_canvas,
step_fc_explayer_from_browser]:
if self.is_selected_layer_keywordless:
# insert keyword creation thread here
self.parent_step = current_step
self.existing_keywords = None
self.set_mode_label_to_keywords_creation()
new_step = step_kw_category
else:
if not self.layers_intersect(self.hazard_layer,
self.exposure_layer):
new_step = step_fc_disjoint_layers
else:
new_step = step_fc_agglayer_origin
elif current_step == step_fc_disjoint_layers:
new_step = step_fc_agglayer_origin
elif current_step == step_fc_agglayer_origin:
if self.rbAggLayerFromCanvas.isChecked():
new_step = step_fc_agglayer_from_canvas
elif self.rbAggLayerFromBrowser.isChecked():
new_step = step_fc_agglayer_from_browser
else:
new_step = step_fc_extent
elif current_step in [step_fc_agglayer_from_canvas,
step_fc_agglayer_from_browser]:
if self.is_selected_layer_keywordless:
# insert keyword creation thread here
self.parent_step = current_step
self.existing_keywords = None
self.set_mode_label_to_keywords_creation()
new_step = step_kw_category
else:
flag = self.layers_intersect(
self.exposure_layer, self.aggregation_layer)
if not flag:
new_step = step_fc_agglayer_disjoint
else:
new_step = step_fc_extent
elif current_step == step_fc_agglayer_disjoint:
new_step = step_fc_extent
elif current_step == step_fc_extent:
if self.validate_extent():
new_step = step_fc_params
else:
new_step = step_fc_extent_disjoint
elif current_step in [step_fc_function_1, step_fc_function_2,
step_fc_function_3,
step_fc_params, step_fc_summary]:
new_step = current_step + 1
elif current_step == step_fc_analysis:
new_step = None # Wizard complete
elif current_step < self.stackedWidget.count():
raise Exception('Unhandled step')
else:
raise Exception('Unexpected number of steps')
# Skip the extra_keywords tab if no extra keywords available:
if (new_step == step_kw_extrakeywords and not
self.additional_keywords_for_the_layer()):
new_step = step_kw_source
return new_step
def compute_previous_step(self, current_step):
"""Determine the previous step to be switched to (by the Back button).
:param current_step: The present step number.
:type current_step: int
:returns: The previous step number.
:rtype: int
"""
if current_step == step_kw_category:
if self.parent_step:
# Come back to the parent thread
self.set_mode_label_to_ifcw()
new_step = self.parent_step
self.parent_step = None
else:
new_step = step_kw_category
elif current_step == step_kw_subcategory:
new_step = step_kw_category
elif current_step == step_kw_hazard_category:
new_step = step_kw_subcategory
elif current_step == step_kw_layermode:
if self.selected_category() == layer_purpose_hazard:
new_step = step_kw_hazard_category
else:
new_step = step_kw_subcategory
elif current_step == step_kw_unit:
new_step = step_kw_layermode
elif current_step == step_kw_classification:
new_step = step_kw_layermode
elif current_step == step_kw_field:
if self.selected_category() == layer_purpose_aggregation:
new_step = step_kw_category
elif self.selected_layermode() == layer_mode_continuous:
new_step = step_kw_unit
elif self.classifications_for_layer():
new_step = step_kw_classification
else:
new_step = step_kw_layermode
elif current_step == step_kw_resample:
new_step = step_kw_unit
elif current_step == step_kw_classify:
if is_raster_layer(self.layer):
new_step = step_kw_classification
else:
new_step = step_kw_field
elif current_step == step_kw_aggregation:
new_step = step_kw_field
elif current_step == step_kw_extrakeywords:
if self.selected_layermode() == layer_mode_classified:
if self.selected_classification():
new_step = step_kw_classify
elif self.selected_field():
new_step = step_kw_field
else:
new_step = step_kw_layermode
else:
if self.selected_allowresampling() is not None:
new_step = step_kw_resample
else:
new_step = step_kw_unit
elif current_step == step_kw_source:
if self.selected_category() == layer_purpose_aggregation:
new_step = step_kw_aggregation
elif self.selected_extra_keywords():
new_step = step_kw_extrakeywords
            # otherwise behave as if it were step_kw_extrakeywords
elif self.selected_layermode() == layer_mode_classified:
if self.selected_classification():
new_step = step_kw_classify
elif self.selected_field():
new_step = step_kw_field
else:
new_step = step_kw_layermode
else:
if self.selected_allowresampling() is not None:
new_step = step_kw_resample
else:
new_step = step_kw_unit
elif current_step == step_kw_title:
new_step = step_kw_source
elif current_step == step_fc_function_1:
new_step = step_fc_function_1
elif current_step == step_fc_hazlayer_from_browser:
new_step = step_fc_hazlayer_origin
elif current_step == step_fc_explayer_origin:
if self.rbHazLayerFromCanvas.isChecked():
new_step = step_fc_hazlayer_from_canvas
else:
new_step = step_fc_hazlayer_from_browser
elif current_step == step_fc_explayer_from_browser:
new_step = step_fc_explayer_origin
elif current_step == step_fc_disjoint_layers:
if self.rbExpLayerFromCanvas.isChecked():
new_step = step_fc_explayer_from_canvas
else:
new_step = step_fc_explayer_from_browser
elif current_step == step_fc_agglayer_origin:
if self.rbExpLayerFromCanvas.isChecked():
new_step = step_fc_explayer_from_canvas
else:
new_step = step_fc_explayer_from_browser
elif current_step == step_fc_agglayer_from_browser:
new_step = step_fc_agglayer_origin
elif current_step == step_fc_agglayer_disjoint:
if self.rbAggLayerFromCanvas.isChecked():
new_step = step_fc_agglayer_from_canvas
else:
new_step = step_fc_agglayer_from_browser
elif current_step == step_fc_extent:
if self.rbAggLayerFromCanvas.isChecked():
new_step = step_fc_agglayer_from_canvas
elif self.rbAggLayerFromBrowser.isChecked():
new_step = step_fc_agglayer_from_browser
else:
new_step = step_fc_agglayer_origin
elif current_step == step_fc_params:
new_step = step_fc_extent
else:
new_step = current_step - 1
return new_step
# ===========================
# COMMON METHODS
# ===========================
def get_current_step(self):
"""Return current step of the wizard.
:returns: Current step of the wizard.
:rtype: int
"""
return self.stackedWidget.currentIndex() + 1
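    # Note (illustrative): wizard steps are numbered from 1, while
    # QStackedWidget.currentIndex() is 0-based, hence the ``+ 1`` above.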
def get_layer_geometry_id(self, layer=None):
"""Obtain layer mode of a given layer.
If no layer specified, the current layer is used
:param layer : layer to examine
:type layer: QgsMapLayer or None
:returns: The layer mode.
:rtype: str
"""
if not layer:
layer = self.layer
if is_raster_layer(layer):
return 'raster'
elif is_point_layer(layer):
return 'point'
elif is_polygon_layer(layer):
return 'polygon'
else:
return 'line'
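    # Illustrative usage (sketch): branch on the geometry id of a layer, e.g.
    #   if self.get_layer_geometry_id() == 'raster':
    #       pass  # raster-specific handling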
def get_existing_keyword(self, keyword):
"""Obtain an existing keyword's value.
:param keyword: A keyword from keywords.
:type keyword: str
:returns: The value of the keyword.
:rtype: str
"""
if self.existing_keywords is None:
return None
if keyword is not None:
return self.existing_keywords.get(keyword, None)
else:
return None
def get_keywords(self):
"""Obtain the state of the dialog as a keywords dict.
:returns: Keywords reflecting the state of the dialog.
:rtype: dict
"""
keywords = {}
keywords['layer_geometry'] = self.get_layer_geometry_id()
if self.selected_category():
keywords['layer_purpose'] = self.selected_category()['key']
if keywords['layer_purpose'] == 'aggregation':
keywords.update(self.get_aggregation_attributes())
if self.selected_subcategory():
key = self.selected_category()['key']
keywords[key] = self.selected_subcategory()['key']
if self.selected_hazard_category():
keywords['hazard_category'] \
= self.selected_hazard_category()['key']
if self.selected_layermode():
keywords['layer_mode'] = self.selected_layermode()['key']
if self.selected_unit():
if self.selected_category() == layer_purpose_hazard:
key = continuous_hazard_unit['key']
else:
key = exposure_unit['key']
keywords[key] = self.selected_unit()['key']
if self.selected_allowresampling() is not None:
keywords['allow_resampling'] = (
                'true' if self.selected_allowresampling() else 'false')
if self.lstFields.currentItem():
field_keyword = self.field_keyword_for_the_layer()
keywords[field_keyword] = self.lstFields.currentItem().text()
if self.selected_classification():
geom = 'raster' if is_raster_layer(self.layer) else 'vector'
key = '%s_%s_classification' % (geom,
self.selected_category()['key'])
keywords[key] = self.selected_classification()['key']
value_map = self.selected_mapping()
if value_map:
keywords['value_map'] = json.dumps(value_map)
extra_keywords = self.selected_extra_keywords()
for key in extra_keywords:
keywords[key] = extra_keywords[key]
if self.leSource.text():
keywords['source'] = get_unicode(self.leSource.text())
if self.leSource_url.text():
keywords['url'] = get_unicode(self.leSource_url.text())
if self.leSource_scale.text():
keywords['scale'] = get_unicode(self.leSource_scale.text())
if self.leSource_date.text():
keywords['date'] = get_unicode(self.leSource_date.text())
if self.leSource_license.text():
keywords['license'] = get_unicode(self.leSource_license.text())
if self.leTitle.text():
keywords['title'] = get_unicode(self.leTitle.text())
return keywords
def save_current_keywords(self):
"""Save keywords to the layer.
It will write out the keywords for the current layer.
This method is based on the KeywordsDialog class.
"""
current_keywords = self.get_keywords()
try:
self.keyword_io.write_keywords(
layer=self.layer, keywords=current_keywords)
except InaSAFEError, e:
error_message = get_error_message(e)
# noinspection PyCallByClass,PyTypeChecker,PyArgumentList
QtGui.QMessageBox.warning(
self, self.tr('InaSAFE'),
((self.tr(
'An error was encountered when saving the keywords:\n'
'%s') % error_message.to_html())))
if self.dock is not None:
# noinspection PyUnresolvedReferences
self.dock.get_layers()
# noinspection PyUnresolvedReferences,PyMethodMayBeStatic
def auto_select_one_item(self, list_widget):
"""Select item in the list in list_widget if it's the only item.
:param list_widget: The list widget that want to be checked.
:type list_widget: QListWidget
"""
if list_widget.count() == 1 and list_widget.currentRow() == -1:
list_widget.setCurrentRow(0)
def set_tool_tip(self):
"""Set tool tip as helper text for some objects."""
title_tooltip = self.tr('Title of the layer.')
source_tooltip = self.tr(
            'Please record who the custodian of this layer is, e.g. '
'OpenStreetMap')
date_tooltip = self.tr(
            'When was this data collected or downloaded, e.g. 1-May-2014?')
scale_tooltip = self.tr('What is the scale of this layer?')
url_tooltip = self.tr(
            'Do the custodians have their own website, '
            'e.g. www.openstreetmap.org?')
self.lblTitle.setToolTip(title_tooltip)
self.lblSource.setToolTip(source_tooltip)
self.lblDate.setToolTip(date_tooltip)
self.lblScale.setToolTip(scale_tooltip)
self.lblURL.setToolTip(url_tooltip)
self.leTitle.setToolTip(title_tooltip)
self.leSource.setToolTip(source_tooltip)
self.leSource_date.setToolTip(date_tooltip)
self.leSource_scale.setToolTip(scale_tooltip)
self.leSource_url.setToolTip(url_tooltip)
| gpl-3.0 | -5,908,274,928,395,027,000 | 39.141865 | 79 | 0.583704 | false |
Drahflow/lymph | lymph/web/wsgi_server.py | 11 | 1200 | import logging
from gevent.pywsgi import WSGIServer, WSGIHandler
logger = logging.getLogger(__name__)
class LymphWSGIHandler(WSGIHandler):
def format_request(self):
        # XXX(Mouad): Copied shamelessly from gevent.pywsgi.WSGIHandler's format_request
        # and removed only the datetime from the output, since it's already part of
        # the lymph logger format.
length = self.response_length or '-'
if self.time_finish:
delta = '%f' % (self.time_finish - self.time_start)
else:
delta = '-'
client_address = self.client_address[0] if isinstance(self.client_address, tuple) else self.client_address
return 'client=%s - - "%s" status=%s length=%s duration=%s (seconds)' % (
client_address or '-',
getattr(self, 'requestline', ''),
(getattr(self, 'status', None) or '000').split()[0],
length,
delta)
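    # Illustrative output (values are hypothetical):
    #   client=127.0.0.1 - - "GET /health HTTP/1.1" status=200 length=17 duration=0.002341 (seconds)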
def log_request(self):
# XXX(Mouad): Workaround to log correctly in gevent wsgi.
# https://github.com/gevent/gevent/issues/106
logger.info(self.format_request())
class LymphWSGIServer(WSGIServer):
handler_class = LymphWSGIHandler
| apache-2.0 | -946,782,471,492,515,700 | 33.285714 | 114 | 0.624167 | false |
user-none/calibre | src/calibre/ebooks/oeb/polish/tests/base.py | 14 | 3643 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os, unittest, shutil
from calibre import CurrentDir
from calibre.ptempfile import TemporaryDirectory
from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.utils.logging import DevNull
import calibre.ebooks.oeb.polish.container as pc
def get_cache():
from calibre.constants import cache_dir
cache = os.path.join(cache_dir(), 'polish-test')
if not os.path.exists(cache):
os.mkdir(cache)
return cache
def needs_recompile(obj, srcs):
if isinstance(srcs, type('')):
srcs = [srcs]
try:
obj_mtime = os.stat(obj).st_mtime
except OSError:
return True
for src in srcs:
if os.stat(src).st_mtime > obj_mtime:
return True
return False
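# Illustrative usage (file names are hypothetical): rebuild only when a source
# is newer than the artifact:
#   if needs_recompile('simple.epub', 'simple.html'):
#       build_book('simple.html', 'simple.epub')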
def build_book(src, dest, args=()):
from calibre.ebooks.conversion.cli import main
main(['ebook-convert', src, dest] + list(args))
def add_resources(raw, rmap):
for placeholder, path in rmap.iteritems():
fname = os.path.basename(path)
shutil.copy2(path, '.')
raw = raw.replace(placeholder, fname)
return raw
def get_simple_book(fmt='epub'):
cache = get_cache()
ans = os.path.join(cache, 'simple.'+fmt)
src = os.path.join(os.path.dirname(__file__), 'simple.html')
if needs_recompile(ans, src):
with TemporaryDirectory('bpt') as tdir:
with CurrentDir(tdir):
raw = open(src, 'rb').read().decode('utf-8')
raw = add_resources(raw, {
'LMONOI': P('fonts/liberation/LiberationMono-Italic.ttf'),
'LMONOR': P('fonts/liberation/LiberationMono-Regular.ttf'),
'IMAGE1': I('marked.png'),
'IMAGE2': I('textures/light_wood.png'),
})
shutil.copy2(I('lt.png'), '.')
x = 'index.html'
with open(x, 'wb') as f:
f.write(raw.encode('utf-8'))
build_book(x, ans, args=[
'--level1-toc=//h:h2', '--language=en', '--authors=Kovid Goyal', '--cover=lt.png'])
return ans
def get_split_book(fmt='epub'):
cache = get_cache()
ans = os.path.join(cache, 'split.'+fmt)
src = os.path.join(os.path.dirname(__file__), 'split.html')
if needs_recompile(ans, src):
x = src.replace('split.html', 'index.html')
raw = open(src, 'rb').read().decode('utf-8')
try:
with open(x, 'wb') as f:
f.write(raw.encode('utf-8'))
build_book(x, ans, args=['--level1-toc=//h:h2', '--language=en', '--authors=Kovid Goyal',
'--cover=' + I('lt.png')])
finally:
os.remove(x)
return ans
devnull = DevNull()
class BaseTest(unittest.TestCase):
longMessage = True
maxDiff = None
def setUp(self):
pc.default_log = devnull
self.tdir = PersistentTemporaryDirectory(suffix='-polish-test')
def tearDown(self):
shutil.rmtree(self.tdir, ignore_errors=True)
del self.tdir
def check_links(self, container):
for name in container.name_path_map:
for link in container.iterlinks(name, get_line_numbers=False):
dest = container.href_to_name(link, name)
if dest:
self.assertTrue(container.exists(dest), 'The link %s in %s does not exist' % (link, name))
| gpl-3.0 | 6,065,995,719,595,907,000 | 33.367925 | 110 | 0.576722 | false |
mojeto/django | django/contrib/gis/db/models/sql/conversion.py | 11 | 2426 | """
This module holds simple classes to convert geospatial values from the
database.
"""
from decimal import Decimal
from django.contrib.gis.db.models.fields import GeoSelectFormatMixin
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
class BaseField:
empty_strings_allowed = True
def get_db_converters(self, connection):
return [self.from_db_value]
def select_format(self, compiler, sql, params):
return sql, params
class AreaField(BaseField):
"Wrapper for Area values."
def __init__(self, area_att=None):
self.area_att = area_att
def from_db_value(self, value, expression, connection, context):
if connection.features.interprets_empty_strings_as_nulls and value == '':
value = None
# If the database returns a Decimal, convert it to a float as expected
# by the Python geometric objects.
if isinstance(value, Decimal):
value = float(value)
# If the units are known, convert value into area measure.
if value is not None and self.area_att:
value = Area(**{self.area_att: value})
return value
def get_internal_type(self):
return 'AreaField'
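# Illustrative (assuming area_att='sq_m'): a raw Decimal('42.0') returned by
# the database is converted to float and wrapped as Area(sq_m=42.0).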
class DistanceField(BaseField):
"Wrapper for Distance values."
def __init__(self, distance_att):
self.distance_att = distance_att
def from_db_value(self, value, expression, connection, context):
if value is not None:
value = Distance(**{self.distance_att: value})
return value
def get_internal_type(self):
return 'DistanceField'
class GeomField(GeoSelectFormatMixin, BaseField):
"""
Wrapper for Geometry values. It is a lightweight alternative to
using GeometryField (which requires an SQL query upon instantiation).
"""
# Hacky marker for get_db_converters()
geom_type = None
def from_db_value(self, value, expression, connection, context):
if value is not None:
value = Geometry(value)
return value
def get_internal_type(self):
return 'GeometryField'
class GMLField(BaseField):
"""
Wrapper for GML to be used by Oracle to ensure Database.LOB conversion.
"""
def get_internal_type(self):
return 'GMLField'
def from_db_value(self, value, expression, connection, context):
return value
| bsd-3-clause | 3,402,006,066,017,740,300 | 28.228916 | 81 | 0.66488 | false |
abinashk-inf/AstroBox | src/ext/sockjs/tornado/transports/rawwebsocket.py | 9 | 2637 | # -*- coding: utf-8 -*-
"""
sockjs.tornado.transports.rawwebsocket
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Raw websocket transport implementation
"""
import logging
import socket
from sockjs.tornado import websocket, session
from sockjs.tornado.transports import base
LOG = logging.getLogger("tornado.general")
class RawSession(session.BaseSession):
"""Raw session without any sockjs protocol encoding/decoding. Simply
works as a proxy between `SockJSConnection` class and `RawWebSocketTransport`."""
def send_message(self, msg, stats=True, binary=False):
self.handler.send_pack(msg, binary)
def on_message(self, msg):
self.conn.on_message(msg)
class RawWebSocketTransport(websocket.SockJSWebSocketHandler, base.BaseTransportMixin):
"""Raw Websocket transport"""
name = 'rawwebsocket'
def initialize(self, server):
self.server = server
self.session = None
self.active = True
def open(self):
# Stats
self.server.stats.on_conn_opened()
# Disable nagle if needed
if self.server.settings['disable_nagle']:
self.stream.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
# Create and attach to session
self.session = RawSession(self.server.get_connection_class(), self.server)
self.session.set_handler(self)
self.session.verify_state()
def _detach(self):
if self.session is not None:
self.session.remove_handler(self)
self.session = None
def on_message(self, message):
# SockJS requires that empty messages should be ignored
if not message or not self.session:
return
try:
self.session.on_message(message)
except Exception:
LOG.exception('RawWebSocket')
# Close running connection
self.abort_connection()
def on_close(self):
# Close session if websocket connection was closed
if self.session is not None:
# Stats
self.server.stats.on_conn_closed()
session = self.session
self._detach()
session.close()
def send_pack(self, message, binary=False):
# Send message
try:
self.write_message(message, binary)
except IOError:
self.server.io_loop.add_callback(self.on_close)
def session_closed(self):
try:
self.close()
except IOError:
pass
finally:
self._detach()
# Websocket overrides
def allow_draft76(self):
return True
| agpl-3.0 | -5,703,634,699,476,184,000 | 27.354839 | 87 | 0.614714 | false |
rnyberg/pyfibot | pyfibot/modules/available/module_forecast.py | 2 | 1114 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, division
import logging
log = logging.getLogger("forecast")
has_pywapi = False
try:
import pywapi
has_pywapi = True
except:
log.error('Error loading library pywapi. Library not found.')
def fahrenheit_to_celsius(f):
return (int(f) - 32) / 1.8
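# Illustrative: fahrenheit_to_celsius(32) gives 0.0 and
# fahrenheit_to_celsius(212) gives 100.0 (up to float rounding).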
def command_forecast(bot, user, channel, args):
"""This module tells weather forecast for location"""
if not has_pywapi:
return
result_dict = pywapi.get_weather_from_yahoo(args)
if not all(result_dict.values()):
bot.say(channel, 'unknown location')
return
def format_day(day):
return (u'%s: %s (%.0f°C/%.0f°C)' % (day['day_of_week'],
day['condition'],
                                              fahrenheit_to_celsius(day['low']),
                                              fahrenheit_to_celsius(day['high'])))
answerstr = u'%s: ' % (result_dict['forecast_information']['city'])
answerstr += u", ".join(format_day(day) for day in result_dict['forecasts'])
bot.say(channel, answerstr.encode('utf-8'))
| bsd-3-clause | 5,223,996,139,730,087,000 | 27.512821 | 80 | 0.596223 | false |
jonhadfield/linkchecker | linkcheck/gui/syntax.py | 7 | 3578 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2011-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from PyQt4 import QtCore, QtGui
def format (color, style=''):
"""Return a QTextCharFormat with the given attributes."""
format = QtGui.QTextCharFormat()
format.setForeground(getattr(QtCore.Qt, color))
if 'bold' in style:
format.setFontWeight(QtGui.QFont.Bold)
if 'italic' in style:
format.setFontItalic(True)
return format
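# Illustrative: format('darkBlue', 'bold') returns a bold QTextCharFormat
# whose foreground color is QtCore.Qt.darkBlue.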
class Highlighter (QtGui.QSyntaxHighlighter):
"""Base class for all highlighters."""
def __init__ (self, document):
"""Initialize rules and styles."""
super(Highlighter, self).__init__(document)
self.rules = []
self.styles = {}
def highlightBlock(self, text):
"""Highlight a text block."""
for expression, format in self.rules:
# get first match
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, format)
# jump to next match
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
def addRule (self, pattern, style):
"""Add a rule pattern with given style."""
self.rules.append((QtCore.QRegExp(pattern), self.styles[style]))
class XmlHighlighter (Highlighter):
"""XML syntax highlighter."""
def __init__(self, document):
"""Set XML syntax rules."""
super(XmlHighlighter, self).__init__(document)
self.styles.update({
'keyword': format('darkBlue'),
'attribute': format('darkGreen'),
'comment': format('darkYellow'),
'string': format('darkMagenta'),
})
# keywords
for reg in ('/>', '>', '<!?[a-zA-Z0-9_]+'):
self.addRule(reg, 'keyword')
# attributes
self.addRule(r"\b[A-Za-z0-9_]+(?=\s*\=)", 'attribute')
# double-quoted string, possibly containing escape sequences
self.addRule(r'"[^"\\]*(\\.[^"\\]*)*"', 'string')
# single-quoted string, possibly containing escape sequences
self.addRule(r"'[^'\\]*(\\.[^'\\]*)*'", 'string')
# comments
self.addRule(r"<!--[^>]*-->", 'comment')
# Treat HTML as XML
HtmlHighlighter = XmlHighlighter
class IniHighlighter (Highlighter):
"""INI syntax highlighter."""
def __init__(self, document):
"""Set INI syntax rules."""
super(IniHighlighter, self).__init__(document)
self.styles.update({
'section': format('darkBlue'),
'property': format('darkGreen'),
'comment': format('darkYellow'),
})
        self.addRule(r'\[[a-zA-Z0-9_]+\]', 'section')
        self.addRule(r'\b[a-zA-Z0-9_]+(?=\s*\=)', 'property')
self.addRule(r'#[^\n]*', 'comment')
| gpl-2.0 | 7,163,881,046,305,214,000 | 35.510204 | 73 | 0.604807 | false |
szeged/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/src/_pytest/_code/code.py | 32 | 32343 | from __future__ import absolute_import, division, print_function
import inspect
import sys
import traceback
from inspect import CO_VARARGS, CO_VARKEYWORDS
import attr
import re
from weakref import ref
from _pytest.compat import _PY2, _PY3, PY35, safe_str
from six import text_type
import py
builtin_repr = repr
if _PY3:
from traceback import format_exception_only
else:
from ._py2traceback import format_exception_only
class Code(object):
""" wrapper around Python code objects """
def __init__(self, rawcode):
if not hasattr(rawcode, "co_filename"):
rawcode = getrawcode(rawcode)
try:
self.filename = rawcode.co_filename
self.firstlineno = rawcode.co_firstlineno - 1
self.name = rawcode.co_name
except AttributeError:
raise TypeError("not a code object: %r" % (rawcode,))
self.raw = rawcode
def __eq__(self, other):
return self.raw == other.raw
__hash__ = None
def __ne__(self, other):
return not self == other
@property
def path(self):
""" return a path object pointing to source code (note that it
might not point to an actually existing file). """
try:
p = py.path.local(self.raw.co_filename)
# maybe don't try this checking
if not p.check():
raise OSError("py.path check failed.")
except OSError:
# XXX maybe try harder like the weird logic
# in the standard lib [linecache.updatecache] does?
p = self.raw.co_filename
return p
@property
def fullsource(self):
""" return a _pytest._code.Source object for the full source file of the code
"""
from _pytest._code import source
full, _ = source.findsource(self.raw)
return full
def source(self):
""" return a _pytest._code.Source object for the code object's source only
"""
# return source only for that part of code
import _pytest._code
return _pytest._code.Source(self.raw)
def getargs(self, var=False):
""" return a tuple with the argument names for the code object
if 'var' is set True also return the names of the variable and
keyword arguments when present
"""
        # handy shortcut for getting args
raw = self.raw
argcount = raw.co_argcount
if var:
            argcount += bool(raw.co_flags & CO_VARARGS)
            argcount += bool(raw.co_flags & CO_VARKEYWORDS)
return raw.co_varnames[:argcount]
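# Illustrative: for ``def f(x, *args, **kw): pass`` the wrapper gives
#   Code(f).getargs() == ('x',)
#   Code(f).getargs(var=True) == ('x', 'args', 'kw')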
class Frame(object):
"""Wrapper around a Python frame holding f_locals and f_globals
in which expressions can be evaluated."""
def __init__(self, frame):
self.lineno = frame.f_lineno - 1
self.f_globals = frame.f_globals
self.f_locals = frame.f_locals
self.raw = frame
self.code = Code(frame.f_code)
@property
def statement(self):
""" statement this frame is at """
import _pytest._code
if self.code.fullsource is None:
return _pytest._code.Source("")
return self.code.fullsource.getstatement(self.lineno)
def eval(self, code, **vars):
""" evaluate 'code' in the frame
'vars' are optional additional local variables
returns the result of the evaluation
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
return eval(code, self.f_globals, f_locals)
def exec_(self, code, **vars):
""" exec 'code' in the frame
        'vars' are optional additional local variables
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
py.builtin.exec_(code, self.f_globals, f_locals)
def repr(self, object):
""" return a 'safe' (non-recursive, one-line) string repr for 'object'
"""
return py.io.saferepr(object)
def is_true(self, object):
return object
def getargs(self, var=False):
""" return a list of tuples (name, value) for all arguments
if 'var' is set True also include the variable and keyword
arguments when present
"""
retval = []
for arg in self.code.getargs(var):
try:
retval.append((arg, self.f_locals[arg]))
except KeyError:
pass # this can occur when using Psyco
return retval
class TracebackEntry(object):
""" a single entry in a traceback """
_repr_style = None
exprinfo = None
def __init__(self, rawentry, excinfo=None):
self._excinfo = excinfo
self._rawentry = rawentry
self.lineno = rawentry.tb_lineno - 1
def set_repr_style(self, mode):
assert mode in ("short", "long")
self._repr_style = mode
@property
def frame(self):
import _pytest._code
return _pytest._code.Frame(self._rawentry.tb_frame)
@property
def relline(self):
return self.lineno - self.frame.code.firstlineno
def __repr__(self):
return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno + 1)
@property
def statement(self):
""" _pytest._code.Source object for the current statement """
source = self.frame.code.fullsource
return source.getstatement(self.lineno)
@property
def path(self):
""" path to the source code """
return self.frame.code.path
def getlocals(self):
return self.frame.f_locals
    locals = property(getlocals, None, None, "locals of underlying frame")
def getfirstlinesource(self):
# on Jython this firstlineno can be -1 apparently
return max(self.frame.code.firstlineno, 0)
def getsource(self, astcache=None):
""" return failing source code. """
# we use the passed in astcache to not reparse asttrees
# within exception info printing
from _pytest._code.source import getstatementrange_ast
source = self.frame.code.fullsource
if source is None:
return None
key = astnode = None
if astcache is not None:
key = self.frame.code.path
if key is not None:
astnode = astcache.get(key, None)
start = self.getfirstlinesource()
try:
astnode, _, end = getstatementrange_ast(
self.lineno, source, astnode=astnode
)
except SyntaxError:
end = self.lineno + 1
else:
if key is not None:
astcache[key] = astnode
return source[start:end]
source = property(getsource)
def ishidden(self):
""" return True if the current frame has a var __tracebackhide__
resolving to True
If __tracebackhide__ is a callable, it gets called with the
ExceptionInfo instance and can decide whether to hide the traceback.
mostly for internal use
"""
try:
tbh = self.frame.f_locals["__tracebackhide__"]
except KeyError:
try:
tbh = self.frame.f_globals["__tracebackhide__"]
except KeyError:
return False
if callable(tbh):
return tbh(None if self._excinfo is None else self._excinfo())
else:
return tbh
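    # Illustrative: a helper can hide its own frame from pytest tracebacks,
    # e.g.
    #   def check(x):
    #       __tracebackhide__ = True
    #       assert x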
def __str__(self):
try:
fn = str(self.path)
except py.error.Error:
fn = "???"
name = self.frame.code.name
try:
line = str(self.statement).lstrip()
except KeyboardInterrupt:
raise
except: # noqa
line = "???"
return " File %r:%d in %s\n %s\n" % (fn, self.lineno + 1, name, line)
def name(self):
return self.frame.code.raw.co_name
    name = property(name, None, None, "co_name of underlying code")
class Traceback(list):
""" Traceback objects encapsulate and offer higher level
access to Traceback entries.
"""
Entry = TracebackEntry
def __init__(self, tb, excinfo=None):
""" initialize from given python traceback object and ExceptionInfo """
self._excinfo = excinfo
if hasattr(tb, "tb_next"):
def f(cur):
while cur is not None:
yield self.Entry(cur, excinfo=excinfo)
cur = cur.tb_next
list.__init__(self, f(tb))
else:
list.__init__(self, tb)
def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
""" return a Traceback instance wrapping part of this Traceback
        by providing any combination of path, lineno and firstlineno, the
first frame to start the to-be-returned traceback is determined
this allows cutting the first part of a Traceback instance e.g.
for formatting reasons (removing some uninteresting bits that deal
with handling of the exception/traceback)
"""
for x in self:
code = x.frame.code
codepath = code.path
if (
(path is None or codepath == path)
and (
excludepath is None
or not hasattr(codepath, "relto")
or not codepath.relto(excludepath)
)
and (lineno is None or x.lineno == lineno)
and (firstlineno is None or x.frame.code.firstlineno == firstlineno)
):
return Traceback(x._rawentry, self._excinfo)
return self
def __getitem__(self, key):
val = super(Traceback, self).__getitem__(key)
if isinstance(key, type(slice(0))):
val = self.__class__(val)
return val
def filter(self, fn=lambda x: not x.ishidden()):
""" return a Traceback instance with certain items removed
fn is a function that gets a single argument, a TracebackEntry
instance, and should return True when the item should be added
to the Traceback, False when not
by default this removes all the TracebackEntries which are hidden
(see ishidden() above)
"""
return Traceback(filter(fn, self), self._excinfo)
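    # Illustrative: besides the default (dropping hidden entries), a custom
    # criterion can be passed, e.g.
    #   traceback.filter(lambda entry: 'site-packages' not in str(entry.path))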
def getcrashentry(self):
""" return last non-hidden traceback entry that lead
to the exception of a traceback.
"""
for i in range(-1, -len(self) - 1, -1):
entry = self[i]
if not entry.ishidden():
return entry
return self[-1]
def recursionindex(self):
""" return the index of the frame/TracebackEntry where recursion
originates if appropriate, None if no recursion occurred
"""
cache = {}
for i, entry in enumerate(self):
# id for the code.raw is needed to work around
# the strange metaprogramming in the decorator lib from pypi
# which generates code objects that have hash/value equality
# XXX needs a test
key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
# print "checking for recursion at", key
values = cache.setdefault(key, [])
if values:
f = entry.frame
loc = f.f_locals
for otherloc in values:
if f.is_true(
f.eval(
co_equal,
__recursioncache_locals_1=loc,
__recursioncache_locals_2=otherloc,
)
):
return i
values.append(entry.frame.f_locals)
return None
co_equal = compile(
"__recursioncache_locals_1 == __recursioncache_locals_2", "?", "eval"
)
class ExceptionInfo(object):
""" wraps sys.exc_info() objects and offers
help for navigating the traceback.
"""
_striptext = ""
_assert_start_repr = "AssertionError(u'assert " if _PY2 else "AssertionError('assert "
def __init__(self, tup=None, exprinfo=None):
import _pytest._code
if tup is None:
tup = sys.exc_info()
if exprinfo is None and isinstance(tup[1], AssertionError):
exprinfo = getattr(tup[1], "msg", None)
if exprinfo is None:
exprinfo = py.io.saferepr(tup[1])
if exprinfo and exprinfo.startswith(self._assert_start_repr):
self._striptext = "AssertionError: "
self._excinfo = tup
#: the exception class
self.type = tup[0]
#: the exception instance
self.value = tup[1]
#: the exception raw traceback
self.tb = tup[2]
#: the exception type name
self.typename = self.type.__name__
#: the exception traceback (_pytest._code.Traceback instance)
self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self))
def __repr__(self):
return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
def exconly(self, tryshort=False):
""" return the exception as a string
when 'tryshort' resolves to True, and the exception is a
_pytest._code._AssertionError, only the actual exception part of
the exception representation is returned (so 'AssertionError: ' is
removed from the beginning)
"""
lines = format_exception_only(self.type, self.value)
text = "".join(lines)
text = text.rstrip()
if tryshort:
if text.startswith(self._striptext):
text = text[len(self._striptext):]
return text
def errisinstance(self, exc):
""" return True if the exception is an instance of exc """
return isinstance(self.value, exc)
def _getreprcrash(self):
exconly = self.exconly(tryshort=True)
entry = self.traceback.getcrashentry()
path, lineno = entry.frame.code.raw.co_filename, entry.lineno
return ReprFileLocation(path, lineno + 1, exconly)
def getrepr(
self,
showlocals=False,
style="long",
abspath=False,
tbfilter=True,
funcargs=False,
):
""" return str()able representation of this exception info.
showlocals: show locals per traceback entry
style: long|short|no|native traceback style
tbfilter: hide entries (where __tracebackhide__ is true)
        in case of style==native, tbfilter and showlocals are ignored.
"""
if style == "native":
return ReprExceptionInfo(
ReprTracebackNative(
traceback.format_exception(
self.type, self.value, self.traceback[0]._rawentry
)
),
self._getreprcrash(),
)
fmt = FormattedExcinfo(
showlocals=showlocals,
style=style,
abspath=abspath,
tbfilter=tbfilter,
funcargs=funcargs,
)
return fmt.repr_excinfo(self)
def __str__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return str(loc)
def __unicode__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return text_type(loc)
def match(self, regexp):
"""
Match the regular expression 'regexp' on the string representation of
the exception. If it matches then True is returned (so that it is
        possible to write 'assert excinfo.match()'). If it doesn't match,
        an AssertionError is raised.
"""
__tracebackhide__ = True
if not re.search(regexp, str(self.value)):
assert 0, "Pattern '{!s}' not found in '{!s}'".format(regexp, self.value)
return True
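# Illustrative usage from a test (sketch):
#   with pytest.raises(ValueError) as excinfo:
#       int('not a number')
#   excinfo.match(r'invalid literal')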
@attr.s
class FormattedExcinfo(object):
""" presenting information about failing Functions and Generators. """
# for traceback entries
flow_marker = ">"
fail_marker = "E"
showlocals = attr.ib(default=False)
style = attr.ib(default="long")
abspath = attr.ib(default=True)
tbfilter = attr.ib(default=True)
funcargs = attr.ib(default=False)
astcache = attr.ib(default=attr.Factory(dict), init=False, repr=False)
def _getindent(self, source):
# figure out indent for given source
try:
s = str(source.getstatement(len(source) - 1))
except KeyboardInterrupt:
raise
except: # noqa
try:
s = str(source[-1])
except KeyboardInterrupt:
raise
except: # noqa
return 0
return 4 + (len(s) - len(s.lstrip()))
def _getentrysource(self, entry):
source = entry.getsource(self.astcache)
if source is not None:
source = source.deindent()
return source
def _saferepr(self, obj):
return py.io.saferepr(obj)
def repr_args(self, entry):
if self.funcargs:
args = []
for argname, argvalue in entry.frame.getargs(var=True):
args.append((argname, self._saferepr(argvalue)))
return ReprFuncArgs(args)
def get_source(self, source, line_index=-1, excinfo=None, short=False):
""" return formatted and marked up source lines. """
import _pytest._code
lines = []
if source is None or line_index >= len(source.lines):
source = _pytest._code.Source("???")
line_index = 0
if line_index < 0:
line_index += len(source)
space_prefix = " "
if short:
lines.append(space_prefix + source.lines[line_index].strip())
else:
for line in source.lines[:line_index]:
lines.append(space_prefix + line)
lines.append(self.flow_marker + " " + source.lines[line_index])
for line in source.lines[line_index + 1:]:
lines.append(space_prefix + line)
if excinfo is not None:
indent = 4 if short else self._getindent(source)
lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
return lines
def get_exconly(self, excinfo, indent=4, markall=False):
lines = []
indent = " " * indent
# get the real exception information out
exlines = excinfo.exconly(tryshort=True).split("\n")
failindent = self.fail_marker + indent[1:]
for line in exlines:
lines.append(failindent + line)
if not markall:
failindent = indent
return lines
def repr_locals(self, locals):
if self.showlocals:
lines = []
keys = [loc for loc in locals if loc[0] != "@"]
keys.sort()
for name in keys:
value = locals[name]
if name == "__builtins__":
lines.append("__builtins__ = <builtins>")
else:
# This formatting could all be handled by the
# _repr() function, which is only reprlib.Repr in
# disguise, so is very configurable.
str_repr = self._saferepr(value)
# if len(str_repr) < 70 or not isinstance(value,
# (list, tuple, dict)):
lines.append("%-10s = %s" % (name, str_repr))
# else:
# self._line("%-10s =\\" % (name,))
# # XXX
# pprint.pprint(value, stream=self.excinfowriter)
return ReprLocals(lines)
def repr_traceback_entry(self, entry, excinfo=None):
import _pytest._code
source = self._getentrysource(entry)
if source is None:
source = _pytest._code.Source("???")
line_index = 0
else:
# entry.getfirstlinesource() can be -1, should be 0 on jython
line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
lines = []
style = entry._repr_style
if style is None:
style = self.style
if style in ("short", "long"):
short = style == "short"
reprargs = self.repr_args(entry) if not short else None
s = self.get_source(source, line_index, excinfo, short=short)
lines.extend(s)
if short:
message = "in %s" % (entry.name)
else:
message = excinfo and excinfo.typename or ""
path = self._makepath(entry.path)
filelocrepr = ReprFileLocation(path, entry.lineno + 1, message)
localsrepr = None
if not short:
localsrepr = self.repr_locals(entry.locals)
return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
if excinfo:
lines.extend(self.get_exconly(excinfo, indent=4))
return ReprEntry(lines, None, None, None, style)
def _makepath(self, path):
if not self.abspath:
try:
np = py.path.local().bestrelpath(path)
except OSError:
return path
if len(np) < len(str(path)):
path = np
return path
def repr_traceback(self, excinfo):
traceback = excinfo.traceback
if self.tbfilter:
traceback = traceback.filter()
if is_recursion_error(excinfo):
traceback, extraline = self._truncate_recursive_traceback(traceback)
else:
extraline = None
last = traceback[-1]
entries = []
for index, entry in enumerate(traceback):
einfo = (last == entry) and excinfo or None
reprentry = self.repr_traceback_entry(entry, einfo)
entries.append(reprentry)
return ReprTraceback(entries, extraline, style=self.style)
def _truncate_recursive_traceback(self, traceback):
"""
Truncate the given recursive traceback trying to find the starting point
of the recursion.
The detection is done by going through each traceback entry and finding the
        point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``).
Handle the situation where the recursion process might raise an exception (for example
comparing numpy arrays using equality raises a TypeError), in which case we do our best to
warn the user of the error and show a limited traceback.
"""
try:
recursionindex = traceback.recursionindex()
except Exception as e:
max_frames = 10
extraline = (
"!!! Recursion error detected, but an error occurred locating the origin of recursion.\n"
" The following exception happened when comparing locals in the stack frame:\n"
" {exc_type}: {exc_msg}\n"
" Displaying first and last {max_frames} stack frames out of {total}."
).format(
exc_type=type(e).__name__,
exc_msg=safe_str(e),
max_frames=max_frames,
total=len(traceback),
)
traceback = traceback[:max_frames] + traceback[-max_frames:]
else:
if recursionindex is not None:
extraline = "!!! Recursion detected (same locals & position)"
traceback = traceback[:recursionindex + 1]
else:
extraline = None
return traceback, extraline
def repr_excinfo(self, excinfo):
if _PY2:
reprtraceback = self.repr_traceback(excinfo)
reprcrash = excinfo._getreprcrash()
return ReprExceptionInfo(reprtraceback, reprcrash)
else:
repr_chain = []
e = excinfo.value
descr = None
while e is not None:
if excinfo:
reprtraceback = self.repr_traceback(excinfo)
reprcrash = excinfo._getreprcrash()
else:
# fallback to native repr if the exception doesn't have a traceback:
# ExceptionInfo objects require a full traceback to work
reprtraceback = ReprTracebackNative(
traceback.format_exception(type(e), e, None)
)
reprcrash = None
repr_chain += [(reprtraceback, reprcrash, descr)]
if e.__cause__ is not None:
e = e.__cause__
excinfo = ExceptionInfo(
(type(e), e, e.__traceback__)
) if e.__traceback__ else None
descr = "The above exception was the direct cause of the following exception:"
elif (e.__context__ is not None and not e.__suppress_context__):
e = e.__context__
excinfo = ExceptionInfo(
(type(e), e, e.__traceback__)
) if e.__traceback__ else None
descr = "During handling of the above exception, another exception occurred:"
else:
e = None
repr_chain.reverse()
return ExceptionChainRepr(repr_chain)
class TerminalRepr(object):
def __str__(self):
s = self.__unicode__()
if _PY2:
s = s.encode("utf-8")
return s
def __unicode__(self):
# FYI this is called from pytest-xdist's serialization of exception
# information.
io = py.io.TextIO()
tw = py.io.TerminalWriter(file=io)
self.toterminal(tw)
return io.getvalue().strip()
def __repr__(self):
return "<%s instance at %0x>" % (self.__class__, id(self))
class ExceptionRepr(TerminalRepr):
def __init__(self):
self.sections = []
def addsection(self, name, content, sep="-"):
self.sections.append((name, content, sep))
def toterminal(self, tw):
for name, content, sep in self.sections:
tw.sep(sep, name)
tw.line(content)
class ExceptionChainRepr(ExceptionRepr):
def __init__(self, chain):
super(ExceptionChainRepr, self).__init__()
self.chain = chain
# reprcrash and reprtraceback of the outermost (the newest) exception
# in the chain
self.reprtraceback = chain[-1][0]
self.reprcrash = chain[-1][1]
def toterminal(self, tw):
for element in self.chain:
element[0].toterminal(tw)
if element[2] is not None:
tw.line("")
tw.line(element[2], yellow=True)
super(ExceptionChainRepr, self).toterminal(tw)
class ReprExceptionInfo(ExceptionRepr):
def __init__(self, reprtraceback, reprcrash):
super(ReprExceptionInfo, self).__init__()
self.reprtraceback = reprtraceback
self.reprcrash = reprcrash
def toterminal(self, tw):
self.reprtraceback.toterminal(tw)
super(ReprExceptionInfo, self).toterminal(tw)
class ReprTraceback(TerminalRepr):
entrysep = "_ "
def __init__(self, reprentries, extraline, style):
self.reprentries = reprentries
self.extraline = extraline
self.style = style
def toterminal(self, tw):
# the entries might have different styles
for i, entry in enumerate(self.reprentries):
if entry.style == "long":
tw.line("")
entry.toterminal(tw)
if i < len(self.reprentries) - 1:
next_entry = self.reprentries[i + 1]
if (
entry.style == "long"
or entry.style == "short"
and next_entry.style == "long"
):
tw.sep(self.entrysep)
if self.extraline:
tw.line(self.extraline)
class ReprTracebackNative(ReprTraceback):
def __init__(self, tblines):
self.style = "native"
self.reprentries = [ReprEntryNative(tblines)]
self.extraline = None
class ReprEntryNative(TerminalRepr):
style = "native"
def __init__(self, tblines):
self.lines = tblines
def toterminal(self, tw):
tw.write("".join(self.lines))
class ReprEntry(TerminalRepr):
localssep = "_ "
def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
self.lines = lines
self.reprfuncargs = reprfuncargs
self.reprlocals = reprlocals
self.reprfileloc = filelocrepr
self.style = style
def toterminal(self, tw):
if self.style == "short":
self.reprfileloc.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
# tw.line("")
return
if self.reprfuncargs:
self.reprfuncargs.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
if self.reprlocals:
# tw.sep(self.localssep, "Locals")
tw.line("")
self.reprlocals.toterminal(tw)
if self.reprfileloc:
if self.lines:
tw.line("")
self.reprfileloc.toterminal(tw)
def __str__(self):
return "%s\n%s\n%s" % ("\n".join(self.lines), self.reprlocals, self.reprfileloc)
class ReprFileLocation(TerminalRepr):
def __init__(self, path, lineno, message):
self.path = str(path)
self.lineno = lineno
self.message = message
def toterminal(self, tw):
# filename and lineno output for each entry,
        # using an output format that most editors understand
msg = self.message
i = msg.find("\n")
if i != -1:
msg = msg[:i]
tw.write(self.path, bold=True, red=True)
tw.line(":%s: %s" % (self.lineno, msg))
class ReprLocals(TerminalRepr):
def __init__(self, lines):
self.lines = lines
def toterminal(self, tw):
for line in self.lines:
tw.line(line)
class ReprFuncArgs(TerminalRepr):
def __init__(self, args):
self.args = args
def toterminal(self, tw):
if self.args:
linesofar = ""
for name, value in self.args:
ns = "%s = %s" % (safe_str(name), safe_str(value))
if len(ns) + len(linesofar) + 2 > tw.fullwidth:
if linesofar:
tw.line(linesofar)
linesofar = ns
else:
if linesofar:
linesofar += ", " + ns
else:
linesofar = ns
if linesofar:
tw.line(linesofar)
tw.line("")
def getrawcode(obj, trycall=True):
""" return code object for given function. """
try:
return obj.__code__
except AttributeError:
obj = getattr(obj, "im_func", obj)
obj = getattr(obj, "func_code", obj)
obj = getattr(obj, "f_code", obj)
obj = getattr(obj, "__code__", obj)
if trycall and not hasattr(obj, "co_firstlineno"):
if hasattr(obj, "__call__") and not inspect.isclass(obj):
x = getrawcode(obj.__call__, trycall=False)
if hasattr(x, "co_firstlineno"):
return x
return obj
if PY35: # RecursionError introduced in 3.5
def is_recursion_error(excinfo):
return excinfo.errisinstance(RecursionError) # noqa
else:
def is_recursion_error(excinfo):
if not excinfo.errisinstance(RuntimeError):
return False
try:
return "maximum recursion depth exceeded" in str(excinfo.value)
except UnicodeError:
return False
| mpl-2.0 | 756,837,724,535,387,500 | 32.343299 | 117 | 0.554834 | false |
csdl/makahiki | makahiki/apps/managers/predicate_mgr/predicate_mgr.py | 2 | 15003 | '''Manager for predicates.
Created on Jun 15, 2013
@author: Cam Moore
'''
import sys
import inspect
from apps.managers.challenge_mgr.models import RoundSetting, GameInfo
from apps.widgets.smartgrid_library.models import LibraryEvent, LibraryAction
from apps.widgets.smartgrid_design.models import DesignerLevel, DesignerEvent, DesignerAction
# Used to build the unlock_conditions
(_AND, _OR, _NOT, _TRUE, _FALSE) = ('and', 'or', 'not', 'True', 'False')
def eval_predicates(predicates, user):
"""Returns the boolean evaluation result of the predicates against the user."""
ALLOW_DICT = {"True": True, "False": False, "user": user}
ALLOW_DICT.update(get_player_predicates())
ALLOW_DICT.update(get_challenge_predicates())
ALLOW_DICT.update(get_smartgrid_predicates())
for key in ALLOW_DICT:
if "%s(" % key in predicates:
predicates = predicates.replace("%s(" % key, "%s(user," % key)
return eval(predicates, {"__builtins__": None}, ALLOW_DICT)
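# Illustrative (the action slug is hypothetical):
#   eval_predicates("has_points(10) and submitted_action('intro-video')", user)
# rewrites each known predicate call to pass the user, e.g. has_points(user, 10),
# then evaluates the expression with builtins disabled.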
def eval_play_tester_predicates(predicates, user, draft_slug):
"""Returns the boolean evaluation results of the tester predicates against the user."""
ALLOW_DICT = {"True": True, "False": False, "user": user}
ALLOW_DICT.update(get_smartgrid_tester_predicates())
for key in ALLOW_DICT:
if "%s(" % key in predicates:
predicates = predicates.replace("%s(" % key, "%s('%s', " % (key, draft_slug))
ALLOW_DICT.update(get_player_tester_predicates())
ALLOW_DICT.update(get_challenge_tester_predicates())
for key in ALLOW_DICT:
if "%s(" % key in predicates:
predicates = predicates.replace("%s(" % key, "%s(user," % key)
return eval(predicates, {"__builtins__": None}, ALLOW_DICT)
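# Illustrative (draft slug and action slug are hypothetical): with
# draft_slug='default', "submitted_action('intro-video')" is rewritten in two
# passes and evaluates as submitted_action(user, 'default', 'intro-video').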
def get_action_slugs(draft):
"""Returns a list of all the slugs available in the given draft. This includes all the
LibraryAction slugs and any new action slugs in the draft."""
ret = get_library_action_slugs()
for action in DesignerAction.objects.filter(draft=draft):
if action.slug not in ret:
ret.append(action.slug)
return sorted(ret)
def get_action_types():
"""Returns a list of the possible action types."""
return ('activity', 'commitment', 'event')
def get_challenge_predicates():
"""Returns the challenge predicates as a dictionary whose keys are the names of the predicates
and the values are the predicate functions."""
from apps.managers.predicate_mgr.challenge_predicates import game_enabled, reached_round
return {
"game_enabled": game_enabled,
"reached_round": reached_round,
}
def reached_round_tester():
"""Tester predicate replacement for challenge_mgr.predicates.reached_round."""
return True
def get_challenge_tester_predicates():
"""Returns the tester challenge predicates."""
from apps.managers.predicate_mgr.challenge_tester_predicates import game_enabled, reached_round
return {
"game_enabled": game_enabled,
"reached_round": reached_round,
}
def get_defined_predicates():
"""Returns the predicates defined in Makahiki as a dictionary."""
ret = {}
ret.update(get_player_predicates())
ret.update(get_challenge_predicates())
ret.update(get_smartgrid_predicates())
return ret
def get_event_slugs(draft):
"""Returns a list of all the Event slugs available in the given draft."""
ret = get_library_event_slugs()
for event in DesignerEvent.objects.filter(draft=draft):
if event.slug not in ret:
ret.append(event.slug)
return ret
def get_game_names():
"""Returns a list of all the game names."""
ret = []
for info in GameInfo.objects.all():
if info.name not in ret:
ret.append(info.name)
return ret
def get_level_names(draft):
"""Returns a list of all the level names defined in the given draft."""
ret = []
for level in DesignerLevel.objects.filter(draft=draft):
if level.name not in ret:
ret.append(level.name)
return ret
def get_library_action_slugs():
"""Returns a list of the LibraryAction slugs."""
ret = []
for action in LibraryAction.objects.all():
if action.slug not in ret:
ret.append(action.slug)
return ret
def get_library_event_slugs():
"""Returns a list of all the LibraryEvent slugs."""
ret = []
for event in LibraryEvent.objects.all():
if event.slug not in ret:
ret.append(event.slug)
return ret
def get_player_predicates():
"""Returns the predicates associated with players as a dictionary whose keys are the names
of the predicates and values are the predicate functions."""
from apps.managers.predicate_mgr.player_predicates import has_points, is_admin, \
allocated_raffle_ticket, badge_awarded, posted_to_wall, set_profile_pic, daily_visit_count, \
changed_theme, daily_energy_goal_count, referring_count, team_member_point_percent
return {
"is_admin": is_admin,
"has_points": has_points,
"allocated_raffle_ticket": allocated_raffle_ticket,
"badge_awarded": badge_awarded,
"posted_to_wall": posted_to_wall,
"set_profile_pic": set_profile_pic,
"daily_visit_count": daily_visit_count,
"change_theme": changed_theme,
"changed_theme": changed_theme,
"daily_energy_goal_count": daily_energy_goal_count,
"referring_count": referring_count,
"team_member_point_percent": team_member_point_percent,
}
def get_player_tester_predicates():
"""Returns the tester predicates associated with players. This is the same
get_player_predicates()."""
return get_player_predicates()
def get_predicate_parameter_types(predicate_str):
"""Returns a list of the parameter types for the given predicate_str."""
preds = get_defined_predicates()
try:
return inspect.getargspec(preds[predicate_str]).args
except KeyError:
return []
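# Illustrative: get_predicate_parameter_types('has_points') returns the
# argument names of the has_points predicate (the exact list depends on the
# predicate's signature).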
def get_resources():
"""Returns a list of the possible resource choices."""
return ('energy', 'water', 'waste')
def get_round_names():
"""Returns a list of the defined round names."""
ret = []
for r in RoundSetting.objects.all():
if r.name not in ret:
ret.append(r.name)
return ret
def get_smartgrid_predicates(): # pylint: disable=R0914
"""Returns the SmartGrid predicates as a dictionary whose keys are the names of the predicates
and the values are the predicate functions."""
from apps.managers.predicate_mgr.smartgrid_predicates import approved_action, \
approved_all_of_level, approved_all_of_resource, approved_all_of_type, approved_some, \
approved_some_full_spectrum, approved_some_of_level, approved_some_of_resource, \
approved_some_of_type, completed_level, social_bonus_count, submitted_action, \
submitted_all_of_level, submitted_all_of_resource, submitted_all_of_type, submitted_level, \
submitted_some, submitted_some_full_spectrum, submitted_some_of_level, \
submitted_some_of_resource, submitted_some_of_type, unlock_on_date, unlock_on_event
return {
"approved_action": approved_action,
"approved_all_of_level": approved_all_of_level,
"approved_all_of_resource": approved_all_of_resource,
"approved_all_of_type": approved_all_of_type,
"approved_some": approved_some,
"approved_some_full_spectrum": approved_some_full_spectrum,
"approved_some_of_level": approved_some_of_level,
"approved_some_of_resource": approved_some_of_resource,
"approved_some_of_type": approved_some_of_type,
"completed_action": submitted_action,
"completed_level": completed_level,
"completed_some_of": submitted_some_of_type,
"completed_some_of_level": submitted_some_of_level,
"social_bonus_count": social_bonus_count,
"submitted_action": submitted_action,
"submitted_all_of_level": submitted_all_of_level,
"submitted_all_of_resource": submitted_all_of_resource,
"submitted_all_of_type": submitted_all_of_type,
"submitted_level": submitted_level,
"submitted_some": submitted_some,
"submitted_some_full_spectrum": submitted_some_full_spectrum,
"submitted_some_of_level": submitted_some_of_level,
"submitted_some_of_resource": submitted_some_of_resource,
"submitted_some_of_type": submitted_some_of_type,
"unlock_on_date": unlock_on_date,
"unlock_on_event": unlock_on_event,
} # pylint: enable=R0914
def get_smartgrid_tester_predicates(): # pylint: disable=R0914
"""Returns the tester smartgrid predicates."""
from apps.managers.predicate_mgr.smartgrid_tester_predicates import approved_action, \
approved_all_of_level, approved_all_of_resource, approved_all_of_type, approved_some, \
approved_some_full_spectrum, approved_some_of_level, approved_some_of_resource, \
approved_some_of_type, completed_level, social_bonus_count, submitted_action, \
submitted_all_of_level, submitted_all_of_resource, submitted_all_of_type, submitted_level, \
submitted_some, submitted_some_full_spectrum, submitted_some_of_level, \
submitted_some_of_resource, submitted_some_of_type, unlock_on_date, unlock_on_event
return {
"approved_action": approved_action,
"approved_all_of_level": approved_all_of_level,
"approved_all_of_resource": approved_all_of_resource,
"approved_all_of_type": approved_all_of_type,
"approved_some": approved_some,
"approved_some_full_spectrum": approved_some_full_spectrum,
"approved_some_of_level": approved_some_of_level,
"approved_some_of_resource": approved_some_of_resource,
"approved_some_of_type": approved_some_of_type,
"completed_action": submitted_action,
"completed_level": completed_level,
"completed_some_of": submitted_some_of_type,
"completed_some_of_level": submitted_some_of_level,
"social_bonus_count": social_bonus_count,
"submitted_action": submitted_action,
"submitted_all_of_level": submitted_all_of_level,
"submitted_all_of_resource": submitted_all_of_resource,
"submitted_all_of_type": submitted_all_of_type,
"submitted_level": submitted_level,
"submitted_some": submitted_some,
"submitted_some_full_spectrum": submitted_some_full_spectrum,
"submitted_some_of_level": submitted_some_of_level,
"submitted_some_of_resource": submitted_some_of_resource,
"submitted_some_of_type": submitted_some_of_type,
"unlock_on_date": unlock_on_date,
"unlock_on_event": unlock_on_event,
} # pylint: enable=R0914
def get_smartgrid_unlock_predicates():
"""Returns the suggested predicates for Smartgrid Action unlock conditions."""
from apps.managers.predicate_mgr.smartgrid_predicates import approved_action, \
submitted_action, unlock_on_date, unlock_on_event
from apps.managers.predicate_mgr.player_predicates import has_points
return {
"submitted_action": submitted_action,
"approved_action": approved_action,
"has_points": has_points,
"unlock_on_date": unlock_on_date,
"unlock_on_event": unlock_on_event,
}
def get_smartgrid_unlock_predicate_list():
"""Returns the suggested Smartgrid unlock condition predicate list."""
ret = []
ret.append('submitted_action')
ret.append('approved_action')
ret.append('has_points')
ret.append('unlock_on_date')
ret.append('unlock_on_event')
return ret
def is_action_slug_predicate(predicate_fn):
"""Returns true if the predicate_fn takes parameter that is an Action slug."""
return 'action_slug' in inspect.getargspec(predicate_fn).args
def is_action_type_predicate(predicate_fn):
"""Returns True if the predicate_fn takes an action_type parameter."""
return 'action_type' in inspect.getargspec(predicate_fn).args
def is_event_slug_predicate(predicate_fn):
"""Returns True if the predicated_fn takes a parameter that is an event_slug."""
return 'event_slug' in inspect.getargspec(predicate_fn).args
def is_game_name_predicate(predicate_fn):
"""Returns True if the predicate_fn takes a game_name parameter."""
return 'game_name' in inspect.getargspec(predicate_fn).args
def is_level_name_predicate(predicate_fn):
"""Returns True if the predicate_fn takes a level_name parameter."""
return 'level_name' in inspect.getargspec(predicate_fn).args
def is_predicate_name(name):
"""Returns True if the given name is a valid predicate function name."""
predicates = get_defined_predicates()
    return name in predicates
def is_resource_predicate(predicate_fn):
"""Returns True if the predicate_fn takes a resource parameter."""
return 'resource' in inspect.getargspec(predicate_fn).args
def is_round_name_predicate(predicate_fn):
"""Returns True if the predicate_fn takes a round_name parameter."""
return 'round_name' in inspect.getargspec(predicate_fn).args
def validate_form_predicates(predicates):
"""validate the predicates in a form. if error, raise the form validation error."""
from django import forms
from django.contrib.auth.models import User
# Pick a user and see if the conditions result is true or false.
user = User.objects.all()[0]
try:
result = eval_predicates(predicates, user)
# Check if the result type is a boolean
        if not isinstance(result, bool):
            raise forms.ValidationError("Expected boolean value but got %s" % type(result))
    except Exception:
        # sys.exc_info() always returns a 3-tuple, so report type and value directly
        exc_type, exc_value = sys.exc_info()[:2]
        raise forms.ValidationError("Received exception: %s:%s" % (exc_type, exc_value))
def validate_predicates(predicates):
"""Validate the predicates string."""
from django.contrib.auth.models import User
error_msg = None
# Pick a user and see if the conditions result is true or false.
user = User.objects.all()[0]
try:
result = eval_predicates(predicates, user)
# Check if the result type is a boolean
        if not isinstance(result, bool):
error_msg = "Expected boolean value but got %s" % type(result)
except Exception:
error_msg = "Received exception: %s" % sys.exc_info()[0]
return error_msg
| mit | -7,216,325,315,947,898,000 | 38.585752 | 99 | 0.659601 | false |
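A minimal usage sketch for the predicate-manager helpers above (the predicate name and the condition string are assumed examples, not values taken from the codebase):

# Hypothetical illustration of the predicate-manager API.
predicates = get_defined_predicates()
fn = predicates.get('unlock_on_date')
if fn is not None and is_predicate_name('unlock_on_date'):
    print 'unlock_on_date is a defined predicate'
# validate_predicates returns None on success, or an error message string.
error = validate_predicates('submitted_action("intro-video")')  # assumed slug
if error:
    print error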
robosafe/testbench_vRAL_hydro | bert2_simulator/sim_step_monitors/AM_ifSensorsNotOK_notRelease.py | 1 | 6001 | #!/usr/bin/env python
"""
AM_ifSensorsNotOK_notRelease
Assertion monitor
For use in Human-Robot Interaction simulator.
Created by David Western, July 2015.
Implements the assertion: if the human is ready AND NOT sensorsOK AND contact_robot_hand_and_object == 1,
then assert that after T seconds contact_robot_hand_and_object == 1
(i.e. the robot does not release the object within T seconds of a negative sensor reading).
"""
# These two imports must be here for all AMs:
from assertion_monitor_baseclass import AssertionMonitor
import rospy
# Imports specific to this AM:
from bert2_simulator.msg import *
import dist_between_links
from std_msgs.msg import Float64
from std_msgs.msg import Int8
# Make sure this name matches the name of this file.
# vvvvvvvvvvvvvvv
class AM_ifSensorsNotOK_notRelease(AssertionMonitor):
"""
    Assertion monitor checking that the robot does not release a grasped
    object within T seconds of a negative sensor reading, for use in the
    human-robot interaction simulator.
"""
def __init__(self,trace_label):
"""
List, in order, the stages of the precondition and postcondition.
The strings given here should name methods defined within this class.
The assertion_monitor base class requires both these attributes to be
defined.
"""
# Required (but adjust the list entries to reflect your own AM):
self.precon_list = ['precon']
self.postcon_list = ['Wait','Decide']
# Make sure /use_sim_time is true, for use in Wait():
use_sim_time = rospy.get_param('/use_sim_time')
# print "use_sim_time: ", use_sim_time
if not use_sim_time:
rospy.set_param('/use_sim_time',True)
print "use_sim_time changed to True"
self.T = 3 # Time to wait, in seconds.
# This particular AM depends on values published on ROS topics.
# Hence, let's set up some subscribers.
rospy.Subscriber("human_signals",Human, self.h_signals_callback)
rospy.Subscriber("gpl_is_ok", Int8, self.sensorsOK_callback)
# Related initialisations:
self.h_ready = 0
self.sensorsOK = 0
self.decision = 0
self.object_in_robot_hand = 1 # Fudge.
# Required at the end of __init__:
# super(YOUR_AM_CLASSNAME_HERE,self).__init__(trace_label) # Execute constructor of base class.
        super(AM_ifSensorsNotOK_notRelease, self).__init__(trace_label) # Execute constructor of base class.
"""
Define member functions representing each stage of the PREcondition below here.
Each stage of the precondition must return 1 (satisfied), 0 (not yet satisfied),
or -1 (invalidated)...
1 - SATISFIED:
The assertion monitor will progress to the next stage ON THE NEXT simulation
time-step. A special case is that, when the last stage of the precondition
evaluates as 1, the first stage of the POSTcondition will be evaluated
in the SAME time-step.
0 - NOT YET SATISFIED:
The assertion monitor will evaluate the same stage again on the next
simulation time-step.
-1 - INVALIDATED:
The stage has not been satisfied and cannot be satisfied in future evaluations.
Hence the assertion monitor will reset (evaluate the first stage of the
precondition on the next simulation time-step).
TO DO: Allow forking of the AM instance so that multiple independent precon (and
then postcon) checks can run concurrently; add a self.fork() member
function in assertion_monitor.py, to be invoked in the stage member
functions below?
"""
def precon(self):
dist = dist_between_links.check('object_link','bert2::left_wrist_flex_link')
obj_in_rob_hand = dist<=0.1
#if (not self.sensorsOK) and obj_in_rob_hand:
if self.h_ready==1 and (not self.sensorsOK) and obj_in_rob_hand:
self.t_WaitStart = rospy.get_time()
            self.object_in_robot_hand = 1 # In case it's been set prematurely.
self.decision = 0 # In case it's been set prematurely.
return 1
else:
return 0
"""
Define member functions representing each stage of the POSTcondition below here.
Each stage of the postcondition must return 1 (satisfied), 0 (not yet satisfied),
or -1 (violated)...
1 - SATISFIED:
The assertion monitor will progress to the next stage ON THE NEXT simulation
time-step. A special case is that, when the last stage of the postcondition
evaluates as 1, the assertion is deemed satisfied.
0 - NOT YET SATISFIED:
The assertion monitor will evaluate the same stage again on the next
simulation time-step.
-1 - VIOLATED:
The stage has not been satisfied and cannot be satisfied in future evaluations.
Hence the assertion deemed violated. A flag will be raised to indicate this.
Depending on mode settings (not yet implemented), either the simulation will
end or the monitor will reset.
"""
def Wait(self):
now = rospy.get_time()
if now-self.t_WaitStart>=self.T:
return 1
else:
return 0
def Decide(self):
dist = dist_between_links.check('object','bert2::left_wrist_flex_link')
if dist<=0.1:
rospy.loginfo('Valid assertion')
return 1
else:
rospy.loginfo('Violation of property')
return -1
"""
Define callbacks for ROS subscribers.
"""
def h_signals_callback(self,data):
        #Assuming perfect sensing and no sensing delays
if data.humanIsReady==1:
self.h_ready = 1
def sensorsOK_callback(self,data):
        #Assuming perfect sensing and no sensing delays
if data.data==1:
self.sensorsOK = 1
else:
self.sensorsOK = 0
| gpl-3.0 | -6,965,395,529,337,508,000 | 35.150602 | 151 | 0.63906 | false |
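A sketch (an assumed driver, not part of the simulator sources) of how the precon_list/postcon_list stage methods declared above are typically stepped, given their 1/0/-1 return convention:

# Hypothetical stage-stepping loop; `monitor` is an AM instance and
# `wait_for_tick` advances the simulation by one time-step.
def run_stages(monitor, stage_names, wait_for_tick):
    idx = 0
    while idx < len(stage_names):
        result = getattr(monitor, stage_names[idx])()
        if result == 1:
            idx += 1        # stage satisfied: move to the next stage
        elif result == -1:
            return False    # invalidated (precon) or violated (postcon)
        wait_for_tick()     # re-evaluate on the next simulation time-step
    return True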
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
data points are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# We create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause | -6,047,827,956,118,446,000 | 27.423729 | 76 | 0.632081 | false |
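A small follow-up (assumed, not part of the original example): classify a new measurement with the fitted model, where the two features are sepal length and sepal width in cm:

sample = np.array([[5.0, 3.5]])
print(logreg.predict(sample))        # likely class 0 (setosa) for this point
print(logreg.predict_proba(sample))  # per-class membership probabilities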
m-ochi/RiotGameApiChallenge2016 | lolcrawler.py | 1 | 3730 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
'''
Created on Oct 20, 2015
Crawl flow:
1. Get recent games via FeaturedGames
2. Get the participants' SummonerNames
3. Use the SummonerNames to look up SummonerIds
4. Use the SummonerIds to fetch SummonerGames
5. Get MatchStats from the SummonerGames
6. Save the results
@author: ochi
'''
import urllib2
import urllib
import json
import lolapi
import os
import sys
import shutil
import datetime as dt
class LoLCrawler:
def __init__(self):
place = os.path.abspath(os.path.dirname(__file__))
today = dt.datetime.today()
todaystr = today.strftime("%Y%m%d%H")
self.resultDir = place + "/results%s"%(todaystr)
pass
def run(self):
if os.path.exists(self.resultDir):
pass
else:
os.mkdir(self.resultDir)
os.chdir(self.resultDir)
api = lolapi.LoLAPI()
loop_continue = True
while loop_continue is True:
featuredGamesDic, code = api.getFeaturedGames()
            if code == 200:
                loop_continue = False
            else:
                print "retrying getFeaturedGames"
games = featuredGamesDic["gameList"]
for gameDic in games:
featured_gameID = str(gameDic["gameId"])
participants = gameDic["participants"]
summonerNames = self.getSummonerNames(participants)
for summonerName in summonerNames:
summonerDic, code = api.getSummonerBySummonerName(summonerName=summonerName)
if code != 200:
print "cannot find summonerName: %s"%(summonerName)
continue
key_summonerName = summonerDic.keys()[0]
summonerid = str(summonerDic[key_summonerName]["id"])
statsDic, code = api.getSummonerGames(summonerID=summonerid)
if code != 200:
print "cannot find summonerGames summonerID: %s"%(summonerid)
continue
game_ids = self.getPlayerStatsGameIDs(statsDic)
for gameID in game_ids:
print "now consumed api_c: %d"%(api.api_c)
fileName = "gameID%s.json"%(gameID)
print "Start Get gameID:%s"%(gameID)
outDic, code = api.getMatchStats(matchID=gameID)
if code != 200:
print "cannot find Matchstats GameID: %s"%(gameID)
continue
                    # output for each game (overwrite any stale file for this game)
                    if os.path.exists(fileName):
                        print "found existing game file, overwriting. gameID: %s" % (gameID)
                        os.remove(fileName)
                    f = open(fileName, 'w')
                    line = json.dumps(outDic, indent=4, sort_keys=True)
                    f.write(line)
                    f.close()
print "finish write featured games stats"
print "consume api_c: %d"%(api.api_c)
pass
def getPlayerStatsGameIDs(self, statsDic):
games = statsDic["games"]
game_ids = []
for a_gameDic in games:
game_id = str(a_gameDic["gameId"])
game_ids.append(game_id)
return game_ids
def getSummonerNames(self, participants):
summonerNames = []
for eachSummonerDic in participants:
summonerName = eachSummonerDic["summonerName"]
summonerNames.append(summonerName)
return summonerNames
if __name__ == "__main__":
obj = LoLCrawler()
obj.run()
| apache-2.0 | -979,281,018,528,778,800 | 28.933884 | 92 | 0.551353 | false |
Pexego/odoo | addons/l10n_in_hr_payroll/report/report_hr_salary_employee_bymonth.py | 374 | 5518 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp.osv import osv
from openerp.report import report_sxw
class report_hr_salary_employee_bymonth(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(report_hr_salary_employee_bymonth, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'get_employee': self.get_employee,
'get_periods': self.get_periods,
'get_months_tol': self.get_months_tol,
'get_total': self.get_total,
})
self.context = context
self.mnths = []
self.mnths_total = []
self.total = 0.0
def get_periods(self, form):
# Get start year-month-date and end year-month-date
first_year = int(form['start_date'][0:4])
last_year = int(form['end_date'][0:4])
first_month = int(form['start_date'][5:7])
last_month = int(form['end_date'][5:7])
no_months = (last_year-first_year) * 12 + last_month - first_month + 1
current_month = first_month
current_year = first_year
# Get name of the months from integer
mnth_name = []
for count in range(0, no_months):
m = datetime.date(current_year, current_month, 1).strftime('%b')
mnth_name.append(m)
self.mnths.append(str(current_month) + '-' + str(current_year))
            if current_month == 12:
                current_month = 0
                current_year = current_year + 1
            current_month = current_month + 1
for c in range(0, (12-no_months)):
mnth_name.append('')
self.mnths.append('')
return [mnth_name]
def get_salary(self, form, emp_id, emp_salary, total_mnths):
category_id = form.get('category_id', [])
category_id = category_id and category_id[0] or False
self.cr.execute("select to_char(date_to,'mm-yyyy') as to_date ,sum(pl.total) \
from hr_payslip_line as pl \
left join hr_payslip as p on pl.slip_id = p.id \
left join hr_employee as emp on emp.id = p.employee_id \
left join resource_resource as r on r.id = emp.resource_id \
where p.state = 'done' and p.employee_id = %s and pl.category_id = %s \
group by r.name, p.date_to,emp.id",(emp_id, category_id,))
sal = self.cr.fetchall()
salary = dict(sal)
total = 0.0
cnt = 0
for month in self.mnths:
            if month != '':
if len(month) != 7:
month = '0' + str(month)
if month in salary and salary[month]:
emp_salary.append(salary[month])
total += salary[month]
total_mnths[cnt] = total_mnths[cnt] + salary[month]
else:
emp_salary.append(0.00)
else:
emp_salary.append('')
total_mnths[cnt] = ''
cnt = cnt + 1
return emp_salary, total, total_mnths
def get_employee(self, form):
emp_salary = []
salary_list = []
total_mnths=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
emp_obj = self.pool.get('hr.employee')
emp_ids = form.get('employee_ids', [])
employees = emp_obj.browse(self.cr, self.uid, emp_ids, context=self.context)
for emp_id in employees:
emp_salary.append(emp_id.name)
total = 0.0
emp_salary, total, total_mnths = self.get_salary(form, emp_id.id, emp_salary, total_mnths)
emp_salary.append(total)
salary_list.append(emp_salary)
emp_salary = []
self.mnths_total.append(total_mnths)
return salary_list
def get_months_tol(self):
return self.mnths_total
def get_total(self):
for item in self.mnths_total:
for count in range(1, len(item)):
if item[count] == '':
continue
self.total += item[count]
return self.total
class wrapped_report_employee_salary_bymonth(osv.AbstractModel):
_name = 'report.l10n_in_hr_payroll.report_hrsalarybymonth'
_inherit = 'report.abstract_report'
_template = 'l10n_in_hr_payroll.report_hrsalarybymonth'
_wrapped_report_class = report_hr_salary_employee_bymonth
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,923,142,380,941,902,000 | 39.277372 | 102 | 0.554549 | false |
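A worked check (with assumed dates) of the period-count formula used in get_periods, counting months between 2014-11 and 2015-02 inclusive:

no_months = (2015 - 2014) * 12 + 2 - 11 + 1  # = 4 -> Nov, Dec, Jan, Feb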
sangoma/sangoma-sbc-provisioning | common/attr/validators.py | 1 | 3489 | """
Commonly useful validators.
"""
from __future__ import absolute_import, division, print_function
from ._make import attr, attributes
@attributes(repr=False, slots=True)
class _InstanceOfValidator(object):
type = attr()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not isinstance(value, self.type):
raise TypeError(
"'{name}' must be {type!r} (got {value!r} that is a "
"{actual!r})."
.format(name=attr.name, type=self.type,
actual=value.__class__, value=value),
attr, self.type, value,
)
def __repr__(self):
return (
"<instance_of validator for type {type!r}>"
.format(type=self.type)
)
def instance_of(type):
"""
A validator that raises a :exc:`TypeError` if the initializer is called
    with a wrong type for this particular attribute (checks are performed using
:func:`isinstance` therefore it's also valid to pass a tuple of types).
:param type: The type to check for.
:type type: type or tuple of types
The :exc:`TypeError` is raised with a human readable error message, the
attribute (of type :class:`attr.Attribute`), the expected type, and the
value it got.
"""
return _InstanceOfValidator(type)
@attributes(repr=False, slots=True)
class _ProvidesValidator(object):
interface = attr()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not self.interface.providedBy(value):
raise TypeError(
"'{name}' must provide {interface!r} which {value!r} "
"doesn't."
.format(name=attr.name, interface=self.interface, value=value),
attr, self.interface, value,
)
def __repr__(self):
return (
"<provides validator for interface {interface!r}>"
.format(interface=self.interface)
)
def provides(interface):
"""
A validator that raises a :exc:`TypeError` if the initializer is called
with an object that does not provide the requested *interface* (checks are
performed using ``interface.providedBy(value)`` (see `zope.interface
<http://docs.zope.org/zope.interface/>`_).
:param interface: The interface to check for.
:type interface: zope.interface.Interface
The :exc:`TypeError` is raised with a human readable error message, the
attribute (of type :class:`attr.Attribute`), the expected interface, and
the value it got.
"""
return _ProvidesValidator(interface)
@attributes(repr=False, slots=True)
class _OptionalValidator(object):
validator = attr()
def __call__(self, inst, attr, value):
if value is None:
return
return self.validator(inst, attr, value)
def __repr__(self):
return (
"<optional validator for {type} or None>"
.format(type=repr(self.validator))
)
def optional(validator):
"""
A validator that makes an attribute optional. An optional attribute is one
which can be set to ``None`` in addition to satisfying the requirements of
the sub-validator.
:param validator: A validator that is used for non-``None`` values.
"""
return _OptionalValidator(validator)
| gpl-2.0 | -5,579,103,273,543,965,000 | 29.605263 | 79 | 0.60963 | false |
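A usage sketch (assumed class and fields) combining the validators above with the old-style attr API imported by this module:

from attr import attr, attributes
from attr.validators import instance_of, optional

@attributes
class Point(object):
    x = attr(validator=instance_of(int))
    y = attr(validator=optional(instance_of(int)))

Point(x=1, y=None)   # accepted: y may be None
Point(x='a', y=2)    # raises TypeError from _InstanceOfValidator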
GenericStudent/home-assistant | homeassistant/components/picotts/tts.py | 26 | 1854 | """Support for the Pico TTS speech service."""
import logging
import os
import shutil
import subprocess
import tempfile
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
_LOGGER = logging.getLogger(__name__)
SUPPORT_LANGUAGES = ["en-US", "en-GB", "de-DE", "es-ES", "fr-FR", "it-IT"]
DEFAULT_LANG = "en-US"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES)}
)
def get_engine(hass, config, discovery_info=None):
"""Set up Pico speech component."""
if shutil.which("pico2wave") is None:
_LOGGER.error("'pico2wave' was not found")
return False
return PicoProvider(config[CONF_LANG])
class PicoProvider(Provider):
"""The Pico TTS API provider."""
def __init__(self, lang):
"""Initialize Pico TTS provider."""
self._lang = lang
self.name = "PicoTTS"
@property
def default_language(self):
"""Return the default language."""
return self._lang
@property
def supported_languages(self):
"""Return list of supported languages."""
return SUPPORT_LANGUAGES
def get_tts_audio(self, message, language, options=None):
"""Load TTS using pico2wave."""
with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpf:
fname = tmpf.name
cmd = ["pico2wave", "--wave", fname, "-l", language, message]
subprocess.call(cmd)
data = None
try:
with open(fname, "rb") as voice:
data = voice.read()
except OSError:
_LOGGER.error("Error trying to read %s", fname)
return (None, None)
finally:
os.remove(fname)
if data:
return ("wav", data)
return (None, None)
| apache-2.0 | -3,127,726,662,319,629,000 | 26.264706 | 78 | 0.61219 | false |
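A standalone check of the provider above (assumes `pico2wave` is installed and on PATH; not how Home Assistant normally drives a TTS platform):

provider = PicoProvider("en-US")
content_type, wav_bytes = provider.get_tts_audio("hello world", "en-US")
# content_type == "wav" on success; wav_bytes holds the synthesized audio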
davidvoler/ate_meteor | launcher/api/python/notifier/notify_monitor.py | 1 | 2261 | import requests
import redis
from requests.exceptions import Timeout, ConnectionError, HTTPError
import json
import time
from tornado.options import options
import config
from ate_logger import AteLogger
class NotifyMonitor(object):
def __init__(self, logger=None):
if logger:
self.__logger = AteLogger("Notify Monitor", logger.logger_info)
else:
self.__logger = AteLogger("Notify Monitor")
self.driver = None
if options.monitor_http_driver:
from ate_notify_monitor.new_notifier.drivers.http_driver import HttpMonitorDriver
self.driver = HttpMonitorDriver(self.__logger)
elif options.monitor_redis_driver:
from ate_notify_monitor.new_notifier.drivers.redis_driver import RedisMonitorDriver
self.driver = RedisMonitorDriver(self.__logger)
elif options.monitor_ddp_driver:
from ate_notify_monitor.new_notifier.drivers.ddp_driver import DdpMonitorDriver
self.driver = DdpMonitorDriver(self.__logger)
def notify_fixture(self, fixture_id, status, progress=-1):
"""
:param fixture_id:
:param status: 'running', ''......
:param progress: -1 if progress is unknown , 0-100 if progress is known
:return:
"""
return self.driver.notify(fixture_id, {})
def notify_blocking_request(self, fixture_id, cavity=None, action=None, timeout=options.monitor_user_timeout):
"""
:param fixture_id:
:param cavity:
:param action:
:param timeout:
:return:
"""
self.driver.notify_blocking_request(fixture_id, cavity, action, timeout)
def notify_cavity(self, fixture_id, cavity_id, status, test_name, atp_number, progress=-1):
"""
:param fixture_id:
:param cavity_id:
:param status: 'running', 'fail', 'success'
:param test_name: name of the running test
:param atp_number:
:param progress: -1 if progress is unknown , 0-100 if progress is known
:return:
"""
return self.driver.notify(fixture_id, {})
def notify_resource(self, fixture_id, resource_id, status):
return self.driver.notify(fixture_id, {})
| mit | -868,136,828,224,130,800 | 32.25 | 114 | 0.636886 | false |
zcm19900902/picasso-graphic | tools/gyp/pylib/gyp/SCons.py | 253 | 5848 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
SCons generator.
This contains class definitions and supporting functions for generating
pieces of SCons files for the different types of GYP targets.
"""
import os
def WriteList(fp, list, prefix='',
separator=',\n ',
preamble=None,
postamble=None):
fp.write(preamble or '')
fp.write((separator or ' ').join([prefix + l for l in list]))
fp.write(postamble or '')
class TargetBase(object):
"""
Base class for a SCons representation of a GYP target.
"""
is_ignored = False
target_prefix = ''
target_suffix = ''
def __init__(self, spec):
self.spec = spec
def full_product_name(self):
"""
Returns the full name of the product being built:
* Uses 'product_name' if it's set, else prefix + 'target_name'.
* Prepends 'product_dir' if set.
* Appends SCons suffix variables for the target type (or
product_extension).
"""
suffix = self.target_suffix
product_extension = self.spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
prefix = self.spec.get('product_prefix', self.target_prefix)
name = self.spec['target_name']
name = prefix + self.spec.get('product_name', name) + suffix
product_dir = self.spec.get('product_dir')
if product_dir:
name = os.path.join(product_dir, name)
else:
name = os.path.join(self.out_dir, name)
return name
def write_input_files(self, fp):
"""
Writes the definition of the input files (sources).
"""
sources = self.spec.get('sources')
if not sources:
fp.write('\ninput_files = []\n')
return
preamble = '\ninput_files = [\n '
postamble = ',\n]\n'
WriteList(fp, map(repr, sources), preamble=preamble, postamble=postamble)
def builder_call(self):
"""
Returns the actual SCons builder call to build this target.
"""
name = self.full_product_name()
return 'env.%s(env.File(%r), input_files)' % (self.builder_name, name)
def write_target(self, fp, src_dir='', pre=''):
"""
Writes the lines necessary to build this target.
"""
fp.write('\n' + pre)
fp.write('_outputs = %s\n' % self.builder_call())
fp.write('target_files.extend(_outputs)\n')
class NoneTarget(TargetBase):
"""
A GYP target type of 'none', implicitly or explicitly.
"""
def write_target(self, fp, src_dir='', pre=''):
fp.write('\ntarget_files.extend(input_files)\n')
class SettingsTarget(TargetBase):
"""
A GYP target type of 'settings'.
"""
is_ignored = True
compilable_sources_template = """
_result = []
for infile in input_files:
if env.compilable(infile):
if (type(infile) == type('')
and (infile.startswith(%(src_dir)r)
or not os.path.isabs(env.subst(infile)))):
# Force files below the build directory by replacing all '..'
# elements in the path with '__':
base, ext = os.path.splitext(os.path.normpath(infile))
base = [d == '..' and '__' or d for d in base.split('/')]
base = os.path.join(*base)
object = '${OBJ_DIR}/${COMPONENT_NAME}/${TARGET_NAME}/' + base
if not infile.startswith(%(src_dir)r):
infile = %(src_dir)r + infile
infile = env.%(name)s(object, infile)[0]
else:
infile = env.%(name)s(infile)[0]
_result.append(infile)
input_files = _result
"""
class CompilableSourcesTargetBase(TargetBase):
"""
An abstract base class for targets that compile their source files.
We explicitly transform compilable files into object files,
even though SCons could infer that for us, because we want
to control where the object file ends up. (The implicit rules
in SCons always put the object file next to the source file.)
"""
intermediate_builder_name = None
def write_target(self, fp, src_dir='', pre=''):
if self.intermediate_builder_name is None:
raise NotImplementedError
if src_dir and not src_dir.endswith('/'):
src_dir += '/'
variables = {
'src_dir': src_dir,
'name': self.intermediate_builder_name,
}
fp.write(compilable_sources_template % variables)
super(CompilableSourcesTargetBase, self).write_target(fp)
class ProgramTarget(CompilableSourcesTargetBase):
"""
A GYP target type of 'executable'.
"""
builder_name = 'GypProgram'
intermediate_builder_name = 'StaticObject'
target_prefix = '${PROGPREFIX}'
target_suffix = '${PROGSUFFIX}'
out_dir = '${TOP_BUILDDIR}'
class StaticLibraryTarget(CompilableSourcesTargetBase):
"""
A GYP target type of 'static_library'.
"""
builder_name = 'GypStaticLibrary'
intermediate_builder_name = 'StaticObject'
target_prefix = '${LIBPREFIX}'
target_suffix = '${LIBSUFFIX}'
out_dir = '${LIB_DIR}'
class SharedLibraryTarget(CompilableSourcesTargetBase):
"""
A GYP target type of 'shared_library'.
"""
builder_name = 'GypSharedLibrary'
intermediate_builder_name = 'SharedObject'
target_prefix = '${SHLIBPREFIX}'
target_suffix = '${SHLIBSUFFIX}'
out_dir = '${LIB_DIR}'
class LoadableModuleTarget(CompilableSourcesTargetBase):
"""
A GYP target type of 'loadable_module'.
"""
builder_name = 'GypLoadableModule'
intermediate_builder_name = 'SharedObject'
target_prefix = '${SHLIBPREFIX}'
target_suffix = '${SHLIBSUFFIX}'
out_dir = '${TOP_BUILDDIR}'
TargetMap = {
None : NoneTarget,
'none' : NoneTarget,
'settings' : SettingsTarget,
'executable' : ProgramTarget,
'static_library' : StaticLibraryTarget,
'shared_library' : SharedLibraryTarget,
'loadable_module' : LoadableModuleTarget,
}
def Target(spec):
return TargetMap[spec.get('type')](spec)
| bsd-3-clause | 8,385,293,136,331,921,000 | 28.386935 | 77 | 0.64894 | false |
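An assumed usage sketch: turning a GYP target spec into its SCons wrapper and emitting the generated build lines (the spec values are illustrative):

spec = {'target_name': 'hello', 'type': 'executable', 'sources': ['hello.c']}
target = Target(spec)
with open('hello.scons', 'w') as fp:
    target.write_input_files(fp)   # writes the input_files list
    target.write_target(fp)        # writes the GypProgram builder call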
zcbenz/cefode-chromium | chrome/test/pyautolib/omnibox_info.py | 69 | 4320 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Python representation for Chromium Omnibox.
Obtain one of these from PyUITestSuite::GetOmniboxInfo() call.
Example:
class MyTest(pyauto.PyUITest):
def testBasic(self):
info = self.OmniboxInfo() # fetch omnibox snapshot
print info.Matches()
See more tests in chrome/test/functional/omnibox.py.
"""
import simplejson as json
from pyauto_errors import JSONInterfaceError
class OmniboxInfo(object):
"""Represent info for Chromium Omnibox.
Info contains:
- a list of matches in the same order as you'd see in the omnibox,
- a dictionary of properties related to the omnibox.
Sample info text:
{ u'matches': [
{
u'contents': u'google',
u'description': u'Google Search',
u'destination_url': u'http://www.google.com/search?aq=f&'
'sourceid=chrome&ie=UTF-8&q=google',
u'starred': False,
u'type': u'search-what-you-typed'},
{
u'contents': u'maps.google.com/',
u'description': u'Google Maps',
u'destination_url': u'http://maps.google.com/',
u'starred': False,
u'type': u'navsuggest'},
{ u'contents': u'google maps',
u'description': u'',
u'destination_url': u'http://www.google.com/search?aq=0&oq=google&'
'sourceid=chrome&ie=UTF-8&q=google+maps',
u'starred': False,
u'type': u'search-suggest'},
{ u'contents': u'google earth',
u'description': u'',
u'destination_url': u'http://www.google.com/search?aq=1&oq=google&'
'sourceid=chrome&ie=UTF-8&q=google+earth',
u'starred': False,
u'type': u'search-suggest'},
{ u'contents': u'Search Google for <enter query>',
u'description': u'(Keyword: google.com)',
u'destination_url': u'',
u'starred': False,
u'type': u'search-other-engine'}],
u'properties': { u'has_focus': True,
u'keyword': u'',
u'query_in_progress': False,
u'text': u'google'}}
"""
def __init__(self, omnibox_dict):
"""Initialize a OmniboxInfo from a json string.
Args:
omnibox_dict: returned by an IPC call for the command 'GetOmniboxInfo'.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
# JSON string prepared in GetOmniboxInfo() in automation_provider.cc
self.omniboxdict = omnibox_dict
if self.omniboxdict.has_key('error'):
raise JSONInterfaceError(self.omniboxdict['error'])
def Matches(self):
"""Get omnibox matches.
Returns:
a list of omnibox match items.
"""
return self.omniboxdict.get('matches', [])
def MatchesWithAttributes(self, attr_dict):
"""Find all omnibox matches which match the attributes in |attr_dict|.
Args:
attr_dict: a dictionary of attributes to be satisfied.
All attributes in the given dictionary should be satisfied.
example:
        { 'destination_url': 'http://www.google.com/',
'description': 'Google' }
Returns:
a list of omnibox match items.
"""
out = []
for item in self.Matches():
matched = True
for key, val in attr_dict.iteritems():
if not item.has_key(key) or item[key] != val:
matched = False
if matched:
out.append(item)
return out
def Properties(self, key=None):
"""Get the properties
Args:
key: if specified, value for the given property is returned.
Returns:
a dictionary of properties if no key is given, OR
value corresponding to a particular property if key is given
"""
    props = self.omniboxdict.get('properties')
    if not key:
      return props
    return props.get(key)
def Text(self):
"""Get the text in the omnibox.
This need not be the same as the user-inputted text, since omnibox may
autocomplete some URLs, or the user may move omnibox popup selection
up/down.
"""
return self.Properties('text')
def IsQueryInProgress(self):
"""Determine if a query is in progress."""
return self.Properties('query_in_progress')
| bsd-3-clause | -5,654,108,129,798,670,000 | 29.857143 | 79 | 0.622222 | false |
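An assumed usage sketch inside a pyauto test, exercising the accessors above:

info = self.OmniboxInfo()                    # omnibox snapshot from pyauto
if not info.IsQueryInProgress():
    print info.Text()
starred = info.MatchesWithAttributes({'starred': True})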
cernops/nova | nova/console/websocketproxy.py | 4 | 6962 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Websocket proxy that is compatible with OpenStack Nova.
Leverages websockify.py by Joel Martin
'''
import socket
import sys
from oslo_log import log as logging
from six.moves import http_cookies as Cookie
import six.moves.urllib.parse as urlparse
import websockify
import nova.conf
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova import exception
from nova.i18n import _
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class NovaProxyRequestHandlerBase(object):
def address_string(self):
# NOTE(rpodolyaka): override the superclass implementation here and
# explicitly disable the reverse DNS lookup, which might fail on some
# deployments due to DNS configuration and break VNC access completely
return str(self.client_address[0])
def verify_origin_proto(self, connection_info, origin_proto):
access_url = connection_info.get('access_url')
if not access_url:
detail = _("No access_url in connection_info. "
"Cannot validate protocol")
raise exception.ValidationError(detail=detail)
expected_protos = [urlparse.urlparse(access_url).scheme]
# NOTE: For serial consoles the expected protocol could be ws or
# wss which correspond to http and https respectively in terms of
# security.
if 'ws' in expected_protos:
expected_protos.append('http')
if 'wss' in expected_protos:
expected_protos.append('https')
return origin_proto in expected_protos
def new_websocket_client(self):
"""Called after a new WebSocket connection has been established."""
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
from eventlet import hubs
hubs.use_hub()
# The nova expected behavior is to have token
# passed to the method GET of the request
parse = urlparse.urlparse(self.path)
if parse.scheme not in ('http', 'https'):
# From a bug in urlparse in Python < 2.7.4 we cannot support
# special schemes (cf: http://bugs.python.org/issue9374)
if sys.version_info < (2, 7, 4):
raise exception.NovaException(
_("We do not support scheme '%s' under Python < 2.7.4, "
"please use http or https") % parse.scheme)
query = parse.query
token = urlparse.parse_qs(query).get("token", [""]).pop()
if not token:
            # NoVNC uses its own convention that forwards the token
            # from the request to a cookie header; we should also
            # check for this behavior
hcookie = self.headers.getheader('cookie')
if hcookie:
cookie = Cookie.SimpleCookie()
cookie.load(hcookie)
if 'token' in cookie:
token = cookie['token'].value
ctxt = context.get_admin_context()
rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
connect_info = rpcapi.check_token(ctxt, token=token)
if not connect_info:
raise exception.InvalidToken(token=token)
# Verify Origin
expected_origin_hostname = self.headers.getheader('Host')
if ':' in expected_origin_hostname:
e = expected_origin_hostname
if '[' in e and ']' in e:
expected_origin_hostname = e.split(']')[0][1:]
else:
expected_origin_hostname = e.split(':')[0]
expected_origin_hostnames = CONF.console_allowed_origins
expected_origin_hostnames.append(expected_origin_hostname)
origin_url = self.headers.getheader('Origin')
# missing origin header indicates non-browser client which is OK
if origin_url is not None:
origin = urlparse.urlparse(origin_url)
origin_hostname = origin.hostname
origin_scheme = origin.scheme
if origin_hostname == '' or origin_scheme == '':
detail = _("Origin header not valid.")
raise exception.ValidationError(detail=detail)
if origin_hostname not in expected_origin_hostnames:
detail = _("Origin header does not match this host.")
raise exception.ValidationError(detail=detail)
if not self.verify_origin_proto(connect_info, origin_scheme):
detail = _("Origin header protocol does not match this host.")
raise exception.ValidationError(detail=detail)
self.msg(_('connect info: %s'), str(connect_info))
host = connect_info['host']
port = int(connect_info['port'])
# Connect to the target
self.msg(_("connecting to: %(host)s:%(port)s") % {'host': host,
'port': port})
tsock = self.socket(host, port, connect=True)
# Handshake as necessary
if connect_info.get('internal_access_path'):
tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
connect_info['internal_access_path'])
while True:
data = tsock.recv(4096, socket.MSG_PEEK)
if data.find("\r\n\r\n") != -1:
if data.split("\r\n")[0].find("200") == -1:
raise exception.InvalidConnectionInfo()
tsock.recv(len(data))
break
# Start proxying
try:
self.do_proxy(tsock)
except Exception:
if tsock:
tsock.shutdown(socket.SHUT_RDWR)
tsock.close()
self.vmsg(_("%(host)s:%(port)s: Target closed") %
{'host': host, 'port': port})
raise
class NovaProxyRequestHandler(NovaProxyRequestHandlerBase,
websockify.ProxyRequestHandler):
def __init__(self, *args, **kwargs):
websockify.ProxyRequestHandler.__init__(self, *args, **kwargs)
def socket(self, *args, **kwargs):
return websockify.WebSocketServer.socket(*args, **kwargs)
class NovaWebSocketProxy(websockify.WebSocketProxy):
@staticmethod
def get_logger():
return LOG
| apache-2.0 | 4,487,071,705,288,082,000 | 39.242775 | 78 | 0.606148 | false |
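An illustrative check (assumed handler instance and URL) of the origin-protocol rule implemented in verify_origin_proto above: a wss access_url accepts both 'wss' and 'https' Origin schemes:

connect_info = {'access_url': 'wss://console.example.com/?token=abc'}
assert handler.verify_origin_proto(connect_info, 'wss')
assert handler.verify_origin_proto(connect_info, 'https')
assert not handler.verify_origin_proto(connect_info, 'http')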
tutorcruncher/morpheus | morpheus/app/patches.py | 1 | 7326 | import asyncio
from atoolbox import patch
from atoolbox.db.helpers import run_sql_section
from textwrap import dedent, indent
from time import time
from tqdm import tqdm
@patch
async def run_logic_sql(conn, settings, **kwargs):
"""
run the "logic" section of models.sql
"""
await run_sql_section('logic', settings.sql_path.read_text(), conn)
async def print_run_sql(conn, sql):
indented_sql = indent(dedent(sql.strip('\n')), ' ').strip('\n')
print(f'running\n\033[36m{indented_sql}\033[0m ...')
start = time()
v = await conn.execute(sql)
print(f'completed in {time() - start:0.1f}s: {v}')
async def chunked_update(conn, table, sql, sleep_time: float = 0):
count = await conn.fetchval(f'select count(*) from {table} WHERE company_id IS NULL')
print(f'{count} {table} to update...')
with tqdm(total=count, smoothing=0.1) as t:
while True:
v = await conn.execute(sql)
updated = int(v.replace('UPDATE ', ''))
if updated == 0:
return
t.update(updated)
await asyncio.sleep(sleep_time)
@patch
async def performance_step1(conn, settings, **kwargs):
"""
First step to changing schema to improve performance. THIS WILL BE SLOW, but can be run in the background.
"""
await print_run_sql(conn, "SET lock_timeout TO '10s'")
await print_run_sql(conn, 'create extension if not exists btree_gin;')
await print_run_sql(
conn,
"""
CREATE TABLE companies (
id SERIAL PRIMARY KEY,
code VARCHAR(63) NOT NULL UNIQUE
);
""",
)
await print_run_sql(
conn,
"""
INSERT INTO companies (code)
SELECT DISTINCT company
FROM message_groups;
""",
)
await print_run_sql(conn, 'ALTER TABLE message_groups ADD company_id INT REFERENCES companies ON DELETE RESTRICT')
await chunked_update(
conn,
'message_groups',
"""
UPDATE message_groups g
SET company_id=c.id FROM companies c
WHERE g.company=c.code and g.id in (
SELECT id
FROM message_groups
WHERE company_id IS NULL
FOR UPDATE
LIMIT 1000
)
""",
)
await print_run_sql(conn, 'ALTER TABLE messages ADD COLUMN company_id INT REFERENCES companies ON DELETE RESTRICT;')
await print_run_sql(conn, 'ALTER TABLE messages ADD COLUMN new_method SEND_METHODS;')
@patch(direct=True)
async def performance_step2(conn, settings, **kwargs):
"""
Second step to changing schema to improve performance. THIS WILL BE VERY SLOW, but can be run in the background.
"""
await print_run_sql(conn, "SET lock_timeout TO '40s'")
await print_run_sql(conn, 'DROP INDEX CONCURRENTLY IF EXISTS message_status')
await print_run_sql(conn, 'DROP INDEX CONCURRENTLY IF EXISTS message_group_id')
await print_run_sql(conn, 'DROP INDEX CONCURRENTLY IF EXISTS event_ts')
await print_run_sql(conn, 'DROP INDEX CONCURRENTLY IF EXISTS link_message_id')
await print_run_sql(conn, 'DROP INDEX CONCURRENTLY IF EXISTS message_group_company_id')
await print_run_sql(
conn, 'CREATE INDEX CONCURRENTLY message_group_company_id ON message_groups USING btree (company_id)'
)
await print_run_sql(conn, 'DROP INDEX CONCURRENTLY IF EXISTS message_update_ts')
await print_run_sql(conn, 'CREATE INDEX CONCURRENTLY message_update_ts ON messages USING btree (update_ts desc)')
await print_run_sql(conn, 'DROP INDEX CONCURRENTLY IF EXISTS message_tags')
await print_run_sql(
conn, 'CREATE INDEX CONCURRENTLY message_tags ON messages USING gin (tags, new_method, company_id)'
)
await print_run_sql(conn, 'DROP INDEX CONCURRENTLY IF EXISTS message_vector')
await print_run_sql(
conn, 'CREATE INDEX CONCURRENTLY message_vector ON messages USING gin (vector, new_method, company_id)'
)
await print_run_sql(conn, 'DROP INDEX CONCURRENTLY IF EXISTS message_company_method')
await print_run_sql(
conn, 'CREATE INDEX CONCURRENTLY message_company_method ON messages USING btree (new_method, company_id, id)'
)
await print_run_sql(conn, 'DROP INDEX CONCURRENTLY IF EXISTS message_company_id')
await print_run_sql(conn, 'CREATE INDEX CONCURRENTLY message_company_id ON messages USING btree (company_id)')
@patch(direct=True)
async def performance_step3(conn, settings, **kwargs):
"""
Third step to changing schema to improve performance. THIS WILL BE VERY SLOW, but can be run in the background.
"""
await print_run_sql(conn, "SET lock_timeout TO '40s'")
await chunked_update(
conn,
'messages',
"""
UPDATE messages m
SET company_id=sq.company_id, new_method=sq.method
FROM (
SELECT m2.id, g.company_id, g.method
FROM messages m2
JOIN message_groups g ON m2.group_id = g.id
WHERE m2.company_id IS NULL OR m2.new_method IS NULL
ORDER BY id
LIMIT 100
) sq
where sq.id = m.id
""",
sleep_time=0.2,
)
@patch
async def performance_step4(conn, settings, **kwargs):
"""
Fourth step to changing schema to improve performance. This should not be too slow, but will LOCK ENTIRE TABLES.
"""
print('create the table companies...')
await print_run_sql(conn, "SET lock_timeout TO '40s'")
await print_run_sql(conn, 'LOCK TABLE companies IN SHARE MODE')
await print_run_sql(
conn,
"""
INSERT INTO companies (code)
SELECT DISTINCT company FROM message_groups
ON CONFLICT (code) DO NOTHING;
""",
)
await print_run_sql(conn, 'LOCK TABLE message_groups IN SHARE MODE')
await print_run_sql(
conn,
"""
UPDATE message_groups g SET company_id=c.id
FROM companies c WHERE g.company=c.code AND g.company_id IS NULL
""",
)
await print_run_sql(conn, 'ALTER TABLE message_groups ALTER company_id SET NOT NULL')
await print_run_sql(conn, 'ALTER TABLE message_groups DROP company')
await print_run_sql(conn, 'ALTER TABLE message_groups RENAME method TO message_method')
await print_run_sql(conn, 'LOCK TABLE messages IN SHARE MODE')
await print_run_sql(
conn,
"""
UPDATE messages m
SET company_id=g.company_id, new_method=g.message_method
FROM message_groups g
WHERE m.group_id=g.id AND (m.company_id IS NULL OR m.new_method IS NULL)
""",
)
await print_run_sql(
conn,
"""
ALTER TABLE messages ADD CONSTRAINT
messages_company_id_fkey FOREIGN KEY (company_id) REFERENCES companies (id) ON DELETE RESTRICT
""",
)
await print_run_sql(conn, 'ALTER TABLE messages ALTER COLUMN company_id SET NOT NULL')
await print_run_sql(conn, 'ALTER TABLE messages ALTER COLUMN new_method SET NOT NULL')
await print_run_sql(conn, 'ALTER TABLE messages RENAME new_method TO method')
@patch
async def add_aggregation_view(conn, settings, **kwargs):
"""
run the "message_aggregation" section of models.sql
"""
await run_sql_section('message_aggregation', settings.sql_path.read_text(), conn)
| mit | -4,704,837,679,300,637,000 | 34.736585 | 120 | 0.644554 | false |
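An assumed sketch of adding another migration in this style; atoolbox's @patch registers a coroutine that receives an open connection (the index name and columns are hypothetical):

@patch
async def add_update_ts_index(conn, settings, **kwargs):
    """
    add a btree index on messages.update_ts
    """
    await print_run_sql(
        conn,
        'CREATE INDEX IF NOT EXISTS message_update_ts_2 ON messages USING btree (update_ts)')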
hgl888/chromium-crosswalk-efl | tools/telemetry/telemetry/results/page_test_results.py | 32 | 6232 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import traceback
from telemetry import value as value_module
from telemetry.results import page_run
from telemetry.results import progress_reporter as progress_reporter_module
from telemetry.value import failure
from telemetry.value import skip
class PageTestResults(object):
def __init__(self, output_stream=None, output_formatters=None,
progress_reporter=None, trace_tag=''):
"""
Args:
output_stream: The output stream to use to write test results.
output_formatters: A list of output formatters. The output
formatters are typically used to format the test results, such
as CsvOutputFormatter, which output the test results as CSV.
progress_reporter: An instance of progress_reporter.ProgressReporter,
to be used to output test status/results progressively.
trace_tag: A string to append to the buildbot trace
name. Currently only used for buildbot.
"""
# TODO(chrishenry): Figure out if trace_tag is still necessary.
super(PageTestResults, self).__init__()
self._output_stream = output_stream
self._progress_reporter = (
progress_reporter if progress_reporter is not None
else progress_reporter_module.ProgressReporter())
self._output_formatters = (
output_formatters if output_formatters is not None else [])
self._trace_tag = trace_tag
self._current_page_run = None
self._all_page_runs = []
self._representative_value_for_each_value_name = {}
self._all_summary_values = []
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.items():
if isinstance(v, collections.Container):
v = copy.copy(v)
setattr(result, k, v)
return result
@property
def all_page_specific_values(self):
values = []
for run in self._all_page_runs:
values += run.values
if self._current_page_run:
values += self._current_page_run.values
return values
@property
def all_summary_values(self):
return self._all_summary_values
@property
def current_page(self):
assert self._current_page_run, 'Not currently running test.'
return self._current_page_run.page
@property
def current_page_run(self):
assert self._current_page_run, 'Not currently running test.'
return self._current_page_run
@property
def all_page_runs(self):
return self._all_page_runs
@property
def pages_that_succeeded(self):
"""Returns the set of pages that succeeded."""
pages = set(run.page for run in self.all_page_runs)
pages.difference_update(self.pages_that_failed)
return pages
@property
def pages_that_failed(self):
"""Returns the set of failed pages."""
failed_pages = set()
for run in self.all_page_runs:
if run.failed:
failed_pages.add(run.page)
return failed_pages
@property
def failures(self):
values = self.all_page_specific_values
return [v for v in values if isinstance(v, failure.FailureValue)]
@property
def skipped_values(self):
values = self.all_page_specific_values
return [v for v in values if isinstance(v, skip.SkipValue)]
def _GetStringFromExcInfo(self, err):
return ''.join(traceback.format_exception(*err))
def WillRunPage(self, page):
assert not self._current_page_run, 'Did not call DidRunPage.'
self._current_page_run = page_run.PageRun(page)
self._progress_reporter.WillRunPage(self)
def DidRunPage(self, page, discard_run=False): # pylint: disable=W0613
"""
Args:
page: The current page under test.
discard_run: Whether to discard the entire run and all of its
associated results.
"""
assert self._current_page_run, 'Did not call WillRunPage.'
self._progress_reporter.DidRunPage(self)
if not discard_run:
self._all_page_runs.append(self._current_page_run)
self._current_page_run = None
def WillAttemptPageRun(self, attempt_count, max_attempts):
"""To be called when a single attempt on a page run is starting.
This is called between WillRunPage and DidRunPage and can be
called multiple times, one for each attempt.
Args:
attempt_count: The current attempt number, start at 1
(attempt_count == 1 for the first attempt, 2 for second
attempt, and so on).
max_attempts: Maximum number of page run attempts before failing.
"""
self._progress_reporter.WillAttemptPageRun(
self, attempt_count, max_attempts)
# Clear any values from previous attempts for this page run.
self._current_page_run.ClearValues()
def AddValue(self, value):
assert self._current_page_run, 'Not currently running test.'
self._ValidateValue(value)
# TODO(eakuefner/chrishenry): Add only one skip per pagerun assert here
self._current_page_run.AddValue(value)
self._progress_reporter.DidAddValue(value)
def AddSummaryValue(self, value):
assert value.page is None
self._ValidateValue(value)
self._all_summary_values.append(value)
def _ValidateValue(self, value):
assert isinstance(value, value_module.Value)
if value.name not in self._representative_value_for_each_value_name:
self._representative_value_for_each_value_name[value.name] = value
representative_value = self._representative_value_for_each_value_name[
value.name]
assert value.IsMergableWith(representative_value)
def PrintSummary(self):
self._progress_reporter.DidFinishAllTests(self)
for output_formatter in self._output_formatters:
output_formatter.Format(self)
def FindPageSpecificValuesForPage(self, page, value_name):
values = []
for value in self.all_page_specific_values:
if value.page == page and value.name == value_name:
values.append(value)
return values
def FindAllPageSpecificValuesNamed(self, value_name):
values = []
for value in self.all_page_specific_values:
if value.name == value_name:
values.append(value)
return values
| bsd-3-clause | 8,316,439,513,002,947,000 | 33.054645 | 75 | 0.693999 | false |
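An assumed lifecycle sketch showing how a test driver typically feeds this results object (`page` and `value` are hypothetical telemetry objects):

results = PageTestResults()
results.WillRunPage(page)
results.AddValue(value)      # value.page must match the current page
results.DidRunPage(page)
results.PrintSummary()       # runs the configured output formatters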
takis/django | tests/template_tests/syntax_tests/test_regroup.py | 367 | 3984 | from datetime import date
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class RegroupTagTests(SimpleTestCase):
@setup({'regroup01': ''
'{% regroup data by bar as grouped %}'
'{% for group in grouped %}'
'{{ group.grouper }}:'
'{% for item in group.list %}'
'{{ item.foo }}'
'{% endfor %},'
'{% endfor %}'})
def test_regroup01(self):
output = self.engine.render_to_string('regroup01', {
'data': [{'foo': 'c', 'bar': 1},
{'foo': 'd', 'bar': 1},
{'foo': 'a', 'bar': 2},
{'foo': 'b', 'bar': 2},
{'foo': 'x', 'bar': 3}],
})
self.assertEqual(output, '1:cd,2:ab,3:x,')
@setup({'regroup02': ''
'{% regroup data by bar as grouped %}'
'{% for group in grouped %}'
'{{ group.grouper }}:'
'{% for item in group.list %}'
'{{ item.foo }}'
'{% endfor %}'
'{% endfor %}'})
def test_regroup02(self):
"""
Test for silent failure when target variable isn't found
"""
output = self.engine.render_to_string('regroup02', {})
self.assertEqual(output, '')
@setup({'regroup03': ''
'{% regroup data by at|date:"m" as grouped %}'
'{% for group in grouped %}'
'{{ group.grouper }}:'
'{% for item in group.list %}'
'{{ item.at|date:"d" }}'
'{% endfor %},'
'{% endfor %}'})
def test_regroup03(self):
"""
Regression tests for #17675
The date template filter has expects_localtime = True
"""
output = self.engine.render_to_string('regroup03', {
'data': [{'at': date(2012, 2, 14)},
{'at': date(2012, 2, 28)},
{'at': date(2012, 7, 4)}],
})
self.assertEqual(output, '02:1428,07:04,')
@setup({'regroup04': ''
'{% regroup data by bar|join:"" as grouped %}'
'{% for group in grouped %}'
'{{ group.grouper }}:'
'{% for item in group.list %}'
'{{ item.foo|first }}'
'{% endfor %},'
'{% endfor %}'})
def test_regroup04(self):
"""
The join template filter has needs_autoescape = True
"""
output = self.engine.render_to_string('regroup04', {
'data': [{'foo': 'x', 'bar': ['ab', 'c']},
{'foo': 'y', 'bar': ['a', 'bc']},
{'foo': 'z', 'bar': ['a', 'd']}],
})
self.assertEqual(output, 'abc:xy,ad:z,')
# Test syntax errors
@setup({'regroup05': '{% regroup data by bar as %}'})
def test_regroup05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('regroup05')
@setup({'regroup06': '{% regroup data by bar thisaintright grouped %}'})
def test_regroup06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('regroup06')
@setup({'regroup07': '{% regroup data thisaintright bar as grouped %}'})
def test_regroup07(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('regroup07')
@setup({'regroup08': '{% regroup data by bar as grouped toomanyargs %}'})
def test_regroup08(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('regroup08')
| bsd-3-clause | 3,075,434,005,305,183,700 | 38.058824 | 77 | 0.443273 | false |
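An assumed extra case in the same style, showing that the regroup argument may be a dotted lookup as well as a plain attribute:

@setup({'regroup09': ''
        '{% regroup data by bar.x as grouped %}'
        '{% for group in grouped %}{{ group.grouper }}{% endfor %}'})
def test_regroup09(self):
    output = self.engine.render_to_string('regroup09', {
        'data': [{'foo': 'a', 'bar': {'x': 1}}],
    })
    self.assertEqual(output, '1')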
defionscode/ansible | test/units/modules/network/f5/test_bigip_firewall_dos_profile.py | 8 | 3629 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_firewall_dos_profile import ApiParameters
from library.modules.bigip_firewall_dos_profile import ModuleParameters
from library.modules.bigip_firewall_dos_profile import ModuleManager
from library.modules.bigip_firewall_dos_profile import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_firewall_dos_profile import ApiParameters
from ansible.modules.network.f5.bigip_firewall_dos_profile import ModuleParameters
from ansible.modules.network.f5.bigip_firewall_dos_profile import ModuleManager
from ansible.modules.network.f5.bigip_firewall_dos_profile import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
description='my description',
threshold_sensitivity='low',
default_whitelist='whitelist1'
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.description == 'my description'
assert p.threshold_sensitivity == 'low'
assert p.default_whitelist == '/Common/whitelist1'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
set_module_args(dict(
name='foo',
description='this is a description',
threshold_sensitivity='low',
default_whitelist='whitelist1',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
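        # Mock's side_effect yields one value per call: the first exists()
        # check returns False, the second (presumably verifying the create)
        # returns True.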
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['description'] == 'this is a description'
| gpl-3.0 | -3,020,930,308,509,654,000 | 31.990909 | 91 | 0.680628 | false |
harlanhaskins/swift | utils/gyb_syntax_support/PatternNodes.py | 12 | 3648 | from Child import Child
from Node import Node # noqa: I201
PATTERN_NODES = [
# type-annotation -> ':' type
Node('TypeAnnotation', kind='Syntax',
children=[
Child('Colon', kind='ColonToken'),
Child('Type', kind='Type'),
]),
# enum-case-pattern -> type-identifier? '.' identifier tuple-pattern?
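    #   (illustrative instances of this production, not from the grammar:
    #   '.none' or 'Optional.some(x)')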
Node('EnumCasePattern', kind='Pattern',
children=[
Child('Type', kind='Type',
is_optional=True),
Child('Period', kind='PeriodToken'),
Child('CaseName', kind='IdentifierToken'),
Child('AssociatedTuple', kind='TuplePattern',
is_optional=True),
]),
# is-type-pattern -> 'is' type
Node('IsTypePattern', kind='Pattern',
children=[
Child('IsKeyword', kind='IsToken'),
Child('Type', kind='Type'),
]),
# optional-pattern -> pattern '?'
Node('OptionalPattern', kind='Pattern',
children=[
Child('SubPattern', kind='Pattern'),
Child('QuestionMark', kind='PostfixQuestionMarkToken'),
]),
# identifier-pattern -> identifier
Node('IdentifierPattern', kind='Pattern',
children=[
Child('Identifier', kind='Token',
token_choices=[
'SelfToken',
'IdentifierToken',
]),
]),
# as-pattern -> pattern 'as' type
Node('AsTypePattern', kind='Pattern',
children=[
Child('Pattern', kind='Pattern'),
Child('AsKeyword', kind='AsToken'),
Child('Type', kind='Type'),
]),
# tuple-pattern -> '(' tuple-pattern-element-list ')'
Node('TuplePattern', kind='Pattern',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('Elements', kind='TuplePatternElementList',
collection_element_name='Element'),
Child('RightParen', kind='RightParenToken'),
]),
# wildcard-pattern -> '_' type-annotation?
Node('WildcardPattern', kind='Pattern',
children=[
Child('Wildcard', kind='WildcardToken'),
Child('TypeAnnotation', kind='TypeAnnotation',
is_optional=True),
]),
# tuple-pattern-element -> identifier? ':' pattern ','?
Node('TuplePatternElement', kind='Syntax',
traits=['WithTrailingComma', 'Labeled'],
children=[
Child('LabelName', kind='IdentifierToken',
is_optional=True),
Child('LabelColon', kind='ColonToken',
is_optional=True),
Child('Pattern', kind='Pattern'),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# expr-pattern -> expr
Node('ExpressionPattern', kind='Pattern',
children=[
Child('Expression', kind='Expr'),
]),
# tuple-pattern-element-list -> tuple-pattern-element
# tuple-pattern-element-list?
Node('TuplePatternElementList', kind='SyntaxCollection',
element='TuplePatternElement'),
# value-binding-pattern -> 'let' pattern
# | 'var' pattern
Node('ValueBindingPattern', kind='Pattern',
children=[
Child('LetOrVarKeyword', kind='Token',
token_choices=[
'LetToken',
'VarToken',
]),
Child('ValuePattern', kind='Pattern'),
]),
]
| apache-2.0 | -6,545,401,852,280,253,000 | 32.46789 | 73 | 0.514803 | false |
xiaoyaozi5566/DiamondCache | src/mem/slicc/symbols/SymbolTable.py | 14 | 5906 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.util import makeDir
from slicc.generate import html
from slicc.symbols.StateMachine import StateMachine
from slicc.symbols.Type import Type
from slicc.util import Location
class SymbolTable(object):
def __init__(self, slicc):
self.slicc = slicc
self.sym_vec = []
self.sym_map_vec = [ {} ]
self.machine_components = {}
pairs = {}
pairs["enumeration"] = "yes"
location = Location("init", 0, no_warning=not slicc.verbose)
MachineType = Type(self, "MachineType", location, pairs)
self.newSymbol(MachineType)
pairs = {}
pairs["primitive"] = "yes"
pairs["external"] = "yes"
location = Location("init", 0, no_warning=not slicc.verbose)
void = Type(self, "void", location, pairs)
self.newSymbol(void)
def __repr__(self):
return "[SymbolTable]" # FIXME
def codeFormatter(self, *args, **kwargs):
return self.slicc.codeFormatter(*args, **kwargs)
def newSymbol(self, sym):
self.registerSym(str(sym), sym)
self.sym_vec.append(sym)
def registerSym(self, id, sym):
# Check for redeclaration (in the current frame only)
if id in self.sym_map_vec[-1]:
sym.error("Symbol '%s' redeclared in same scope.", id)
# FIXME - warn on masking of a declaration in a previous frame
self.sym_map_vec[-1][id] = sym
def find(self, ident, types=None):
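        # Frames are searched innermost-first. For example, the call
        #     self.find("current_machine", StateMachine)
        # in newMachComponentSym() below returns the enclosing machine's
        # symbol, or None when no frame defines it.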
for sym_map in reversed(self.sym_map_vec):
try:
symbol = sym_map[ident]
except KeyError:
continue
if types is not None:
if not isinstance(symbol, types):
symbol.error("Symbol '%s' is not of types '%s'.",
symbol,
types)
return symbol
return None
def newMachComponentSym(self, symbol):
# used to cheat-- that is, access components in other machines
machine = self.find("current_machine", StateMachine)
if machine:
self.machine_components[str(machine)][str(symbol)] = symbol
def newCurrentMachine(self, sym):
self.registerGlobalSym(str(sym), sym)
self.registerSym("current_machine", sym)
self.sym_vec.append(sym)
self.machine_components[str(sym)] = {}
@property
def state_machine(self):
return self.find("current_machine", StateMachine)
def pushFrame(self):
self.sym_map_vec.append({})
def popFrame(self):
assert len(self.sym_map_vec) > 0
self.sym_map_vec.pop()
def registerGlobalSym(self, ident, symbol):
# Check for redeclaration (global frame only)
if ident in self.sym_map_vec[0]:
symbol.error("Symbol '%s' redeclared in global scope." % ident)
self.sym_map_vec[0][ident] = symbol
def getAllType(self, type):
for symbol in self.sym_vec:
if isinstance(symbol, type):
yield symbol
def writeCodeFiles(self, path):
makeDir(path)
code = self.codeFormatter()
code('''
/** Auto generated C++ code started by $__file__:$__line__ */
#include "mem/ruby/slicc_interface/RubySlicc_includes.hh"
''')
for symbol in self.sym_vec:
if isinstance(symbol, Type) and not symbol.isPrimitive:
code('#include "mem/protocol/${{symbol.c_ident}}.hh"')
code.write(path, "Types.hh")
for symbol in self.sym_vec:
symbol.writeCodeFiles(path)
def writeHTMLFiles(self, path):
makeDir(path)
machines = list(self.getAllType(StateMachine))
if len(machines) > 1:
name = "%s_table.html" % machines[0].ident
else:
name = "empty.html"
code = self.codeFormatter()
code('''
<html>
<head>
<title>$path</title>
</head>
<frameset rows="*,30">
<frame name="Table" src="$name">
<frame name="Status" src="empty.html">
</frameset>
</html>
''')
code.write(path, "index.html")
code = self.codeFormatter()
code("<HTML></HTML>")
code.write(path, "empty.html")
for symbol in self.sym_vec:
symbol.writeHTMLFiles(path)
__all__ = [ "SymbolTable" ]
| bsd-3-clause | -5,852,742,959,842,511,000 | 32.748571 | 75 | 0.635963 | false |
kanagasabapathi/python-for-android | python-modules/twisted/twisted/manhole/_inspectro.py | 62 | 10118 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""An input/output window for the glade reactor inspector.
"""
import time
import gtk
import gobject
import gtk.glade
from twisted.python.util import sibpath
from twisted.python import reflect
from twisted.manhole.ui import gtk2manhole
from twisted.python.components import Adapter, registerAdapter
from twisted.python import log
from twisted.protocols import policies
from zope.interface import implements, Interface
# the glade file uses stock icons, which requires gnome to be installed
import gnome
version = "$Revision: 1.1 $"[11:-2]
gnome.init("gladereactor Inspector", version)
class ConsoleOutput(gtk2manhole.ConsoleOutput):
def _captureLocalLog(self):
self.fobs = log.FileLogObserver(gtk2manhole._Notafile(self, "log"))
self.fobs.start()
def stop(self):
self.fobs.stop()
del self.fobs
class ConsoleInput(gtk2manhole.ConsoleInput):
def sendMessage(self):
buffer = self.textView.get_buffer()
iter1, iter2 = buffer.get_bounds()
text = buffer.get_text(iter1, iter2, False)
self.do(text)
def do(self, text):
self.toplevel.do(text)
class INode(Interface):
"""A node in the inspector tree model.
"""
def __adapt__(adaptable, default):
if hasattr(adaptable, "__dict__"):
return InstanceNode(adaptable)
return AttributesNode(adaptable)
class InspectorNode(Adapter):
implements(INode)
def postInit(self, offset, parent, slot):
self.offset = offset
self.parent = parent
self.slot = slot
def getPath(self):
L = []
x = self
while x.parent is not None:
L.append(x.offset)
x = x.parent
L.reverse()
return L
def __getitem__(self, index):
slot, o = self.get(index)
n = INode(o, persist=False)
n.postInit(index, self, slot)
return n
def origstr(self):
return str(self.original)
def format(self):
return (self.slot, self.origstr())
class ConstantNode(InspectorNode):
def __len__(self):
return 0
class DictionaryNode(InspectorNode):
def get(self, index):
L = self.original.items()
L.sort()
return L[index]
def __len__(self):
return len(self.original)
def origstr(self):
return "Dictionary"
class ListNode(InspectorNode):
def get(self, index):
return index, self.original[index]
def origstr(self):
return "List"
def __len__(self):
return len(self.original)
class AttributesNode(InspectorNode):
def __len__(self):
return len(dir(self.original))
def get(self, index):
L = dir(self.original)
L.sort()
return L[index], getattr(self.original, L[index])
class InstanceNode(InspectorNode):
def __len__(self):
return len(self.original.__dict__) + 1
def get(self, index):
if index == 0:
if hasattr(self.original, "__class__"):
v = self.original.__class__
else:
v = type(self.original)
return "__class__", v
else:
index -= 1
L = self.original.__dict__.items()
L.sort()
return L[index]
import types
for x in dict, types.DictProxyType:
registerAdapter(DictionaryNode, x, INode)
for x in list, tuple:
registerAdapter(ListNode, x, INode)
for x in int, str:
registerAdapter(ConstantNode, x, INode)
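# With these registrations INode(obj) dispatches on the object's type; e.g.
# INode({'a': 1}, persist=False) yields a DictionaryNode, while objects with
# a __dict__ fall back to InstanceNode through INode.__adapt__ (illustrative).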
class InspectorTreeModel(gtk.GenericTreeModel):
def __init__(self, root):
gtk.GenericTreeModel.__init__(self)
self.root = INode(root, persist=False)
self.root.postInit(0, None, 'root')
def on_get_flags(self):
return 0
def on_get_n_columns(self):
return 1
def on_get_column_type(self, index):
return gobject.TYPE_STRING
def on_get_path(self, node):
return node.getPath()
def on_get_iter(self, path):
x = self.root
for elem in path:
x = x[elem]
return x
def on_get_value(self, node, column):
return node.format()[column]
def on_iter_next(self, node):
try:
return node.parent[node.offset + 1]
except IndexError:
return None
def on_iter_children(self, node):
return node[0]
def on_iter_has_child(self, node):
return len(node)
def on_iter_n_children(self, node):
return len(node)
def on_iter_nth_child(self, node, n):
if node is None:
return None
return node[n]
def on_iter_parent(self, node):
return node.parent
class Inspectro:
selected = None
def __init__(self, o=None):
self.xml = x = gtk.glade.XML(sibpath(__file__, "inspectro.glade"))
self.tree_view = x.get_widget("treeview")
colnames = ["Name", "Value"]
for i in range(len(colnames)):
self.tree_view.append_column(
gtk.TreeViewColumn(
colnames[i], gtk.CellRendererText(), text=i))
d = {}
for m in reflect.prefixedMethods(self, "on_"):
d[m.im_func.__name__] = m
self.xml.signal_autoconnect(d)
if o is not None:
self.inspect(o)
self.ns = {'inspect': self.inspect}
iwidget = x.get_widget('input')
self.input = ConsoleInput(iwidget)
self.input.toplevel = self
iwidget.connect("key_press_event", self.input._on_key_press_event)
self.output = ConsoleOutput(x.get_widget('output'))
def select(self, o):
self.selected = o
self.ns['it'] = o
self.xml.get_widget("itname").set_text(repr(o))
self.xml.get_widget("itpath").set_text("???")
def inspect(self, o):
self.model = InspectorTreeModel(o)
self.tree_view.set_model(self.model)
self.inspected = o
def do(self, command):
filename = '<inspector>'
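        # REPL-style trick: compile as an expression first so the result can
        # be printed and bound to '_' below; fall back to 'single' mode for
        # statements such as assignments or imports.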
try:
print repr(command)
try:
code = compile(command, filename, 'eval')
except:
code = compile(command, filename, 'single')
val = eval(code, self.ns, self.ns)
if val is not None:
print repr(val)
self.ns['_'] = val
except:
log.err()
def on_inspect(self, *a):
self.inspect(self.selected)
def on_inspect_new(self, *a):
Inspectro(self.selected)
def on_row_activated(self, tv, path, column):
self.select(self.model.on_get_iter(path).original)
class LoggingProtocol(policies.ProtocolWrapper):
"""Log network traffic."""
logging = True
logViewer = None
def __init__(self, *args):
policies.ProtocolWrapper.__init__(self, *args)
self.inLog = []
self.outLog = []
def write(self, data):
if self.logging:
self.outLog.append((time.time(), data))
if self.logViewer:
self.logViewer.updateOut(self.outLog[-1])
policies.ProtocolWrapper.write(self, data)
def dataReceived(self, data):
if self.logging:
self.inLog.append((time.time(), data))
if self.logViewer:
self.logViewer.updateIn(self.inLog[-1])
policies.ProtocolWrapper.dataReceived(self, data)
def __repr__(self):
r = "wrapped " + repr(self.wrappedProtocol)
if self.logging:
r += " (logging)"
return r
class LoggingFactory(policies.WrappingFactory):
"""Wrap protocols with logging wrappers."""
protocol = LoggingProtocol
logging = True
def buildProtocol(self, addr):
p = self.protocol(self, self.wrappedFactory.buildProtocol(addr))
p.logging = self.logging
return p
def __repr__(self):
r = "wrapped " + repr(self.wrappedFactory)
if self.logging:
r += " (logging)"
return r
class LogViewer:
"""Display log of network traffic."""
def __init__(self, p):
self.p = p
vals = [time.time()]
if p.inLog:
vals.append(p.inLog[0][0])
if p.outLog:
vals.append(p.outLog[0][0])
self.startTime = min(vals)
p.logViewer = self
self.xml = x = gtk.glade.XML(sibpath(__file__, "logview.glade"))
self.xml.signal_autoconnect(self)
self.loglist = self.xml.get_widget("loglist")
# setup model, connect it to my treeview
self.model = gtk.ListStore(str, str, str)
self.loglist.set_model(self.model)
self.loglist.set_reorderable(1)
self.loglist.set_headers_clickable(1)
# self.servers.set_headers_draggable(1)
# add a column
for col in [
gtk.TreeViewColumn('Time',
gtk.CellRendererText(),
text=0),
gtk.TreeViewColumn('D',
gtk.CellRendererText(),
text=1),
gtk.TreeViewColumn('Data',
gtk.CellRendererText(),
text=2)]:
self.loglist.append_column(col)
col.set_resizable(1)
r = []
for t, data in p.inLog:
r.append(((str(t - self.startTime), "R", repr(data)[1:-1])))
for t, data in p.outLog:
r.append(((str(t - self.startTime), "S", repr(data)[1:-1])))
r.sort()
for i in r:
self.model.append(i)
def updateIn(self, (time, data)):
self.model.append((str(time - self.startTime), "R", repr(data)[1:-1]))
def updateOut(self, (time, data)):
self.model.append((str(time - self.startTime), "S", repr(data)[1:-1]))
def on_logview_destroy(self, w):
self.p.logViewer = None
del self.p
def main():
x = Inspectro()
x.inspect(x)
gtk.main()
if __name__ == '__main__':
import sys
log.startLogging(sys.stdout)
main()
| apache-2.0 | 2,285,607,363,313,309,000 | 26.420054 | 78 | 0.568492 | false |
nkrinner/nova | nova/cells/filters/target_cell.py | 26 | 2862 | # Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Target cell filter.
A scheduler hint of 'target_cell' with a value of a full cell name may be
specified to route a build to a particular cell. No error handling is
done as there's no way to know whether the full path is valid.
"""
from nova.cells import filters
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class TargetCellFilter(filters.BaseCellFilter):
"""Target cell filter. Works by specifying a scheduler hint of
'target_cell'. The value should be the full cell path.
"""
def filter_all(self, cells, filter_properties):
"""Override filter_all() which operates on the full list
of cells...
"""
scheduler_hints = filter_properties.get('scheduler_hints')
if not scheduler_hints:
return cells
# This filter only makes sense at the top level, as a full
# cell name is specified. So we pop 'target_cell' out of the
# hints dict.
cell_name = scheduler_hints.pop('target_cell', None)
if not cell_name:
return cells
# This authorization is after popping off target_cell, so
# that in case this fails, 'target_cell' is not left in the
# dict when child cells go to schedule.
if not self.authorized(filter_properties['context']):
# No filtering, if not authorized.
return cells
LOG.info(_("Forcing direct route to %(cell_name)s because "
"of 'target_cell' scheduler hint"),
{'cell_name': cell_name})
scheduler = filter_properties['scheduler']
if cell_name == filter_properties['routing_path']:
return [scheduler.state_manager.get_my_state()]
ctxt = filter_properties['context']
# NOTE(belliott) Remove after deprecated schedule_run_instance
# code goes away:
schedule = filter_properties['cell_scheduler_method']
schedule = getattr(scheduler.msg_runner, schedule)
schedule(ctxt, cell_name, filter_properties['host_sched_kwargs'])
# Returning None means to skip further scheduling, because we
# handled it.
| apache-2.0 | 4,194,269,038,920,204,000 | 37.675676 | 78 | 0.665968 | false |
clemkoa/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 39 | 7489 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components required to guarantee the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
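For example, with ``n_samples = 500`` and ``eps = 0.1`` (taking log as the
natural logarithm, as the implementation does), the bound evaluates to
``4 * log(500) / (0.1 ** 2 / 2 - 0.1 ** 3 / 3)``, i.e. roughly 5300
dimensions; this is what ``johnson_lindenstrauss_min_dim(500, eps=0.1)``
returns, up to integer truncation.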
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` drastically reduces the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, the 8x8 gray level pixel data for 500
  handwritten digit pictures are randomly projected to spaces of various
  larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
  features in total are projected using a sparse random matrix to smaller
  Euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
  and projected spaces as x- and y-axes respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset, which only has 64
features in the input space, does not make sense: it does not allow for
dimensionality reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can
be decreased from 56436 down to 10000 while reasonably preserving pairwise
distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.), edgecolor='k')
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause | 8,605,176,948,871,718,000 | 36.633166 | 100 | 0.722793 | false |
mearleycf/mwepd | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/generator/ninja_test.py | 610 | 1611 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
def test_BinaryNamesWindows(self):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'win')
spec = { 'target_name': 'wee' }
self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
endswith('.exe'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.dll'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.lib'))
def test_BinaryNamesLinux(self):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'linux')
spec = { 'target_name': 'wee' }
self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
'executable'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.so'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.a'))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 8,053,624,731,567,576,000 | 35.613636 | 74 | 0.646803 | false |
clairetang6/bokeh | examples/embed/simple/simple.py | 4 | 2031 | '''This example demonstrates embedding a standalone Bokeh document
into a simple Flask application, with a basic HTML web form.
To view the example, run:
python simple.py
in this directory, and navigate to:
http://localhost:5000
'''
from __future__ import print_function
import flask
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.resources import INLINE
from bokeh.util.string import encode_utf8
app = flask.Flask(__name__)
colors = {
'Black': '#000000',
'Red': '#FF0000',
'Green': '#00FF00',
'Blue': '#0000FF',
}
def getitem(obj, item, default):
if item not in obj:
return default
else:
return obj[item]
@app.route("/")
def polynomial():
""" Very simple embedding of a polynomial chart
"""
    # Grab the input arguments from the URL
# This is automated by the button
args = flask.request.args
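    # e.g. a request to /?color=Blue&_from=0&to=10 would fill the fields
    # below (an illustrative query string, not taken from the docs).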
# Get all the form arguments in the url with defaults
color = colors[getitem(args, 'color', 'Black')]
_from = int(getitem(args, '_from', 0))
to = int(getitem(args, 'to', 10))
# Create a polynomial line graph
x = list(range(_from, to + 1))
fig = figure(title="Polynomial")
fig.line(x, [i ** 2 for i in x], color=color, line_width=2)
# Configure resources to include BokehJS inline in the document.
# For more details see:
# http://bokeh.pydata.org/en/latest/docs/reference/resources_embedding.html#bokeh-embed
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
# For more details see:
# http://bokeh.pydata.org/en/latest/docs/user_guide/embedding.html#components
script, div = components(fig, INLINE)
html = flask.render_template(
'embed.html',
plot_script=script,
plot_div=div,
js_resources=js_resources,
css_resources=css_resources,
color=color,
_from=_from,
to=to
)
return encode_utf8(html)
if __name__ == "__main__":
print(__doc__)
app.run()
| bsd-3-clause | -7,129,391,511,357,709,000 | 24.3875 | 93 | 0.642541 | false |
devcline/mtasa-blue | vendor/google-breakpad/src/testing/test/gmock_leak_test.py | 224 | 3584 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests that leaked mock objects can be caught be Google Mock."""
__author__ = '[email protected] (Zhanyong Wan)'
import gmock_test_utils
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_')
TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*']
TEST_WITH_ON_CALL = [PROGRAM_PATH, '--gtest_filter=*OnCall*']
TEST_MULTIPLE_LEAKS = [PROGRAM_PATH, '--gtest_filter=*MultipleLeaked*']
class GMockLeakTest(gmock_test_utils.TestCase):
def testCatchesLeakedMockByDefault(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL).exit_code)
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL).exit_code)
def testDoesNotCatchLeakedMockWhenDisabled(self):
self.assertEquals(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks=0']).exit_code)
self.assertEquals(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
['--gmock_catch_leaked_mocks=0']).exit_code)
def testCatchesLeakedMockWhenEnabled(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks']).exit_code)
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
['--gmock_catch_leaked_mocks']).exit_code)
def testCatchesLeakedMockWhenEnabledWithExplictFlagValue(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks=1']).exit_code)
def testCatchesMultipleLeakedMocks(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_MULTIPLE_LEAKS +
['--gmock_catch_leaked_mocks']).exit_code)
if __name__ == '__main__':
gmock_test_utils.Main()
| gpl-3.0 | 8,672,953,853,256,990,000 | 38.822222 | 80 | 0.686942 | false |
shaufi/odoo | openerp/addons/base/ir/ir_model.py | 148 | 62274 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from collections import defaultdict
import logging
import re
import time
import types
import openerp
from openerp import SUPERUSER_ID
from openerp import models, tools, api
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
# Avoid too many nested `if`s below, as RedHat's Python 2.6
    # breaks on it. See bug 939653.
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType and \
issubclass(v, fields._column) and \
v != fields._column and \
not v._deprecated and \
not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
#pseudo-method used by fields.function in ir.model/ir.model.fields
module_pool = self.pool["ir.module.module"]
installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
installed_modules = set(x['name'] for x in installed_module_names)
result = {}
xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
for k,v in xml_ids.iteritems():
result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
return result
class unknown(models.AbstractModel):
"""
Abstract model used as a substitute for relational fields with an unknown
comodel.
"""
_name = '_unknown'
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
if model.model in self.pool:
res[model.id] = self.pool[model.model].is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
if not domain:
return []
__, operator, value = domain[0]
if operator not in ['=', '!=']:
raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with = and != operator.'))
value = bool(value) if operator == '=' else not bool(value)
all_model_ids = self.search(cr, uid, [], context=context)
is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
return res
def _inherited_models(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for model in self.browse(cr, uid, ids, context=context):
res[model.id] = []
inherited_models = [model_name for model_name in self.pool[model.model]._inherits]
if inherited_models:
res[model.id] = self.search(cr, uid, [('model', 'in', inherited_models)], context=context)
return res
_columns = {
'name': fields.char('Model Description', translate=True, required=True),
'model': fields.char('Model', required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
'inherited_model_ids': fields.function(_inherited_models, type="many2many", obj="ir.model", string="Inherited models",
help="The list of models that extends the current model."),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
fnct_search=_search_osv_memory,
help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
}
def _check_model_name(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context=context):
if model.state=='manual':
if not model.model.startswith('x_'):
return False
if not re.match('^[a-z_A-Z0-9.]+$',model.model):
return False
return True
def _model_name_msg(self, cr, uid, ids, context=None):
return _('The Object name must start with x_ and not contain any special character !')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
# overridden to allow searching both on model name (model field)
# and model description (name field)
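    # e.g. a lookup for 'partner' matches 'res.partner' through the model
    # field as well as any model whose description contains "partner"
    # (an illustrative example).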
def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
domain = args + ['|', ('model', operator, name), ('name', operator, name)]
return self.name_get(cr, name_get_uid or uid,
super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
context=context)
def _drop_table(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context):
model_pool = self.pool[model.model]
cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
result = cr.fetchone()
if result and result[0] == 'v':
cr.execute('DROP view %s' % (model_pool._table,))
elif result and result[0] == 'r':
cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG):
for model in self.browse(cr, user, ids, context):
if model.state != 'manual':
raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# only reload pool for normal unlink. For module uninstall the
# reload is done independently in openerp.modules.loading
cr.commit() # must be committed before reloading registry in new cursor
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context:
context = dict(context)
context.pop('__last_update', None)
# Filter out operations 4 link from field id, because openerp-web
# always write (4,id,False) even for non dirty items
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(ir_model,self).write(cr, user, ids, vals, context)
def create(self, cr, user, vals, context=None):
if context is None:
context = {}
if context and context.get('manual'):
vals['state']='manual'
res = super(ir_model,self).create(cr, user, vals, context)
if vals.get('state','base')=='manual':
# add model in registry
self.instanciate(cr, user, vals['model'], context)
self.pool.setup_models(cr, partial=(not self.pool.ready))
# update database schema
model = self.pool[vals['model']]
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
RegistryManager.signal_registry_change(cr.dbname)
return res
def instanciate(self, cr, user, model, context=None):
if isinstance(model, unicode):
model = model.encode('utf-8')
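        # Declare a new Model subclass on the fly and register it in the
        # current registry; _custom flags it as a user-defined model.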
class CustomModel(models.Model):
_name = model
_module = False
_custom = True
CustomModel._build_model(self.pool, cr)
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_rec_name='field_description'
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
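        # 'selection' arrives as source text; a valid value looks like
        # "[('draft', 'Draft'), ('done', 'Done')]" (illustrative options).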
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
def _size_gt_zero_msg(self, cr, user, ids, context=None):
return _('Size of the field can never be less than 0 !')
_sql_constraints = [
('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
# remove m2m relation table for custom fields
# we consider the m2m relation is only one way as it's not possible
# to specify the relation table in the interface for custom fields
# TODO master: maybe use ir.model.relations for custom fields
if field.state == 'manual' and field.ttype == 'many2many':
rel_name = model._fields[field.name].relation
cr.execute('DROP table "%s"' % (rel_name))
model._pop_field(field.name)
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# The field we just deleted might have be inherited, and registry is
# inconsistent in this case; therefore we reload the registry.
cr.commit()
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
if 'model_id' in vals:
model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
vals['model'] = model_data.model
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
if vals.get('ttype', False) == 'selection':
if not vals.get('selection',False):
raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
self._check_selection(cr, user, vals['selection'], context=context)
res = super(ir_model_fields,self).create(cr, user, vals, context)
if vals.get('state','base') == 'manual':
if not vals['name'].startswith('x_'):
raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
self.pool.clear_manual_fields()
if vals['model'] in self.pool:
model = self.pool[vals['model']]
if vals['model'].startswith('x_') and vals['name'] == 'x_name':
model._rec_name = 'x_name'
# re-initialize model in registry
model.__init__(self.pool, cr)
self.pool.setup_models(cr, partial=(not self.pool.ready))
# update database schema
model = self.pool[vals['model']]
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
#For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
if 'serialization_field_id' in vals or 'name' in vals:
for field in self.browse(cr, user, ids, context=context):
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise except_orm(_('Error!'), _('Changing the storing system for field "%s" is not allowed.')%field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise except_orm(_('Error!'), _('Renaming sparse field "%s" is not allowed')%field.name)
# if set, *one* column can be renamed here
column_rename = None
# field patches {model: {field_name: {prop_name: prop_value, ...}, ...}, ...}
patches = defaultdict(lambda: defaultdict(dict))
# static table of properties
model_props = [ # (our-name, fields.prop, set_fn)
('field_description', 'string', tools.ustr),
('required', 'required', bool),
('readonly', 'readonly', bool),
('domain', 'domain', eval),
('size', 'size', int),
('on_delete', 'ondelete', str),
('translate', 'translate', bool),
('select_level', 'index', lambda x: bool(int(x))),
('selection', 'selection', eval),
]
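        # e.g. writing {'required': True} on a manual field whose current
        # 'required' differs ends up as patches[obj][name]['required'] = True
        # (an illustrative walk-through of the loop below).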
if vals and ids:
checked_selection = False # need only check it once, so defer
for item in self.browse(cr, user, ids, context=context):
obj = self.pool.get(item.model)
field = getattr(obj, '_fields', {}).get(item.name)
if item.state != 'manual':
raise except_orm(_('Error!'),
_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if item.ttype == 'selection' and 'selection' in vals \
and not checked_selection:
self._check_selection(cr, user, vals['selection'], context=context)
checked_selection = True
final_name = item.name
if 'name' in vals and vals['name'] != item.name:
# We need to rename the column
if column_rename:
raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
if vals['name'] in obj._columns:
raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
if vals.get('state', 'base') == 'manual' and not vals['name'].startswith('x_'):
raise except_orm(_('Error!'), _('New column name must still start with x_ , because it is a custom field!'))
if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
raise ValueError('Invalid character in column name')
column_rename = (obj, (obj._table, item.name, vals['name']))
final_name = vals['name']
if 'model_id' in vals and vals['model_id'] != item.model_id.id:
raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))
if 'ttype' in vals and vals['ttype'] != item.ttype:
raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
"Please drop it and create it again!"))
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj is not None and field is not None:
# find out which properties (per model) we need to update
for field_name, prop_name, func in model_props:
if field_name in vals:
prop_value = func(vals[field_name])
if getattr(field, prop_name) != prop_value:
patches[obj][final_name][prop_name] = prop_value
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)
if column_rename:
obj, rename = column_rename
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % rename)
# This is VERY risky, but let us have this feature:
# we want to change the key of field in obj._fields and obj._columns
field = obj._pop_field(rename[1])
obj._add_field(rename[2], field)
self.pool.setup_models(cr, partial=(not self.pool.ready))
if patches:
# We have to update _columns of the model(s) and then call their
# _auto_init to sync the db with the model. Hopefully, since write()
# was called earlier, they will be in-sync before the _auto_init.
# Anything we don't update in _columns now will be reset from
# the model into ir.model.fields (db).
ctx = dict(context,
select=vals.get('select_level', '0'),
update_custom_fields=True,
)
for obj, model_patches in patches.iteritems():
for field_name, field_patches in model_patches.iteritems():
# update field properties, and adapt corresponding column
field = obj._fields[field_name]
attrs = dict(field._attrs, **field_patches)
obj._add_field(field_name, field.new(**attrs))
# update database schema
self.pool.setup_models(cr, partial=(not self.pool.ready))
obj._auto_init(cr, ctx)
obj._auto_end(cr, ctx) # actually create FKs!
if column_rename or patches:
RegistryManager.signal_registry_change(cr.dbname)
return res
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models.
"""
_name = 'ir.model.constraint'
_columns = {
'name': fields.char('Constraint', required=True, select=1,
help="PostgreSQL constraint or foreign key name."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
ids_set = set(ids)
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
model_obj = self.pool[model]
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
ids_set = set(ids)
to_drop_table = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
if cr.fetchone() and name not in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
cr.execute('DROP TABLE %s CASCADE' % table)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', required=True, select=True),
'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
def check_groups(self, cr, uid, group):
grouparr = group.split('.')
if not grouparr:
return False
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
return bool(cr.fetchone())
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model  # technical name, matching the m.model column queried below
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
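# For each group, prefer a group-specific ACL row; if none exists for the
# group, fall back to the global ACL (group_id IS NULL). Access is granted
# as soon as one group yields a positive permission.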
for group_id in group_ids:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
# The context parameter is useful when the method translates error messages.
# But as the method raises an exception in that case, the key 'lang' might
# not be really necessary as a cache key, unless the `ormcache_context`
# decorator catches the exception (it does not at the moment).
@tools.ormcache_context(accepted_keys=('lang',))
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
if uid==1:
# The root user has all access rights
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if model_name not in self.pool:
_logger.error('Missing model %s' % (model_name, ))
elif self.pool[model_name].is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise openerp.exceptions.AccessError(msg % msg_params)
return bool(r)
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
pass
def call_cache_clearing_methods(self, cr):
self.invalidate_cache(cr, SUPERUSER_ID)
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
if model in self.pool:
getattr(self.pool[model], method)()
#
# Check rights on actions
#
def write(self, cr, uid, ids, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
return res
def create(self, cr, uid, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, values, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def name_get(self, cr, uid, ids, context=None):
bymodel = defaultdict(dict)
names = {}
for res in self.browse(cr, uid, ids, context=context):
bymodel[res.model][res.res_id] = res
names[res.id] = res.complete_name
#result[res.model][res.res_id] = res.id
for model, id_map in bymodel.iteritems():
try:
ng = dict(self.pool[model].name_get(cr, uid, id_map.keys(), context=context))
except Exception:
pass
else:
for r in id_map.itervalues():
names[r.id] = ng.get(r.res_id, r.complete_name)
return [(i, names[i]) for i in ids]
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'model': fields.char('Model Name', required=True, select=1),
'module': fields.char('Module', required=True, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': fields.datetime.now,
'date_update': fields.datetime.now,
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
# put loads on the class, in order to share it among all instances
type(self).loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
# NEW V8 API
@tools.ormcache(skiparg=3)
def xmlid_lookup(self, cr, uid, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
if not ids:
raise ValueError('External ID not found in the system: %s' % (xmlid))
# the sql constraints ensure us we have only one result
res = self.read(cr, uid, ids[0], ['model', 'res_id'])
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % (xmlid))
return ids[0], res['model'], res['res_id']
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
""" Return a browse_record
if not found, raise ValueError when raise_if_not_found is True, otherwise return None
"""
t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.pool[res_model].browse(cr, uid, res_id, context=context)
if record.exists():
return record
if raise_if_not_found:
raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
return None
# OLD API
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
model, res_id = self.get_object_reference(cr, uid, module, xml_id)
#search on id found in result to check if current user has read access right
check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
if check_right:
return model, res_id
if raise_on_access_error:
raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
def get_object(self, cr, uid, module, xml_id, context=None):
""" Returns a browsable record for the given module name and xml_id.
If not found, raises ValueError (the lookup is performed with
raise_if_not_found=True).
"""
return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
id = False
try:
# One step to check the ID is defined and the record actually exists
record = self.get_object(cr, uid, module, xml_id)
if record:
id = record.id
self.loads[(module,xml_id)] = (model,id)
for table, inherit_field in self.pool[model]._inherits.iteritems():
parent_id = record[inherit_field].id
parent_xid = '%s_%s' % (xml_id, table.replace('.', '_'))
self.loads[(module, parent_xid)] = (table, parent_id)
except Exception:
pass
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self.xmlid_lookup.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
model_obj = self.pool[model]
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
action_id = False
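# Look up any existing ir.model.data entry for this xml_id. The LEFT JOIN
# against the target table detects dangling references (an imd row whose
# record was deleted), which are purged before deciding create vs. write.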
if xml_id:
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
# In update mode, do not update a record if it's ir.model.data is flagged as noupdate
if mode == 'update' and noupdate_imd:
return res_id2
if not real_id2:
self.clear_caches()
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, SUPERUSER_ID, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, SUPERUSER_ID, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, SUPERUSER_ID, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, SUPERUSER_ID, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, SUPERUSER_ID, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if xml_id and res_id:
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, [res_id],
[inherit_field])[0][inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
if isinstance(models[0], (list, tuple)):
model,res_id = models[0]
else:
res_id=None
model = models[0]
if res_id:
where = ' and res_id=%s' % (res_id,)
else:
where = ' and (res_id is null)'
if key2:
where += ' and key2=\'%s\'' % (key2,)
else:
where += ' and (key2 is null)'
cr.execute('select * from ir_values where model=%s and key=%s and name=%s'+where,(model, key, name))
res = cr.fetchone()
ir_values_obj = openerp.registry(cr.dbname)['ir.values']
if not res:
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
elif xml_id:
cr.execute('UPDATE ir_values set value=%s WHERE model=%s and key=%s and name=%s'+where,(value, model, key, name))
ir_values_obj.invalidate_cache(cr, uid, ['value'])
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
``ids`` along with their corresponding database-backed elements
(dropping tables, columns, FKs, etc.), as long as no other
ir.model.data entry holds a reference to them (which would indicate
that they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
self.invalidate_cache(cr, uid, context=context)
for model,res_id in wkf_todo:
try:
openerp.workflow.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
self.unlink(cr, uid, external_ids)
continue
if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool[model].unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.constraint')
ir_module_module = self.pool['ir.module.module']
ir_model_constraint = self.pool['ir.model.constraint']
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool['ir.model.relation']
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
It is meant to remove records that are no longer present in the
updated data. Such records are recognized as the ones with an xml id
and a module in ir_model_data, with noupdate set to false, but not
present in self.loads.
"""
if not modules or config.get('import_partial'):
return True
bad_imd_ids = []
context = {MODULE_UNINSTALL_FLAG: True}
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC
""", (tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module, name) not in self.loads:
if model in self.pool:
_logger.info('Deleting %s@%s (%s.%s)', res_id, model, module, name)
if self.pool[model].exists(cr, uid, [res_id], context=context):
self.pool[model].unlink(cr, uid, [res_id], context=context)
else:
bad_imd_ids.append(id)
if bad_imd_ids:
self.unlink(cr, uid, bad_imd_ids, context=context)
self.loads.clear()
class wizard_model_menu(osv.osv_memory):
_name = 'wizard.ir.model.menu.create'
_columns = {
'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
'name': fields.char('Menu Name', required=True),
}
def menu_create(self, cr, uid, ids, context=None):
if not context:
context = {}
model_pool = self.pool.get('ir.model')
for menu in self.browse(cr, uid, ids, context):
model = model_pool.browse(cr, uid, context.get('model_id'), context=context)
val = {
'name': menu.name,
'res_model': model.model,
'view_type': 'form',
'view_mode': 'tree,form'
}
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, val)
self.pool.get('ir.ui.menu').create(cr, uid, {
'name': menu.name,
'parent_id': menu.menu_id.id,
'action': 'ir.actions.act_window,%d' % (action_id,),
'icon': 'STOCK_INDENT'
}, context)
return {'type':'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,361,404,553,823,068,700 | 47.31187 | 207 | 0.558371 | false |
upworkgluu71/CE_v1 | genSchemaMarkdown.py | 3 | 3430 | #!/usr/bin/python
# MIT License
#
# Copyright 2014 Gluu, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This program is used to generate the documentation for the Gluu Server schema found on
# http://www.gluu.org/docs/reference/ldap/schema
import sys
import string
from ldif import LDIFParser, LDIFWriter
class SchemaParser(LDIFParser):
def __init__(self, input):
LDIFParser.__init__(self, input)
self.objectclasses = {}
self.attributes = {}
def __repr__(self):
s = ''
oc_list = self.objectclasses.keys()
oc_list.sort()
for name in oc_list:
desc = self.objectclasses[name][0]
attrs = self.objectclasses[name][1]
s = s + "### Objectclass %s\n" % name
s = s + " * __Description__ %s\n" % desc
for attr in attrs:
attrDesc = ''
if self.attributes.has_key(attr):
attrDesc = self.attributes[attr]
s = s + " * __%s__ %s\n" % (attr, attrDesc)
s = s + "\n"
return s
def handle(self, dn, entry):
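# Invoked by LDIFParser for every entry: collect each attribute's DESC text
# and, for every objectclass, its description and list of MAY attributes.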
attributeTypes = entry['attributeTypes']
objectclasses = entry['objectclasses']
for attr in attributeTypes:
desc = self.getDESC(attr)
name_list = self.getName(attr)
for name in name_list:
self.attributes[name] = desc
for oc in objectclasses:
name = self.getName(oc)[0] # Assumes OC has one name
desc = self.getDESC(oc)
if not desc:
desc = ''
mays = self.getMays(oc)
self.objectclasses[name] = (desc, mays)
def getMays(self, oc):
mays = oc.split('MAY')[1].split('(')[1].split(')')[0].split('$')
return map(string.strip, mays)
def getDESC(self, s):
desc = None
try:
desc = s.split('DESC')[1].split("'")[1]
except:
pass
return desc
def getName(self,s):
name_list = None
try:
name_string = s.split('NAME')[1].split("'")[1].strip()
name_list = name_string.split(" ")
except:
pass
return name_list
if __name__ == '__main__':
input_ldif = './static/opendj/101-ox.ldif'
output_md = "./ldap-schema-table.md"
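# note: output_md is currently unused; the generated markdown is printed to stdout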
parser = SchemaParser(open(input_ldif, 'rb'))
parser.parse()
print parser
| mit | 5,584,636,900,356,251,000 | 34 | 97 | 0.604665 | false |
efajardo/osg-test | osgtest/tests/test_170_pbs.py | 1 | 5425 | import os
import time
from datetime import date
import osgtest.library.core as core
import osgtest.library.files as files
import osgtest.library.osgunittest as osgunittest
import osgtest.library.service as service
class TestStartPBS(osgunittest.OSGTestCase):
pbs_config = """
create queue batch queue_type=execution
set queue batch started=true
set queue batch enabled=true
set queue batch resources_default.nodes=1
set queue batch resources_default.walltime=3600
set server default_queue=batch
set server keep_completed = 600
set server job_nanny = True
set server scheduling=true
set server acl_hosts += *
set server acl_host_enable = True
"""
required_rpms = ['torque',
'torque-mom',
'torque-server',
'torque-scheduler',
'torque-client', # for qmgr
'munge']
def test_01_start_mom(self):
core.state['pbs_mom.started-service'] = False
core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
self.skip_ok_if(service.is_running('pbs_mom'), 'PBS mom already running')
core.config['torque.mom-config'] = '/var/lib/torque/mom_priv/config'
files.write(core.config['torque.mom-config'],
"$pbsserver %s\n" % core.get_hostname(),
owner='pbs')
core.config['torque.mom-layout'] = '/var/lib/torque/mom_priv/mom.layout'
files.write(core.config['torque.mom-layout'],
"nodes=0",
owner='pbs')
service.check_start('pbs_mom')
def test_02_start_pbs_sched(self):
core.state['pbs_sched.started-service'] = False
core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
self.skip_ok_if(service.is_running('pbs_sched'), 'PBS sched already running')
service.check_start('pbs_sched')
def test_03_start_trqauthd(self):
core.state['trqauthd.started-service'] = False
core.config['torque.pbs-servername-file'] = '/var/lib/torque/server_name'
core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
self.skip_ok_if(service.is_running('trqauthd'), 'trqauthd is already running')
# set hostname as servername instead of localhost
# config required before starting trqauthd
files.write(core.config['torque.pbs-servername-file'],
"%s" % core.get_hostname(),
owner='pbs')
service.check_start('trqauthd')
def test_04_configure_pbs(self):
core.config['torque.pbs-nodes-file'] = '/var/lib/torque/server_priv/nodes'
core.config['torque.pbs-serverdb'] = '/var/lib/torque/server_priv/serverdb'
core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
self.skip_bad_unless(service.is_running('trqauthd'), 'pbs_server requires trqauthd')
self.skip_ok_if(service.is_running('pbs_server'), 'pbs server already running')
files.preserve(core.config['torque.pbs-serverdb'], 'pbs')
if not os.path.exists(core.config['torque.pbs-serverdb']):
command = ('/usr/sbin/pbs_server -d /var/lib/torque -t create -f && '
'sleep 10 && /usr/bin/qterm')
stdout, _, fail = core.check_system(command, 'create initial pbs serverdb config', shell=True)
self.assert_(stdout.find('error') == -1, fail)
# This gets wiped if we write it before the initial 'service pbs_server create'
# However, this file needs to be in place before the service is started so we
# restart the service after 'initial configuration'
files.write(core.config['torque.pbs-nodes-file'], # add the local node as a compute node
"%s np=1 num_node_boards=1\n" % core.get_hostname(),
owner='pbs')
def test_05_start_pbs(self):
core.state['pbs_server.started-service'] = False
core.state['torque.nodes-up'] = False
core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
self.skip_bad_unless(service.is_running('trqauthd'), 'pbs_server requires trqauthd')
self.skip_ok_if(service.is_running('pbs_server'), 'pbs server already running')
server_log = '/var/log/torque/server_logs/' + date.today().strftime('%Y%m%d')
try:
server_log_stat = os.stat(server_log)
except OSError:
server_log_stat = None
service.check_start('pbs_server')
# Wait until the server is up before writing the rest of the config
core.monitor_file(server_log, server_log_stat, '.*Server Ready.*', 60.0)
core.check_system("echo '%s' | qmgr %s" % (self.pbs_config, core.get_hostname()),
"Configuring pbs server",
shell=True)
# wait up to 10 minutes (600 seconds) for the server to recognize the node
start_time = time.time()
while (time.time() - start_time) < 600:
command = ('/usr/bin/qnodes', '-s', core.get_hostname())
stdout, _, fail = core.check_system(command, 'Get pbs node info')
self.assert_(stdout.find('error') == -1, fail)
if stdout.find('state = free') != -1:  # find() returns -1 when absent, which is truthy
core.state['torque.nodes-up'] = True
break
if not core.state['torque.nodes-up']:
self.fail('PBS nodes not coming up')
| apache-2.0 | 1,993,299,584,414,067,700 | 44.208333 | 106 | 0.619171 | false |
camargoanderso/thumbor | thumbor/storages/redis_storage.py | 11 | 4960 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import logging
import inspect
from json import loads, dumps
from datetime import datetime, timedelta
from redis import Redis, RedisError
from thumbor.storages import BaseStorage
from thumbor.utils import on_exception
from tornado.concurrent import return_future
logger = logging.getLogger('thumbor')
class Storage(BaseStorage):
storage = None
def __init__(self, context, shared_client=True):
'''Initialize the RedisStorage
:param thumbor.context.Context context: Current context
:param boolean shared_client: When set to True a singleton client will
be used.
'''
BaseStorage.__init__(self, context)
self.shared_client = shared_client
self.storage = self.reconnect_redis()
def get_storage(self):
'''Get the storage instance.
:return Redis: Redis instance
'''
if self.storage:
return self.storage
self.storage = self.reconnect_redis()
return self.storage
def reconnect_redis(self):
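# Reuse the class-level singleton connection when shared_client is set;
# otherwise build a fresh Redis client from the configured host/port/db.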
if self.shared_client and Storage.storage:
return Storage.storage
storage = Redis(
port=self.context.config.REDIS_STORAGE_SERVER_PORT,
host=self.context.config.REDIS_STORAGE_SERVER_HOST,
db=self.context.config.REDIS_STORAGE_SERVER_DB,
password=self.context.config.REDIS_STORAGE_SERVER_PASSWORD
)
if self.shared_client:
Storage.storage = storage
return storage
def on_redis_error(self, fname, exc_type, exc_value):
'''Callback executed when there is a redis error.
:param string fname: Function name that was being called.
:param type exc_type: Exception type
:param Exception exc_value: The current exception
:returns: Default value or raise the current exception
'''
if self.shared_client:
Storage.storage = None
else:
self.storage = None
if self.context.config.REDIS_STORAGE_IGNORE_ERRORS is True:
logger.error("[REDIS_STORAGE] %s" % exc_value)
if fname == '_exists':
return False
return None
else:
raise exc_value
def __key_for(self, url):
return 'thumbor-crypto-%s' % url
def __detector_key_for(self, url):
return 'thumbor-detector-%s' % url
@on_exception(on_redis_error, RedisError)
def put(self, path, bytes):
storage = self.get_storage()
storage.set(path, bytes)
storage.expireat(
path, datetime.now() + timedelta(
seconds=self.context.config.STORAGE_EXPIRATION_SECONDS
)
)
@on_exception(on_redis_error, RedisError)
def put_crypto(self, path):
if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
return
if not self.context.server.security_key:
raise RuntimeError(
"STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be True if no "
"SECURITY_KEY specified"
)
key = self.__key_for(path)
self.get_storage().set(key, self.context.server.security_key)
@on_exception(on_redis_error, RedisError)
def put_detector_data(self, path, data):
key = self.__detector_key_for(path)
self.get_storage().set(key, dumps(data))
@return_future
def get_crypto(self, path, callback):
callback(self._get_crypto(path))
@on_exception(on_redis_error, RedisError)
def _get_crypto(self, path):
if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
return None
crypto = self.get_storage().get(self.__key_for(path))
if not crypto:
return None
return crypto
@return_future
def get_detector_data(self, path, callback):
callback(self._get_detector_data(path))
@on_exception(on_redis_error, RedisError)
def _get_detector_data(self, path):
data = self.get_storage().get(self.__detector_key_for(path))
if not data:
return None
return loads(data)
@return_future
def exists(self, path, callback):
callback(self._exists(path))
@on_exception(on_redis_error, RedisError)
def _exists(self, path):
return self.get_storage().exists(path)
@on_exception(on_redis_error, RedisError)
def remove(self, path):
self.get_storage().delete(path)
@return_future
def get(self, path, callback):
@on_exception(self.on_redis_error, RedisError)
def wrap():
return self.get_storage().get(path)
callback(wrap())
| mit | 6,211,598,423,491,158,000 | 28.005848 | 78 | 0.617339 | false |
Earthson/RecGen | recgen.py | 2 | 4323 | #!/usr/bin/env python
import sys
import types
from traceback import format_exc
def rec_gen(func, callback=None, err_callback=None):
'''
callback receives the output of func as its argument
err_callback is called after an Exception occurs and receives the Exception instance as its argument
'''
def trans_func(*args, **kwargs):
def error_do(e):
print('@rec_func_error:', e, file=sys.stderr)
if err_callback is not None:
err_callback(e)
try:
g = func(*args, **kwargs)
except Exception as e:
error_do(e)
return
if not isinstance(g, types.GeneratorType):
#return if g is not generator
if callback is not None:
callback(g)
return
#ans = []
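# Trampoline driving the generator: each yielded RecTask is run
# asynchronously and its result is sent back into the generator via
# g.send(); StopIteration.value carries the function's return value.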
def go_through(it=None):
try:
em = g.send(it)
if not hasattr(em, 'dojob'):
#ans.append(em)
go_through(None)
else:
em.dojob(callback=go_through, err_callback=error_do)
except StopIteration as st:
if callback is not None:
callback(st.value)
return
except Exception as e:
g.close()
error_do(e)
return
go_through()
return trans_func
from functools import partial
class RecTask(object):
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def dojob(self, callback=None, err_callback=None):
self.run(self.transform(partial(rec_gen, callback=callback, err_callback=err_callback)))
def transform(self, f):
return f(self.func)
def run(self, func=None):
if func is None:
func = self.func
return func(*self.args, **self.kwargs)
class TaskWithRetry(RecTask):
retry_limit = 1
def dojob(self, callback=None, err_callback=None):
try_cnt = 0
def ierr(e, *args, **kwargs):
nonlocal try_cnt
if not hasattr(self, 'retry_limit') or try_cnt > self.retry_limit:
print('@error: overflow retry limit! task has complete failed', file=sys.stderr)
if err_callback is not None:
return err_callback(e, *args, **kwargs)
return
try_cnt += 1
print('@warning_retry: retry count: %s, %s' % (try_cnt, e), file=sys.stderr)
self.run(self.transform(partial(rec_gen, callback=callback, err_callback=ierr)))
self.run(self.transform(partial(rec_gen, callback=callback, err_callback=ierr)))
class MapTask(object):
def __init__(self, *tasks):
self.tasks = list(tasks)
def dojob(self, callback=None, err_callback=None):
self.ans = [None for e in self.tasks]
self.flags = [False for e in self.tasks]
self.cnt = len(self.tasks)
self.todo = callback
self.apply_tasks(err_callback=err_callback)
def apply_tasks(self, err_callback):
for i, e in zip(range(len(self.tasks)), self.tasks):
e.dojob(callback=self.acker(i), err_callback=err_callback)
def acker(self, posi):
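# Build a per-subtask acknowledger: it stores the result at its slot and,
# once the last outstanding subtask reports in, fires the original
# callback with the combined tuple of results (a fan-in join).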
def ack(x):
if self.flags[posi] is False:
self.flags[posi] = True
self.ans[posi] = x
self.cnt -= 1
if self.cnt == 0:
if self.todo is not None:
self.todo(tuple(self.ans))
return ack
class HTTPTask(TaskWithRetry):
def __init__(self, sender, req, callback):
self.sender = sender
self.req = req
self.callback = callback
self.retry_limit = 10
def transform(self, f):
return f(self.callback)
def run(self, callback=None):
if callback is None:
callback = self.callback
self.sender(self.req, callback)
if __name__ == '__main__':
sys.setrecursionlimit(1000000000)
def fib(n):
if n <= 1:
return n
return (yield RecTask(fib, n-1)) + (yield RecTask(fib, n-2))
#x, y = yield MapTask(RecTask(fib, n-1), RecTask(fib, n-2))
#return x + y
pfib = rec_gen(fib, lambda x: print(x))
pfib(25)
| mit | -5,362,806,630,386,945,000 | 29.878571 | 96 | 0.547768 | false |
mdworks2016/work_development | Python/05_FirstPython/Chapter9_WebApp/fppython_develop/lib/python3.7/site-packages/pip/_vendor/pep517/wrappers.py | 22 | 5912 | from contextlib import contextmanager
import os
from os.path import dirname, abspath, join as pjoin
import shutil
from subprocess import check_call
import sys
from tempfile import mkdtemp
from . import compat
_in_proc_script = pjoin(dirname(abspath(__file__)), '_in_process.py')
@contextmanager
def tempdir():
td = mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
class BackendUnavailable(Exception):
"""Will be raised if the backend cannot be imported in the hook process."""
class UnsupportedOperation(Exception):
"""May be raised by build_sdist if the backend indicates that it can't."""
def default_subprocess_runner(cmd, cwd=None, extra_environ=None):
"""The default method of calling the wrapper subprocess."""
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
check_call(cmd, cwd=cwd, env=env)
class Pep517HookCaller(object):
"""A wrapper around a source directory to be built with a PEP 517 backend.
source_dir : The path to the source directory, containing pyproject.toml.
backend : The build backend spec, as per PEP 517, from pyproject.toml.
"""
def __init__(self, source_dir, build_backend):
self.source_dir = abspath(source_dir)
self.build_backend = build_backend
self._subprocess_runner = default_subprocess_runner
# TODO: Is this over-engineered? Maybe frontends only need to
# set this when creating the wrapper, not on every call.
@contextmanager
def subprocess_runner(self, runner):
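# Context manager: temporarily swap in a custom runner for hook
# subprocesses, restoring the previous one on exit.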
prev = self._subprocess_runner
self._subprocess_runner = runner
yield
self._subprocess_runner = prev
def get_requires_for_build_wheel(self, config_settings=None):
"""Identify packages required for building a wheel
Returns a list of dependency specifications, e.g.:
["wheel >= 0.25", "setuptools"]
This does not include requirements specified in pyproject.toml.
It returns the result of calling the equivalently named hook in a
subprocess.
"""
return self._call_hook('get_requires_for_build_wheel', {
'config_settings': config_settings
})
def prepare_metadata_for_build_wheel(
self, metadata_directory, config_settings=None):
"""Prepare a *.dist-info folder with metadata for this project.
Returns the name of the newly created folder.
If the build backend defines a hook with this name, it will be called
in a subprocess. If not, the backend will be asked to build a wheel,
and the dist-info extracted from that.
"""
return self._call_hook('prepare_metadata_for_build_wheel', {
'metadata_directory': abspath(metadata_directory),
'config_settings': config_settings,
})
def build_wheel(
self, wheel_directory, config_settings=None,
metadata_directory=None):
"""Build a wheel from this project.
Returns the name of the newly created file.
In general, this will call the 'build_wheel' hook in the backend.
However, if that was previously called by
'prepare_metadata_for_build_wheel', and the same metadata_directory is
used, the previously built wheel will be copied to wheel_directory.
"""
if metadata_directory is not None:
metadata_directory = abspath(metadata_directory)
return self._call_hook('build_wheel', {
'wheel_directory': abspath(wheel_directory),
'config_settings': config_settings,
'metadata_directory': metadata_directory,
})
def get_requires_for_build_sdist(self, config_settings=None):
"""Identify packages required for building a wheel
Returns a list of dependency specifications, e.g.:
["setuptools >= 26"]
This does not include requirements specified in pyproject.toml.
It returns the result of calling the equivalently named hook in a
subprocess.
"""
return self._call_hook('get_requires_for_build_sdist', {
'config_settings': config_settings
})
def build_sdist(self, sdist_directory, config_settings=None):
"""Build an sdist from this project.
Returns the name of the newly created file.
This calls the 'build_sdist' backend hook in a subprocess.
"""
return self._call_hook('build_sdist', {
'sdist_directory': abspath(sdist_directory),
'config_settings': config_settings,
})
def _call_hook(self, hook_name, kwargs):
# On Python 2, pytoml returns Unicode values (which is correct) but the
# environment passed to check_call needs to contain string values. We
# convert here by encoding using ASCII (the backend can only contain
# letters, digits and _, . and : characters, and will be used as a
# Python identifier, so non-ASCII content is wrong on Python 2 in
# any case).
if sys.version_info[0] == 2:
build_backend = self.build_backend.encode('ASCII')
else:
build_backend = self.build_backend
with tempdir() as td:
compat.write_json({'kwargs': kwargs}, pjoin(td, 'input.json'),
indent=2)
# Run the hook in a subprocess
self._subprocess_runner(
[sys.executable, _in_proc_script, hook_name, td],
cwd=self.source_dir,
extra_environ={'PEP517_BUILD_BACKEND': build_backend}
)
data = compat.read_json(pjoin(td, 'output.json'))
if data.get('unsupported'):
raise UnsupportedOperation
if data.get('no_backend'):
raise BackendUnavailable
return data['return_val']
| apache-2.0 | -6,558,537,575,456,037,000 | 35.269939 | 79 | 0.633457 | false |
chipx86/reviewboard | reviewboard/diffviewer/filetypes.py | 10 | 2625 | from __future__ import unicode_literals
import re
# A list of regular expressions for headers in the source code that we can
# display in collapsed regions of diffs and diff fragments in reviews.
HEADER_REGEXES = {
'.cs': [
re.compile(
r'^\s*((public|private|protected|static)\s+)+'
r'([a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?' # return arguments
r'[a-zA-Z_][a-zA-Z0-9_]*' # method name
r'\s*\(' # signature start
),
re.compile(
r'^\s*('
r'(public|static|private|protected|internal|abstract|partial)'
r'\s+)*'
r'(class|struct)\s+([A-Za-z0-9_])+'
),
],
# This can match C/C++/Objective C header files
'.c': [
re.compile(r'^@(interface|implementation|class|protocol)'),
re.compile(r'^[A-Za-z0-9$_]'),
],
'.java': [
re.compile(
r'^\s*((public|private|protected|static)\s+)+'
r'([a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?' # return arguments
r'[a-zA-Z_][a-zA-Z0-9_]*' # method name
r'\s*\(' # signature start
),
re.compile(
r'^\s*('
r'(public|static|private|protected)'
r'\s+)*'
r'(class|struct)\s+([A-Za-z0-9_])+'
),
],
'.js': [
re.compile(r'^\s*function [A-Za-z0-9_]+\s*\('),
re.compile(r'^\s*(var\s+)?[A-Za-z0-9_]+\s*[=:]\s*function\s*\('),
],
'.m': [
re.compile(r'^@(interface|implementation|class|protocol)'),
re.compile(r'^[-+]\s+\([^\)]+\)\s+[A-Za-z0-9_]+[^;]*$'),
re.compile(r'^[A-Za-z0-9$_]'),
],
'.php': [
re.compile(r'^\s*(public|private|protected)?\s*'
r'(class|function) [A-Za-z0-9_]+'),
],
'.pl': [
re.compile(r'^\s*sub [A-Za-z0-9_]+'),
],
'.py': [
re.compile(r'^\s*(def|class) [A-Za-z0-9_]+\s*\(?'),
],
'.rb': [
re.compile(r'^\s*(def|class) [A-Za-z0-9_]+\s*\(?'),
],
}
HEADER_REGEX_ALIASES = {
# C/C++/Objective-C
'.cc': '.c',
'.cpp': '.c',
'.cxx': '.c',
'.c++': '.c',
'.h': '.c',
'.hh': '.c',
'.hpp': '.c',
'.hxx': '.c',
'.h++': '.c',
'.C': '.c',
'.H': '.c',
'.mm': '.m',
# Perl
'.pm': '.pl',
# Python
'SConstruct': '.py',
'SConscript': '.py',
'.pyw': '.py',
'.sc': '.py',
# Ruby
'Rakefile': '.rb',
'.rbw': '.rb',
'.rake': '.rb',
'.gemspec': '.rb',
'.rbx': '.rb',
}
| mit | -7,895,959,472,329,690,000 | 26.061856 | 74 | 0.400762 | false |
mohamed--abdel-maksoud/chromium.src | third_party/libxml/src/check-xinclude-test-suite.py | 347 | 5333 | #!/usr/bin/python
import sys
import time
import os
import string
sys.path.insert(0, "python")
import libxml2
#
# the testsuite description
#
DIR="xinclude-test-suite"
CONF="testdescr.xml"
LOG="check-xinclude-test-suite.log"
log = open(LOG, "w")
os.chdir(DIR)
test_nr = 0
test_succeed = 0
test_failed = 0
test_error = 0
#
# Error and warning handlers
#
error_nr = 0
error_msg = ''
def errorHandler(ctx, str):
global error_nr
global error_msg
if string.find(str, "error:") >= 0:
error_nr = error_nr + 1
if len(error_msg) < 300:
if len(error_msg) == 0 or error_msg[-1] == '\n':
error_msg = error_msg + " >>" + str
else:
error_msg = error_msg + str
libxml2.registerErrorHandler(errorHandler, None)
def testXInclude(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
print "testXInclude(%s, %s)" % (filename, id)
return 1
def runTest(test, basedir):
global test_nr
global test_failed
global test_error
global test_succeed
global error_msg
global log
fatal_error = 0
uri = test.prop('href')
id = test.prop('id')
type = test.prop('type')
if uri == None:
print "Test without ID:", uri
return -1
if id == None:
print "Test without URI:", id
return -1
if type == None:
print "Test without URI:", id
return -1
if basedir != None:
URI = basedir + "/" + uri
else:
URI = uri
if os.access(URI, os.R_OK) == 0:
print "Test %s missing: base %s uri %s" % (URI, basedir, uri)
return -1
expected = None
outputfile = None
diff = None
if type != 'error':
output = test.xpathEval('string(output)')
if output == 'No output file.':
output = None
if output == '':
output = None
if output != None:
if basedir != None:
output = basedir + "/" + output
if os.access(output, os.R_OK) == 0:
print "Result for %s missing: %s" % (id, output)
output = None
else:
try:
f = open(output)
expected = f.read()
outputfile = output
except:
print "Result for %s unreadable: %s" % (id, output)
try:
# print "testing %s" % (URI)
doc = libxml2.parseFile(URI)
except:
doc = None
if doc != None:
res = doc.xincludeProcess()
if res >= 0 and expected != None:
result = doc.serialize()
if result != expected:
print "Result for %s differs" % (id)
open("xinclude.res", "w").write(result)
diff = os.popen("diff %s xinclude.res" % outputfile).read()
doc.freeDoc()
else:
print "Failed to parse %s" % (URI)
res = -1
test_nr = test_nr + 1
if type == 'success':
if res > 0:
test_succeed = test_succeed + 1
elif res == 0:
test_failed = test_failed + 1
print "Test %s: no substitution done ???" % (id)
elif res < 0:
test_error = test_error + 1
print "Test %s: failed valid XInclude processing" % (id)
elif type == 'error':
if res > 0:
test_error = test_error + 1
print "Test %s: failed to detect invalid XInclude processing" % (id)
elif res == 0:
test_failed = test_failed + 1
print "Test %s: Invalid but no substitution done" % (id)
elif res < 0:
test_succeed = test_succeed + 1
elif type == 'optional':
if res > 0:
test_succeed = test_succeed + 1
else:
print "Test %s: failed optional test" % (id)
# Log the context
if res != 1:
log.write("Test ID %s\n" % (id))
log.write(" File: %s\n" % (URI))
content = string.strip(test.content)
while content[-1] == '\n':
content = content[0:-1]
log.write(" %s:%s\n\n" % (type, content))
if error_msg != '':
log.write(" ----\n%s ----\n" % (error_msg))
error_msg = ''
log.write("\n")
if diff != None:
log.write("diff from test %s:\n" %(id))
log.write(" -----------\n%s\n -----------\n" % (diff));
return 0
def runTestCases(case):
creator = case.prop('creator')
if creator != None:
print "=>", creator
base = case.getBase(None)
basedir = case.prop('basedir')
if basedir != None:
base = libxml2.buildURI(basedir, base)
test = case.children
while test != None:
if test.name == 'testcase':
runTest(test, base)
if test.name == 'testcases':
runTestCases(test)
test = test.next
conf = libxml2.parseFile(CONF)
if conf == None:
print "Unable to load %s" % CONF
sys.exit(1)
testsuite = conf.getRootElement()
if testsuite.name != 'testsuite':
print "Expecting TESTSUITE root element: aborting"
sys.exit(1)
profile = testsuite.prop('PROFILE')
if profile != None:
print profile
start = time.time()
case = testsuite.children
while case != None:
if case.name == 'testcases':
old_test_nr = test_nr
old_test_succeed = test_succeed
old_test_failed = test_failed
old_test_error = test_error
runTestCases(case)
print " Ran %d tests: %d suceeded, %d failed and %d generated an error" % (
test_nr - old_test_nr, test_succeed - old_test_succeed,
test_failed - old_test_failed, test_error - old_test_error)
case = case.next
conf.freeDoc()
log.close()
print "Ran %d tests: %d suceeded, %d failed and %d generated an error in %.2f s." % (
test_nr, test_succeed, test_failed, test_error, time.time() - start)
| bsd-3-clause | 2,339,830,988,688,862,700 | 23.131222 | 85 | 0.591224 | false |
sdiehl/numpile | numpile.py | 1 | 29002 | # LLVM based numeric specializer in 1000 lines.
from __future__ import print_function
from functools import reduce
import sys
import ast
import types
import ctypes
import inspect
import pprint
import string
import numpy as np
from textwrap import dedent
from collections import deque, defaultdict
from llvmlite import ir
import llvmlite.binding as llvm
DEBUG = False
### == Core Language ==
class Var(ast.AST):
_fields = ["id", "type"]
def __init__(self, id, type=None):
self.id = id
self.type = type
class Assign(ast.AST):
_fields = ["ref", "val", "type"]
def __init__(self, ref, val, type=None):
self.ref = ref
self.val = val
self.type = type
class Return(ast.AST):
_fields = ["val"]
def __init__(self, val):
self.val = val
class Loop(ast.AST):
_fields = ["var", "begin", "end", "body"]
def __init__(self, var, begin, end, body):
self.var = var
self.begin = begin
self.end = end
self.body = body
class App(ast.AST):
_fields = ["fn", "args"]
def __init__(self, fn, args):
self.fn = fn
self.args = args
class Fun(ast.AST):
_fields = ["fname", "args", "body"]
def __init__(self, fname, args, body):
self.fname = fname
self.args = args
self.body = body
class LitInt(ast.AST):
_fields = ["n"]
def __init__(self, n, type=None):
self.n = n
self.type = type
class LitFloat(ast.AST):
_fields = ["n"]
def __init__(self, n, type=None):
self.n = n
self.type = None
class LitBool(ast.AST):
_fields = ["n"]
def __init__(self, n):
self.n = n
class Prim(ast.AST):
_fields = ["fn", "args"]
def __init__(self, fn, args):
self.fn = fn
self.args = args
class Index(ast.AST):
_fields = ["val", "ix"]
def __init__(self, val, ix):
self.val = val
self.ix = ix
class Noop(ast.AST):
_fields = []
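# Maps Python binary-operator AST classes to the core language's primitive names.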
primops = {ast.Add: "add#", ast.Mult: "mult#"}
### == Type System ==
class TVar(object):
def __init__(self, s):
self.s = s
def __hash__(self):
return hash(self.s)
def __eq__(self, other):
if isinstance(other, TVar):
return (self.s == other.s)
else:
return False
def __str__(self):
return self.s
class TCon(object):
def __init__(self, s):
self.s = s
def __eq__(self, other):
if isinstance(other, TCon):
return (self.s == other.s)
else:
return False
def __hash__(self):
return hash(self.s)
def __str__(self):
return self.s
class TApp(object):
def __init__(self, a, b):
self.a = a
self.b = b
def __eq__(self, other):
if isinstance(other, TApp):
return (self.a == other.a) & (self.b == other.b)
else:
return False
def __hash__(self):
return hash((self.a, self.b))
def __str__(self):
return str(self.a) + " " + str(self.b)
class TFun(object):
def __init__(self, argtys, retty):
assert isinstance(argtys, list)
self.argtys = argtys
self.retty = retty
def __eq__(self, other):
if isinstance(other, TFun):
return (self.argtys == other.argtys) & (self.retty == other.retty)
else:
return False
def __str__(self):
return str(self.argtys) + " -> " + str(self.retty)
def ftv(x):
if isinstance(x, TCon):
return set()
elif isinstance(x, TApp):
return ftv(x.a) | ftv(x.b)
elif isinstance(x, TFun):
return reduce(set.union, map(ftv, x.argtys)) | ftv(x.retty)
elif isinstance(x, TVar):
return set([x])
def is_array(ty):
return isinstance(ty, TApp) and ty.a == TCon("Array")
int32 = TCon("Int32")
int64 = TCon("Int64")
float32 = TCon("Float")
double64 = TCon("Double")
void = TCon("Void")
array = lambda t: TApp(TCon("Array"), t)
array_int32 = array(int32)
array_int64 = array(int64)
array_double64 = array(double64)
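# Illustrative examples (added; not in the original source) of how these
# type constructors compose and print:
#   str(array(int32))         # => 'Array Int32'
#   is_array(array_double64)  # => True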
### == Type Inference ==
def naming():
k = 0
while True:
for a in string.ascii_lowercase:
yield ("'"+a+str(k)) if (k > 0) else (a)
k = k+1
class TypeInfer(object):
def __init__(self):
self.constraints = []
self.env = {}
self.names = naming()
def fresh(self):
return TVar('$' + next(self.names)) # New meta type variable.
def visit(self, node):
name = "visit_%s" % type(node).__name__
if hasattr(self, name):
return getattr(self, name)(node)
else:
return self.generic_visit(node)
def visit_Fun(self, node):
self.argtys = [self.fresh() for v in node.args]
self.retty = TVar("$retty")
for (arg, ty) in zip(node.args, self.argtys):
arg.type = ty
self.env[arg.id] = ty
list(map(self.visit, node.body))
return TFun(self.argtys, self.retty)
def visit_Noop(self, node):
return None
def visit_LitInt(self, node):
tv = self.fresh()
node.type = tv
return tv
def visit_LitFloat(self, node):
tv = self.fresh()
node.type = tv
return tv
def visit_Assign(self, node):
ty = self.visit(node.val)
if node.ref in self.env:
# Subsequent uses of a variable must have the same type.
self.constraints += [(ty, self.env[node.ref])]
self.env[node.ref] = ty
node.type = ty
return None
def visit_Index(self, node):
tv = self.fresh()
ty = self.visit(node.val)
ixty = self.visit(node.ix)
self.constraints += [(ty, array(tv)), (ixty, int32)]
return tv
def visit_Prim(self, node):
if node.fn == "shape#":
return array(int32)
elif node.fn == "mult#":
tya = self.visit(node.args[0])
tyb = self.visit(node.args[1])
self.constraints += [(tya, tyb)]
return tyb
elif node.fn == "add#":
tya = self.visit(node.args[0])
tyb = self.visit(node.args[1])
self.constraints += [(tya, tyb)]
return tyb
else:
raise NotImplementedError
def visit_Var(self, node):
ty = self.env[node.id]
node.type = ty
return ty
def visit_Return(self, node):
ty = self.visit(node.val)
self.constraints += [(ty, self.retty)]
def visit_Loop(self, node):
self.env[node.var.id] = int32
varty = self.visit(node.var)
begin = self.visit(node.begin)
end = self.visit(node.end)
self.constraints += [(varty, int32), (
begin, int64), (end, int32)]
list(map(self.visit, node.body))
def generic_visit(self, node):
raise NotImplementedError
class UnderDetermined(Exception):
def __str__(self):
return "The types in the function are not fully determined by the \
input types. Add annotations."
class InferError(Exception):
def __init__(self, ty1, ty2):
self.ty1 = ty1
self.ty2 = ty2
def __str__(self):
return '\n'.join([
"Type mismatch: ",
"Given: ", "\t" + str(self.ty1),
"Expected: ", "\t" + str(self.ty2)
])
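# The occurs check in bind() below raises InfiniteType, which this file never
# defines; a minimal definition (added here, not in the original) keeps that
# error path from failing with a NameError.
class InfiniteType(Exception):
    def __init__(self, name, ty):
        self.name = name
        self.ty = ty
    def __str__(self):
        return "Cannot construct the infinite type: %s = %s" % (self.name, self.ty)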
### == Constraint Solver ==
def empty():
return {}
def apply(s, t):
if isinstance(t, TCon):
return t
elif isinstance(t, TApp):
return TApp(apply(s, t.a), apply(s, t.b))
elif isinstance(t, TFun):
argtys = [apply(s, a) for a in t.argtys]
retty = apply(s, t.retty)
return TFun(argtys, retty)
elif isinstance(t, TVar):
return s.get(t.s, t)
def applyList(s, xs):
return [(apply(s, x), apply(s, y)) for (x, y) in xs]
def unify(x, y):
if isinstance(x, TApp) and isinstance(y, TApp):
s1 = unify(x.a, y.a)
s2 = unify(apply(s1, x.b), apply(s1, y.b))
return compose(s2, s1)
elif isinstance(x, TCon) and isinstance(y, TCon) and (x == y):
return empty()
elif isinstance(x, TFun) and isinstance(y, TFun):
if len(x.argtys) != len(y.argtys):
raise Exception("Wrong number of arguments")
s1 = solve(zip(x.argtys, y.argtys))
s2 = unify(apply(s1, x.retty), apply(s1, y.retty))
return compose(s2, s1)
elif isinstance(x, TVar):
return bind(x.s, y)
elif isinstance(y, TVar):
return bind(y.s, x)
else:
raise InferError(x, y)
def solve(xs):
mgu = empty()
cs = deque(xs)
while len(cs):
(a, b) = cs.pop()
s = unify(a, b)
mgu = compose(s, mgu)
cs = deque(applyList(s, cs))
return mgu
def bind(n, x):
if x == n:
return empty()
elif occurs_check(n, x):
raise InfiniteType(n, x)
else:
return dict([(n, x)])
def occurs_check(n, x):
return n in ftv(x)
def union(s1, s2):
nenv = s1.copy()
nenv.update(s2)
return nenv
def compose(s1, s2):
s3 = dict((t, apply(s1, u)) for t, u in s2.items())
return union(s1, s3)
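# Worked example (added sketch): solving the single constraint "$a ~ Int32"
# produces the substitution {'$a': Int32}:
#   solve([(TVar('$a'), int32)])              # => {'$a': TCon('Int32')}
# and applying a substitution closes over compound types:
#   apply({'$a': int32}, array(TVar('$a')))   # => Array Int32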
### == Core Translator ==
class PythonVisitor(ast.NodeVisitor):
def __init__(self):
pass
def __call__(self, source):
if isinstance(source, types.ModuleType):
source = dedent(inspect.getsource(source))
if isinstance(source, types.FunctionType):
source = dedent(inspect.getsource(source))
if isinstance(source, types.LambdaType):
source = dedent(inspect.getsource(source))
elif isinstance(source, str):
source = dedent(source)
else:
raise NotImplementedError
self._source = source
self._ast = ast.parse(source)
return self.visit(self._ast)
def visit_Module(self, node):
body = list(map(self.visit, node.body))
return body[0]
def visit_Name(self, node):
return Var(node.id)
def visit_Num(self, node):
if isinstance(node.n, float):
return LitFloat(node.n)
else:
return LitInt(node.n)
def visit_Bool(self, node):
return LitBool(node.n)
def visit_Call(self, node):
name = self.visit(node.func)
args = list(map(self.visit, node.args))
return App(name, args)
def visit_BinOp(self, node):
op_str = node.op.__class__
a = self.visit(node.left)
b = self.visit(node.right)
opname = primops[op_str]
return Prim(opname, [a, b])
def visit_Assign(self, node):
assert len(node.targets) == 1
var = node.targets[0].id
val = self.visit(node.value)
return Assign(var, val)
def visit_FunctionDef(self, node):
stmts = list(node.body)
stmts = list(map(self.visit, stmts))
args = [Var(a.arg) for a in node.args.args]
res = Fun(node.name, args, stmts)
return res
def visit_Pass(self, node):
return Noop()
def visit_Return(self, node):
val = self.visit(node.value)
return Return(val)
def visit_Attribute(self, node):
if node.attr == "shape":
val = self.visit(node.value)
return Prim("shape#", [val])
else:
raise NotImplementedError
def visit_Subscript(self, node):
if isinstance(node.ctx, ast.Load):
if node.slice:
val = self.visit(node.value)
ix = self.visit(node.slice.value)
return Index(val, ix)
elif isinstance(node.ctx, ast.Store):
raise NotImplementedError
def visit_For(self, node):
target = self.visit(node.target)
stmts = list(map(self.visit, node.body))
if node.iter.func.id in {"xrange", "range"}:
args = list(map(self.visit, node.iter.args))
else:
raise Exception("Loop must be over range")
if len(args) == 1: # xrange(n)
return Loop(target, LitInt(0, type=int32), args[0], stmts)
elif len(args) == 2: # xrange(n,m)
return Loop(target, args[0], args[1], stmts)
def visit_AugAssign(self, node):
if isinstance(node.op, ast.Add):
ref = node.target.id
value = self.visit(node.value)
return Assign(ref, Prim("add#", [Var(ref), value]))
if isinstance(node.op, ast.Mul):
ref = node.target.id
value = self.visit(node.value)
return Assign(ref, Prim("mult#", [Var(ref), value]))
else:
raise NotImplementedError
def generic_visit(self, node):
raise NotImplementedError(ast.dump(node))
### == Pretty Printer ==
# From my coworker John Riehl; pretty sure he doesn't care.
def ast2tree(node, include_attrs=True):
def _transform(node):
if isinstance(node, ast.AST):
fields = ((a, _transform(b))
for a, b in ast.iter_fields(node))
if include_attrs:
attrs = ((a, _transform(getattr(node, a)))
for a in node._attributes
if hasattr(node, a))
return (node.__class__.__name__, dict(fields), dict(attrs))
return (node.__class__.__name__, dict(fields))
elif isinstance(node, list):
return [_transform(x) for x in node]
elif isinstance(node, str):
return repr(node)
return node
if not isinstance(node, ast.AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _transform(node)
def pformat_ast(node, include_attrs=False, **kws):
return pprint.pformat(ast2tree(node, include_attrs), **kws)
def dump(node):
return pformat_ast(node)
### == LLVM Codegen ==
pointer = ir.PointerType
int_type = ir.IntType(32)
float_type = ir.FloatType()
double_type = ir.DoubleType()
bool_type = ir.IntType(1)
void_type = ir.VoidType()
void_ptr = pointer(ir.IntType(8))
def array_type(elt_type):
struct_type = ir.global_context.get_identified_type('ndarray_' + str(elt_type))
# The type can already exist.
if struct_type.elements:
return struct_type
# If not, initialize it.
struct_type.set_body(
pointer(elt_type), # data
ir.IntType(32), # dimensions
pointer(ir.IntType(32)), # shape
)
return struct_type
int32_array = pointer(array_type(int_type))
int64_array = pointer(array_type(ir.IntType(64)))
double_array = pointer(array_type(double_type))
lltypes_map = {
int32 : int_type,
int64 : int_type,
float32 : float_type,
double64 : double_type,
array_int32 : int32_array,
array_int64 : int64_array,
array_double64 : double_array
}
def to_lltype(ptype):
return lltypes_map[ptype]
def determined(ty):
return len(ftv(ty)) == 0
class LLVMEmitter(object):
def __init__(self, spec_types, retty, argtys):
self.function = None # LLVM Function
self.builder = None # LLVM Builder
self.locals = {} # Local variables
self.arrays = defaultdict(dict) # Array metadata
self.exit_block = None # Exit block
self.spec_types = spec_types # Type specialization
self.retty = retty # Return type
self.argtys = argtys # Argument types
def start_function(self, name, module, rettype, argtypes):
func_type = ir.FunctionType(rettype, argtypes, False)
function = ir.Function(module, func_type, name)
entry_block = function.append_basic_block("entry")
builder = ir.IRBuilder(entry_block)
self.exit_block = function.append_basic_block("exit")
self.function = function
self.builder = builder
def end_function(self):
self.builder.position_at_end(self.exit_block)
if 'retval' in self.locals:
retval = self.builder.load(self.locals['retval'])
self.builder.ret(retval)
else:
self.builder.ret_void()
def add_block(self, name):
return self.function.append_basic_block(name)
def set_block(self, block):
self.block = block
self.builder.position_at_end(block)
def cbranch(self, cond, true_block, false_block):
self.builder.cbranch(cond, true_block, false_block)
def branch(self, next_block):
self.builder.branch(next_block)
def specialize(self, val):
if isinstance(val.type, TVar):
return to_lltype(self.spec_types[val.type.s])
else:
return val.type
def const(self, val):
if isinstance(val, int):
return ir.Constant(int_type, val)
elif isinstance(val, float):
return ir.Constant(double_type, val)
elif isinstance(val, bool):
return ir.Constant(bool_type, int(val))
elif isinstance(val, str):
raise NotImplementedError
#return Constant.stringz(val)
else:
raise NotImplementedError
def visit_LitInt(self, node):
ty = self.specialize(node)
if ty is double_type:
return ir.Constant(double_type, node.n)
elif ty == int_type:
return ir.Constant(int_type, node.n)
def visit_LitFloat(self, node):
ty = self.specialize(node)
if ty is double_type:
return ir.Constant(double_type, node.n)
elif ty == int_type:
return ir.Constant(int_type, node.n)
def visit_Noop(self, node):
pass
def visit_Fun(self, node):
rettype = to_lltype(self.retty)
argtypes = list(map(to_lltype, self.argtys))
# Create a unique specialized name
func_name = mangler(node.fname, self.argtys)
self.start_function(func_name, module, rettype, argtypes)
for (ar, llarg, argty) in zip(node.args, self.function.args, self.argtys):
name = ar.id
llarg.name = name
if is_array(argty):
zero = self.const(0)
one = self.const(1)
two = self.const(2)
data = self.builder.gep(llarg, [
zero, zero], name=(name + '_data'))
dims = self.builder.gep(llarg, [
zero, one], name=(name + '_dims'))
shape = self.builder.gep(llarg, [
zero, two], name=(name + '_strides'))
self.arrays[name]['data'] = self.builder.load(data)
self.arrays[name]['dims'] = self.builder.load(dims)
self.arrays[name]['shape'] = self.builder.load(shape)
self.locals[name] = llarg
else:
argref = self.builder.alloca(to_lltype(argty))
self.builder.store(llarg, argref)
self.locals[name] = argref
# Setup the register for return type.
if rettype is not void_type:
self.locals['retval'] = self.builder.alloca(rettype, name="retval")
list(map(self.visit, node.body))
self.end_function()
def visit_Index(self, node):
if isinstance(node.val, Var) and node.val.id in self.arrays:
val = self.visit(node.val)
ix = self.visit(node.ix)
dataptr = self.arrays[node.val.id]['data']
ret = self.builder.gep(dataptr, [ix])
return self.builder.load(ret)
else:
val = self.visit(node.val)
ix = self.visit(node.ix)
ret = self.builder.gep(val, [ix])
return self.builder.load(ret)
def visit_Var(self, node):
return self.builder.load(self.locals[node.id])
def visit_Return(self, node):
val = self.visit(node.val)
if val.type != void_type:
self.builder.store(val, self.locals['retval'])
self.builder.branch(self.exit_block)
def visit_Loop(self, node):
init_block = self.function.append_basic_block('for.init')
test_block = self.function.append_basic_block('for.cond')
body_block = self.function.append_basic_block('for.body')
end_block = self.function.append_basic_block("for.end")
self.branch(init_block)
self.set_block(init_block)
start = self.visit(node.begin)
stop = self.visit(node.end)
step = 1
# Setup the increment variable
varname = node.var.id
inc = self.builder.alloca(int_type, name=varname)
self.builder.store(start, inc)
self.locals[varname] = inc
# Setup the loop condition
self.branch(test_block)
self.set_block(test_block)
cond = self.builder.icmp_signed('<', self.builder.load(inc), stop)
self.builder.cbranch(cond, body_block, end_block)
# Generate the loop body
self.set_block(body_block)
list(map(self.visit, node.body))
# Increment the counter
succ = self.builder.add(self.const(step), self.builder.load(inc))
self.builder.store(succ, inc)
# Exit the loop
self.builder.branch(test_block)
self.set_block(end_block)
def visit_Prim(self, node):
if node.fn == "shape#":
ref = node.args[0]
shape = self.arrays[ref.id]['shape']
return shape
elif node.fn == "mult#":
a = self.visit(node.args[0])
b = self.visit(node.args[1])
if a.type == double_type:
return self.builder.fmul(a, b)
else:
return self.builder.mul(a, b)
elif node.fn == "add#":
a = self.visit(node.args[0])
b = self.visit(node.args[1])
if a.type == double_type:
return self.builder.fadd(a, b)
else:
return self.builder.add(a, b)
else:
raise NotImplementedError
def visit_Assign(self, node):
# Subsequent assignment
if node.ref in self.locals:
name = node.ref
var = self.locals[name]
val = self.visit(node.val)
self.builder.store(val, var)
self.locals[name] = var
return var
# First assignment
else:
name = node.ref
val = self.visit(node.val)
ty = self.specialize(node)
var = self.builder.alloca(ty, name=name)
self.builder.store(val, var)
self.locals[name] = var
return var
def visit(self, node):
name = "visit_%s" % type(node).__name__
if hasattr(self, name):
return getattr(self, name)(node)
else:
return self.generic_visit(node)
### == Type Mapping ==
# Adapt the LLVM types to use the libffi/ctypes wrapper so we can dynamically create
# the appropriate C types for our JIT'd function at runtime.
_nptypemap = {
'i': ctypes.c_int,
'f': ctypes.c_float,
'd': ctypes.c_double,
}
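# For instance (added note): wrap_type(ir.IntType(32)) below resolves to
# ctypes.c_int32, and a pointer-to-struct LLVM type becomes a
# ctypes.POINTER(Structure) so NumPy buffers can be passed straight through.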
def mangler(fname, sig):
return fname + str(hash(tuple(sig)))
def wrap_module(sig, llfunc):
pfunc = wrap_function(llfunc, engine)
dispatch = dispatcher(pfunc)
return dispatch
def wrap_function(func, engine):
args = func.type.pointee.args
ret_type = func.type.pointee.return_type
ret_ctype = wrap_type(ret_type)
args_ctypes = list(map(wrap_type, args))
functype = ctypes.CFUNCTYPE(ret_ctype, *args_ctypes)
fptr = engine.get_function_address(func.name)
cfunc = functype(fptr)
cfunc.__name__ = func.name
return cfunc
def wrap_type(llvm_type):
if isinstance(llvm_type, ir.IntType):
ctype = getattr(ctypes, "c_int"+str(llvm_type.width))
elif isinstance(llvm_type, ir.DoubleType):
ctype = ctypes.c_double
elif isinstance(llvm_type, ir.FloatType):
ctype = ctypes.c_float
elif isinstance(llvm_type, ir.VoidType):
ctype = None
elif isinstance(llvm_type, ir.PointerType):
pointee = llvm_type.pointee
if isinstance(pointee, ir.IntType):
width = pointee.width
if width == 8:
ctype = ctypes.c_char_p
else:
ctype = ctypes.POINTER(wrap_type(pointee))
elif isinstance(pointee, ir.VoidType):
ctype = ctypes.c_void_p
else:
ctype = ctypes.POINTER(wrap_type(pointee))
elif isinstance(llvm_type, ir.IdentifiedStructType):
struct_name = llvm_type.name.split('.')[-1]
struct_type = None
if struct_type and issubclass(struct_type, ctypes.Structure):
return struct_type
if hasattr(struct_type, '_fields_'):
names = struct_type._fields_
else:
names = ["field"+str(n) for n in range(len(llvm_type.elements))]
ctype = type(ctypes.Structure)(struct_name, (ctypes.Structure,),
{'__module__': "numpile"})
fields = [(name, wrap_type(elem))
for name, elem in zip(names, llvm_type.elements)]
setattr(ctype, '_fields_', fields)
else:
raise Exception("Unknown LLVM type %s" % llvm_type)
return ctype
def wrap_ndarray(na):
# For NumPy arrays grab the underlying data pointer. Doesn't copy.
ctype = _nptypemap[na.dtype.char]
_shape = list(na.shape)
data = na.ctypes.data_as(ctypes.POINTER(ctype))
dims = len(na.strides)
shape = (ctypes.c_int*dims)(*_shape)
return (data, dims, shape)
def wrap_arg(arg, val):
if isinstance(val, np.ndarray):
ndarray = arg._type_
data, dims, shape = wrap_ndarray(val)
return ndarray(data, dims, shape)
else:
return val
def dispatcher(fn):
def _call_closure(*args):
cargs = list(fn._argtypes_)
pargs = list(args)
rargs = list(map(wrap_arg, cargs, pargs))
return fn(*rargs)
_call_closure.__name__ = fn.__name__
return _call_closure
### == Toplevel ==
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
module = ir.Module('numpile.module')
engine = None
function_cache = {}
target = llvm.Target.from_default_triple()
target_machine = target.create_target_machine()
backing_mod = llvm.parse_assembly("")
engine = llvm.create_mcjit_compiler(backing_mod, target_machine)
def autojit(fn):
transformer = PythonVisitor()
ast = transformer(fn)
(ty, mgu) = typeinfer(ast)
debug(dump(ast))
return specialize(ast, ty, mgu)
def arg_pytype(arg):
if isinstance(arg, np.ndarray):
if arg.dtype == np.dtype('int32'):
return array(int32)
elif arg.dtype == np.dtype('int64'):
return array(int64)
elif arg.dtype == np.dtype('double'):
return array(double64)
elif arg.dtype == np.dtype('float'):
return array(float32)
elif isinstance(arg, int) and arg <= sys.maxsize:
return int64
elif isinstance(arg, float):
return double64
else:
raise Exception("Type not supported: %s" % type(arg))
def specialize(ast, infer_ty, mgu):
def _wrapper(*args):
types = list(map(arg_pytype, list(args)))
spec_ty = TFun(argtys=types, retty=TVar("$retty"))
unifier = unify(infer_ty, spec_ty)
specializer = compose(unifier, mgu)
retty = apply(specializer, TVar("$retty"))
argtys = [apply(specializer, ty) for ty in types]
debug('Specialized Function:', TFun(argtys, retty))
if determined(retty) and all(map(determined, argtys)):
key = mangler(ast.fname, argtys)
# Don't recompile after we've specialized.
if key in function_cache:
return function_cache[key](*args)
else:
llfunc = codegen(ast, specializer, retty, argtys)
pyfunc = wrap_module(argtys, llfunc)
function_cache[key] = pyfunc
return pyfunc(*args)
else:
raise UnderDetermined()
return _wrapper
def typeinfer(ast):
infer = TypeInfer()
ty = infer.visit(ast)
mgu = solve(infer.constraints)
infer_ty = apply(mgu, ty)
debug(infer_ty)
debug(mgu)
debug(infer.constraints)
return (infer_ty, mgu)
def codegen(ast, specializer, retty, argtys):
cgen = LLVMEmitter(specializer, retty, argtys)
cgen.visit(ast)
mod = llvm.parse_assembly(str(module))
mod.verify()
pmb = llvm.PassManagerBuilder()
pmb.opt_level=3
pmb.loop_vectorize = True
pm = llvm.ModulePassManager()
pmb.populate(pm)
pm.run(mod)
engine.add_module(mod)
debug(cgen.function)
debug(target_machine.emit_assembly(mod))
return cgen.function
def debug(fmt, *args):
if DEBUG:
print('=' * 80)
print(fmt, *args)
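# Minimal usage sketch (added; mirrors the canonical numpile demo). Run this
# module directly to JIT-compile and specialize a dot product over int32
# arrays:
if __name__ == '__main__':
    @autojit
    def dot(a, b):
        c = 0
        n = a.shape[0]
        for i in range(n):
            c += a[i] * b[i]
        return c

    a = np.arange(100, dtype='int32')
    print(dot(a, a))  # specializes to (Array Int32, Array Int32) -> Int32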
| mit | 9,192,580,984,235,952,000 | 28.031031 | 83 | 0.562306 | false |
RannyeriDev/Solfege | tools/screenshot.py | 5 | 1316 | #!/usr/bin/python
import os
import sys
import re
from subprocess import *
import time
def run(cmd):
print "run:", cmd
os.system(cmd)
def get_image_dim(fn):
output = Popen(["file", fn], stdout=PIPE).communicate()[0]
r = re.compile("(\d+)\s*x+\s*(\d+)")
m = r.search(output)
if m:
return int(m.groups()[0]), int(m.groups()[1])
else:
return None, None
def do_file(fn, width):
time.sleep(2)
f, ext = os.path.splitext(fn)
run("import -frame %s" % fn)
#x, y = get_image_dim(fn)
run("pngnq -n 16 -f %s" % fn)
run("mv %s-nq8.png %s" % (f, fn))
#if not width:
# width = 510
#if x > width:
# run("convert -scale %i %s %s-resized.png" % (width, fn, f))
# run("mv %s-resized.png %s" % (f, fn))
#run("pngquant -nofs 8 %s" % fn)
#run("mv %s-or8.png %s" % (f, fn))
help = """
Usage: ./tools/screenshot path/to/image.png [width]
Make a screenshot using "import". Run this script, and then
click on the window you want to make a screenshot of.
"""
if len(sys.argv) not in (2, 3):
print help
sys.exit()
if sys.argv[1] in ('-h', '--help'):
print help
sys.exit()
try:
width = int(sys.argv[2])
except:
width = None
do_file(sys.argv[1], width)
print "Remember to use the Simple (Enkelt, nb_NO) theme."
| gpl-3.0 | -2,471,564,551,908,236,300 | 22.927273 | 68 | 0.570669 | false |
AyoubZahid/odoo | addons/hr_recruitment/models/hr_job.py | 37 | 5037 | # Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import _, api, fields, models
class Job(models.Model):
_inherit = "hr.job"
_name = "hr.job"
_inherits = {'mail.alias': 'alias_id'}
@api.model
def _default_address_id(self):
return self.env.user.company_id.partner_id
address_id = fields.Many2one(
'res.partner', "Job Location", default=_default_address_id,
help="Address where employees are working")
application_ids = fields.One2many('hr.applicant', 'job_id', "Applications")
application_count = fields.Integer(compute='_compute_application_count', string="Applications")
manager_id = fields.Many2one(
'hr.employee', related='department_id.manager_id', string="Department Manager",
readonly=True, store=True)
user_id = fields.Many2one('res.users', "Recruitment Responsible", track_visibility='onchange')
stage_ids = fields.Many2many(
'hr.recruitment.stage', 'job_stage_rel', 'job_id', 'stage_id',
'Job Stages',
default=[(0, 0, {'name': _('New')})])
document_ids = fields.One2many('ir.attachment', compute='_compute_document_ids', string="Applications")
documents_count = fields.Integer(compute='_compute_document_ids', string="Documents")
survey_id = fields.Many2one(
'survey.survey', "Interview Form",
help="Choose an interview form for this job position and you will be able to print/answer this interview from all applicants who apply for this job")
alias_id = fields.Many2one(
'mail.alias', "Alias", ondelete="restrict", required=True,
help="Email alias for this job position. New emails will automatically create new applicants for this job position.")
color = fields.Integer("Color Index")
def _compute_document_ids(self):
applicants = self.mapped('application_ids').filtered(lambda self: not self.emp_id)
app_to_job = dict((applicant.id, applicant.job_id.id) for applicant in applicants)
attachments = self.env['ir.attachment'].search([
'|',
'&', ('res_model', '=', 'hr.job'), ('res_id', 'in', self.ids),
'&', ('res_model', '=', 'hr.applicant'), ('res_id', 'in', applicants.ids)])
result = dict.fromkeys(self.ids, self.env['ir.attachment'])
for attachment in attachments:
if attachment.res_model == 'hr.applicant':
result[app_to_job[attachment.res_id]] |= attachment
else:
result[attachment.res_id] |= attachment
for job in self:
job.document_ids = result[job.id]
job.documents_count = len(job.document_ids)
@api.multi
def _compute_application_count(self):
read_group_result = self.env['hr.applicant'].read_group([('job_id', '=', self.id)], ['job_id'], ['job_id'])
result = dict((data['job_id'][0], data['job_id_count']) for data in read_group_result)
for job in self:
job.application_count = result.get(job.id, 0)
@api.model
def create(self, vals):
job = super(Job, self.with_context(alias_model_name='hr.applicant',
mail_create_nolog=True,
alias_parent_model_name=self._name)).create(vals)
job.alias_id.write({'alias_parent_thread_id': job.id, "alias_defaults": {'job_id': job.id}})
return job
@api.multi
def unlink(self):
# Cascade-delete mail aliases as well, as they should not exist without the job position.
aliases = self.mapped('alias_id')
res = super(Job, self).unlink()
aliases.unlink()
return res
def _auto_init(self, cr, context=None):
"""Installation hook to create aliases for all jobs and avoid constraint errors."""
return self.pool.get('mail.alias').migrate_to_alias(
cr, self._name, self._table, super(Job, self)._auto_init,
'hr.applicant', self._columns['alias_id'], 'name',
alias_prefix='job+', alias_defaults={'job_id': 'id'}, context=context)
@api.multi
def _track_subtype(self, init_values):
if 'state' in init_values and self.state == 'open':
return 'hr_recruitment.mt_job_new'
return super(Job, self)._track_subtype(init_values)
@api.multi
def action_print_survey(self):
return self.survey_id.action_print_survey()
@api.multi
def action_get_attachment_tree_view(self):
action = self.env.ref('base.action_attachment').read()[0]
action['context'] = {
'default_res_model': self._name,
'default_res_id': self.ids[0]
}
action['domain'] = ['|', '&', ('res_model', '=', 'hr.job'), ('res_id', 'in', self.ids), '&', ('res_model', '=', 'hr.applicant'), ('res_id', 'in', self.mapped('application_ids').ids)]
return action
@api.multi
def action_set_no_of_recruitment(self, value):
return self.write({'no_of_recruitment': value})
| gpl-3.0 | -2,836,516,430,629,853,000 | 45.638889 | 190 | 0.609887 | false |
jyotikamboj/container | django/contrib/admin/__init__.py | 160 | 1237 | # ACTION_CHECKBOX_NAME is unused, but should stay since its import from here
# has been referenced in documentation.
from django.contrib.admin.decorators import register
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.options import (HORIZONTAL, VERTICAL,
ModelAdmin, StackedInline, TabularInline)
from django.contrib.admin.filters import (ListFilter, SimpleListFilter,
FieldListFilter, BooleanFieldListFilter, RelatedFieldListFilter,
ChoicesFieldListFilter, DateFieldListFilter, AllValuesFieldListFilter,
RelatedOnlyFieldListFilter)
from django.contrib.admin.sites import AdminSite, site
from django.utils.module_loading import autodiscover_modules
__all__ = [
"register", "ACTION_CHECKBOX_NAME", "ModelAdmin", "HORIZONTAL", "VERTICAL",
"StackedInline", "TabularInline", "AdminSite", "site", "ListFilter",
"SimpleListFilter", "FieldListFilter", "BooleanFieldListFilter",
"RelatedFieldListFilter", "ChoicesFieldListFilter", "DateFieldListFilter",
"AllValuesFieldListFilter", "RelatedOnlyFieldListFilter", "autodiscover",
]
def autodiscover():
autodiscover_modules('admin', register_to=site)
default_app_config = 'django.contrib.admin.apps.AdminConfig'
| mit | -3,329,540,222,273,793,000 | 44.814815 | 79 | 0.789814 | false |
ScreamingUdder/mantid | Framework/PythonInterface/test/python/mantid/geometry/IPeakTest.py | 1 | 6523 | import unittest
from mantid.kernel import V3D
from mantid.simpleapi import CreateSimulationWorkspace, CreatePeaksWorkspace
import numpy as np
import numpy.testing as npt
class IPeakTest(unittest.TestCase):
def setUp(self):
# IPeak cannot currently be instantiated, so this is a quick way of
# getting a handle to a peak object.
ws = CreateSimulationWorkspace("SXD", BinParams="1,1,10")
peaks = CreatePeaksWorkspace(ws, 1)
self._peak = peaks.getPeak(0)
# tolerance for differences in q vectors that are recomputed
# on every call.
self._tolerance = 1e-2
def test_set_detector_id_with_valid_id(self):
det_id = 101
self._peak.setDetectorID(det_id)
self.assertEqual(self._peak.getDetectorID(), det_id)
def test_set_detector_id_with_invalid_id(self):
det_id = -1
self.assertRaises(RuntimeError, self._peak.setDetectorID, det_id)
def test_set_run_number(self):
run_number = 101
self._peak.setRunNumber(run_number)
self.assertEqual(self._peak.getRunNumber(), run_number)
def test_set_monitor_count(self):
mon_count = 3
self._peak.setMonitorCount(mon_count)
self.assertEqual(self._peak.getMonitorCount(), mon_count)
def test_set_hkl_all_at_once(self):
H, K, L = 1, 2, 3
self._peak.setHKL(H, K, L)
self.assertEqual(self._peak.getH(), H)
self.assertEqual(self._peak.getK(), K)
self.assertEqual(self._peak.getL(), L)
def test_set_hkl_individually(self):
H, K, L = 1, 2, 3
self._peak.setH(H)
self._peak.setK(K)
self._peak.setL(L)
self.assertEqual(self._peak.getH(), H)
self.assertEqual(self._peak.getK(), K)
self.assertEqual(self._peak.getL(), L)
def test_set_q_lab_frame(self):
q_lab = V3D(0, 1, 1)
self._peak.setQLabFrame(q_lab)
npt.assert_allclose(self._peak.getQLabFrame(), q_lab, atol=self._tolerance)
npt.assert_allclose(self._peak.getQSampleFrame(), q_lab, atol=self._tolerance)
def test_set_q_sample_frame(self):
q_sample = V3D(0, 1, 1)
self._peak.setQSampleFrame(q_sample)
npt.assert_allclose(self._peak.getQSampleFrame(), q_sample, atol=self._tolerance)
npt.assert_allclose(self._peak.getQLabFrame(), q_sample, atol=self._tolerance)
def test_set_goniometer_matrix_with_valid_matrix(self):
angle = np.pi/4
rotation = np.array([
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]
])
q_sample = V3D(1, 1, 1)
self._peak.setGoniometerMatrix(rotation)
self._peak.setQSampleFrame(q_sample)
q_lab = np.dot(self._peak.getQLabFrame(), rotation)
npt.assert_allclose(self._peak.getGoniometerMatrix(), rotation)
npt.assert_allclose(self._peak.getQSampleFrame(), q_sample, atol=self._tolerance)
npt.assert_allclose(q_lab, q_sample, atol=self._tolerance)
def test_set_goniometer_matrix_with_singular_matrix(self):
rotation = np.zeros((3,3))
self.assertRaises(ValueError, self._peak.setGoniometerMatrix, rotation)
def test_set_wavelength(self):
wavelength = 1.3
self._peak.setWavelength(wavelength)
self.assertAlmostEqual(self._peak.getWavelength(), wavelength)
def test_get_scattering(self):
det_id = 101
expected_scattering_angle = 2.878973314094696
self._peak.setDetectorID(det_id)
self.assertAlmostEqual(self._peak.getScattering(), expected_scattering_angle)
def test_get_tof(self):
det_id = 101
wavelength = 1.9
expected_tof = 4103.70182610731
self._peak.setDetectorID(det_id)
self._peak.setWavelength(wavelength)
self.assertEqual(self._peak.getTOF(), expected_tof)
def test_get_d_spacing(self):
det_id = 101
wavelength = 1.9
expected_d = 0.958249313959493
self._peak.setDetectorID(det_id)
self._peak.setWavelength(wavelength)
self.assertEqual(self._peak.getDSpacing(), expected_d)
def test_set_initial_energy(self):
initial_energy = 10.0
self._peak.setInitialEnergy(initial_energy)
self.assertAlmostEqual(self._peak.getInitialEnergy(), initial_energy)
def test_set_final_energy(self):
final_energy = 10.0
self._peak.setFinalEnergy(final_energy)
self.assertAlmostEqual(self._peak.getFinalEnergy(), final_energy)
def test_get_energy(self):
initial_energy = 10.0
final_energy = 10.0
self._peak.setFinalEnergy(final_energy)
self._peak.setInitialEnergy(initial_energy)
self.assertAlmostEqual(self._peak.getEnergyTransfer(), initial_energy - final_energy)
def test_set_intensity(self):
intensity = 10.0
self._peak.setIntensity(intensity)
self.assertAlmostEqual(self._peak.getIntensity(), intensity)
def test_set_sigma_intensity(self):
sigma = 10.0
self._peak.setSigmaIntensity(sigma)
self.assertAlmostEqual(self._peak.getSigmaIntensity(), sigma)
def test_get_intensity_over_sigma(self):
intensity = 100.0
sigma = 10.0
self._peak.setIntensity(intensity)
self._peak.setSigmaIntensity(sigma)
self.assertAlmostEqual(self._peak.getIntensityOverSigma(), intensity / sigma)
def test_set_bin_count(self):
bin_count = 10.0
self._peak.setBinCount(bin_count)
self.assertAlmostEqual(self._peak.getBinCount(), bin_count)
def test_get_row_and_column(self):
det_id = 101
row, col = 36, 1
self._peak.setDetectorID(det_id)
self.assertEqual(self._peak.getRow(), row)
self.assertEqual(self._peak.getCol(), col)
def test_get_det_pos(self):
det_id = 101
expected_det_pos = np.array([0.061999, 0.0135, -0.236032])
self._peak.setDetectorID(det_id)
npt.assert_allclose(self._peak.getDetPos(), expected_det_pos, atol=self._tolerance)
def test_get_l1(self):
det_id = 101
expected_l1 = 8.3
self._peak.setDetectorID(det_id)
self.assertEqual(self._peak.getL1(), expected_l1)
def test_get_l2(self):
det_id = 101
expected_l2 = 0.2444125610784556
self._peak.setDetectorID(det_id)
self.assertEqual(self._peak.getL2(), expected_l2)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -5,101,369,517,707,989,000 | 34.069892 | 93 | 0.635291 | false |
aeklant/scipy | scipy/ndimage/__init__.py | 4 | 4755 | """
=========================================================
Multidimensional image processing (:mod:`scipy.ndimage`)
=========================================================
.. currentmodule:: scipy.ndimage
This package contains various functions for multidimensional image
processing.
Filters
=======
.. autosummary::
:toctree: generated/
convolve - Multidimensional convolution
convolve1d - 1-D convolution along the given axis
correlate - Multidimensional correlation
correlate1d - 1-D correlation along the given axis
gaussian_filter
gaussian_filter1d
gaussian_gradient_magnitude
gaussian_laplace
generic_filter - Multidimensional filter using a given function
generic_filter1d - 1-D generic filter along the given axis
generic_gradient_magnitude
generic_laplace
laplace - N-D Laplace filter based on approximate second derivatives
maximum_filter
maximum_filter1d
median_filter - Calculates a multidimensional median filter
minimum_filter
minimum_filter1d
percentile_filter - Calculates a multidimensional percentile filter
prewitt
rank_filter - Calculates a multidimensional rank filter
sobel
uniform_filter - Multidimensional uniform filter
uniform_filter1d - 1-D uniform filter along the given axis
Fourier filters
===============
.. autosummary::
:toctree: generated/
fourier_ellipsoid
fourier_gaussian
fourier_shift
fourier_uniform
Interpolation
=============
.. autosummary::
:toctree: generated/
affine_transform - Apply an affine transformation
geometric_transform - Apply an arbitrary geometric transform
map_coordinates - Map input array to new coordinates by interpolation
rotate - Rotate an array
shift - Shift an array
spline_filter
spline_filter1d
zoom - Zoom an array
Measurements
============
.. autosummary::
:toctree: generated/
center_of_mass - The center of mass of the values of an array at labels
extrema - Minima and maxima of an array at labels, with their positions
find_objects - Find objects in a labeled array
histogram - Histogram of the values of an array, optionally at labels
label - Label features in an array
labeled_comprehension
maximum
maximum_position
mean - Mean of the values of an array at labels
median
minimum
minimum_position
standard_deviation - Standard deviation of an N-D image array
sum - Sum of the values of the array
variance - Variance of the values of an N-D image array
watershed_ift
Morphology
==========
.. autosummary::
:toctree: generated/
binary_closing
binary_dilation
binary_erosion
binary_fill_holes
binary_hit_or_miss
binary_opening
binary_propagation
black_tophat
distance_transform_bf
distance_transform_cdt
distance_transform_edt
generate_binary_structure
grey_closing
grey_dilation
grey_erosion
grey_opening
iterate_structure
morphological_gradient
morphological_laplace
white_tophat
"""
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .filters import *
from .fourier import *
from .interpolation import *
from .measurements import *
from .morphology import *
__version__ = '2.0'
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
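# Quick usage sketch (added example, not part of the original file):
#   import numpy as np
#   from scipy import ndimage
#   a = np.zeros((5, 5)); a[2, 2] = 1.0
#   ndimage.gaussian_filter(a, sigma=1.0)  # smooth the unit impulse
#   ndimage.label(a > 0)                   # => (labeled_array, num_features)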
| bsd-3-clause | 6,112,507,694,913,354,000 | 28.171779 | 74 | 0.724501 | false |
samsara/samsara | clients/python/samsara_sdk/constants.py | 2 | 1329 | # -*- coding: utf-8 -*-
"""
samsara_sdk.constants
~~~~~~~~~~~~~~~~~~~~~
Samsara SDK client constants
"""
# Samsara-specific HTTP header
PUBLISHED_TIMESTAMP_HEADER = "X-Samsara-publishedTimestamp"
API_PATH = '/v1/events'
DEFAULT_CONFIG = {
# a samsara ingestion api endpoint "http://samsara-ingestion.local/"
# url - REQUIRED
# the identifier of the source of these events
# source_id - OPTIONAL only for record-event
# whether to start the publishing thread.
"start_publishing_thread": True,
# how often should the events being sent to samsara
# in milliseconds
# default 30s
"publish_interval": 30000,
# max size of the buffer, when buffer is full
# older events are dropped.
"max_buffer_size": 10000,
# minimum number of events to that must be in the buffer
# before attempting to publish them
"min_buffer_size": 100,
# network timeout for send operations
# in milliseconds
# default 30s
"send_timeout": 30000,
# whether of not the payload should be compressed
# allowed values "gzip" "none"
"compression": "gzip"
# add samsara client statistics events
# this helps you to understand whether the
# buffer size and publish-intervals are
# adequately configured.
# "send_client_stats": True
}
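# Illustrative sketch (added; the values below are assumptions, the keys are
# the documented ones): a client would typically layer caller-supplied
# options over these defaults, e.g.
#   config = dict(DEFAULT_CONFIG,
#                 url="http://samsara-ingestion.local/",
#                 source_id="my-service")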
| apache-2.0 | -6,348,739,950,510,200,000 | 26.122449 | 73 | 0.662152 | false |
junhuac/MQUIC | depot_tools/git_rebase_update.py | 1 | 11254 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tool to update all branches to have the latest changes from their upstreams.
"""
import argparse
import collections
import logging
import sys
import textwrap
import os
from fnmatch import fnmatch
from pprint import pformat
import git_common as git
STARTING_BRANCH_KEY = 'depot-tools.rebase-update.starting-branch'
STARTING_WORKDIR_KEY = 'depot-tools.rebase-update.starting-workdir'
def find_return_branch_workdir():
"""Finds the branch and working directory which we should return to after
rebase-update completes.
These values may persist across multiple invocations of rebase-update, if
rebase-update runs into a conflict mid-way.
"""
return_branch = git.config(STARTING_BRANCH_KEY)
workdir = git.config(STARTING_WORKDIR_KEY)
if not return_branch:
workdir = os.getcwd()
git.set_config(STARTING_WORKDIR_KEY, workdir)
return_branch = git.current_branch()
if return_branch != 'HEAD':
git.set_config(STARTING_BRANCH_KEY, return_branch)
return return_branch, workdir
def fetch_remotes(branch_tree):
"""Fetches all remotes which are needed to update |branch_tree|."""
fetch_tags = False
remotes = set()
tag_set = git.tags()
fetchspec_map = {}
all_fetchspec_configs = git.config_regexp(r'^remote\..*\.fetch')
for fetchspec_config in all_fetchspec_configs:
key, _, fetchspec = fetchspec_config.partition(' ')
dest_spec = fetchspec.partition(':')[2]
remote_name = key.split('.')[1]
fetchspec_map[dest_spec] = remote_name
for parent in branch_tree.itervalues():
if parent in tag_set:
fetch_tags = True
else:
full_ref = git.run('rev-parse', '--symbolic-full-name', parent)
for dest_spec, remote_name in fetchspec_map.iteritems():
if fnmatch(full_ref, dest_spec):
remotes.add(remote_name)
break
fetch_args = []
if fetch_tags:
# Need to fetch all because we don't know what remote the tag comes from :(
# TODO(iannucci): assert that the tags are in the remote fetch refspec
fetch_args = ['--all']
else:
fetch_args.append('--multiple')
fetch_args.extend(remotes)
# TODO(iannucci): Should we fetch git-svn?
if not fetch_args: # pragma: no cover
print 'Nothing to fetch.'
else:
git.run_with_stderr('fetch', *fetch_args, stdout=sys.stdout,
stderr=sys.stderr)
def remove_empty_branches(branch_tree):
tag_set = git.tags()
ensure_root_checkout = git.once(lambda: git.run('checkout', git.root()))
deletions = {}
reparents = {}
downstreams = collections.defaultdict(list)
for branch, parent in git.topo_iter(branch_tree, top_down=False):
downstreams[parent].append(branch)
# If branch and parent have the same tree, then branch has to be marked
# for deletion and its children and grand-children reparented to parent.
if git.hash_one(branch+":") == git.hash_one(parent+":"):
ensure_root_checkout()
logging.debug('branch %s merged to %s', branch, parent)
# Mark branch for deletion while remembering the ordering, then add all
# its children as grand-children of its parent and record reparenting
# information if necessary.
deletions[branch] = len(deletions)
for down in downstreams[branch]:
if down in deletions:
continue
# Record the new and old parent for down, or update such a record
# if it already exists. Keep track of the ordering so that reparenting
# happens in topological order.
downstreams[parent].append(down)
if down not in reparents:
reparents[down] = (len(reparents), parent, branch)
else:
order, _, old_parent = reparents[down]
reparents[down] = (order, parent, old_parent)
# Apply all reparenting recorded, in order.
for branch, value in sorted(reparents.iteritems(), key=lambda x:x[1][0]):
_, parent, old_parent = value
if parent in tag_set:
git.set_branch_config(branch, 'remote', '.')
git.set_branch_config(branch, 'merge', 'refs/tags/%s' % parent)
print ('Reparented %s to track %s [tag] (was tracking %s)'
% (branch, parent, old_parent))
else:
git.run('branch', '--set-upstream-to', parent, branch)
print ('Reparented %s to track %s (was tracking %s)'
% (branch, parent, old_parent))
# Apply all deletions recorded, in order.
for branch, _ in sorted(deletions.iteritems(), key=lambda x: x[1]):
print git.run('branch', '-d', branch)
def rebase_branch(branch, parent, start_hash):
logging.debug('considering %s(%s) -> %s(%s) : %s',
branch, git.hash_one(branch), parent, git.hash_one(parent),
start_hash)
# If parent has FROZEN commits, don't base branch on top of them. Instead,
# base branch on top of whatever commit is before them.
back_ups = 0
orig_parent = parent
while git.run('log', '-n1', '--format=%s',
parent, '--').startswith(git.FREEZE):
back_ups += 1
parent = git.run('rev-parse', parent+'~')
if back_ups:
logging.debug('Backed parent up by %d from %s to %s',
back_ups, orig_parent, parent)
if git.hash_one(parent) != start_hash:
# Try a plain rebase first
print 'Rebasing:', branch
rebase_ret = git.rebase(parent, start_hash, branch, abort=True)
if not rebase_ret.success:
# TODO(iannucci): Find collapsible branches in a smarter way?
print "Failed! Attempting to squash", branch, "...",
squash_branch = branch+"_squash_attempt"
git.run('checkout', '-b', squash_branch)
git.squash_current_branch(merge_base=start_hash)
# Try to rebase the branch_squash_attempt branch to see if it's empty.
squash_ret = git.rebase(parent, start_hash, squash_branch, abort=True)
empty_rebase = git.hash_one(squash_branch) == git.hash_one(parent)
git.run('checkout', branch)
git.run('branch', '-D', squash_branch)
if squash_ret.success and empty_rebase:
print 'Success!'
git.squash_current_branch(merge_base=start_hash)
git.rebase(parent, start_hash, branch)
else:
print "Failed!"
print
# rebase and leave in mid-rebase state.
# This second rebase attempt should always fail in the same
# way that the first one does. If it magically succeeds then
# something very strange has happened.
second_rebase_ret = git.rebase(parent, start_hash, branch)
if second_rebase_ret.success: # pragma: no cover
print "Second rebase succeeded unexpectedly!"
print "Please see: http://crbug.com/425696"
print "First rebased failed with:"
print rebase_ret.stderr
else:
print "Here's what git-rebase (squashed) had to say:"
print
print squash_ret.stdout
print squash_ret.stderr
print textwrap.dedent(
"""\
Squashing failed. You probably have a real merge conflict.
Your working copy is in mid-rebase. Either:
* completely resolve like a normal git-rebase; OR
* abort the rebase and mark this branch as dormant:
git config branch.%s.dormant true
And then run `git rebase-update` again to resume.
""" % branch)
return False
else:
print '%s up-to-date' % branch
git.remove_merge_base(branch)
git.get_or_create_merge_base(branch)
return True
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', '-v', action='store_true')
parser.add_argument('--keep-going', '-k', action='store_true',
help='Keep processing past failed rebases.')
parser.add_argument('--no_fetch', '--no-fetch', '-n',
action='store_true',
help='Skip fetching remotes.')
opts = parser.parse_args(args)
if opts.verbose: # pragma: no cover
logging.getLogger().setLevel(logging.DEBUG)
# TODO(iannucci): snapshot all branches somehow, so we can implement
# `git rebase-update --undo`.
# * Perhaps just copy packed-refs + refs/ + logs/ to the side?
# * commit them to a secret ref?
# * Then we could view a summary of each run as a
# `diff --stat` on that secret ref.
if git.in_rebase():
# TODO(iannucci): Be able to resume rebase with flags like --continue,
# etc.
print (
'Rebase in progress. Please complete the rebase before running '
'`git rebase-update`.'
)
return 1
return_branch, return_workdir = find_return_branch_workdir()
os.chdir(git.run('rev-parse', '--show-toplevel'))
if git.current_branch() == 'HEAD':
if git.run('status', '--porcelain'):
print 'Cannot rebase-update with detached head + uncommitted changes.'
return 1
else:
git.freeze() # just in case there are any local changes.
skipped, branch_tree = git.get_branch_tree()
for branch in skipped:
print 'Skipping %s: No upstream specified' % branch
if not opts.no_fetch:
fetch_remotes(branch_tree)
merge_base = {}
for branch, parent in branch_tree.iteritems():
merge_base[branch] = git.get_or_create_merge_base(branch, parent)
logging.debug('branch_tree: %s' % pformat(branch_tree))
logging.debug('merge_base: %s' % pformat(merge_base))
retcode = 0
unrebased_branches = []
# Rebase each branch starting with the root-most branches and working
# towards the leaves.
for branch, parent in git.topo_iter(branch_tree):
if git.is_dormant(branch):
print 'Skipping dormant branch', branch
else:
ret = rebase_branch(branch, parent, merge_base[branch])
if not ret:
retcode = 1
if opts.keep_going:
print '--keep-going set, continuing with next branch.'
unrebased_branches.append(branch)
if git.in_rebase():
git.run_with_retcode('rebase', '--abort')
if git.in_rebase(): # pragma: no cover
print 'Failed to abort rebase. Something is really wrong.'
break
else:
break
if unrebased_branches:
print
print 'The following branches could not be cleanly rebased:'
for branch in unrebased_branches:
print ' %s' % branch
if not retcode:
remove_empty_branches(branch_tree)
# return_branch may not be there any more.
if return_branch in git.branches():
git.run('checkout', return_branch)
git.thaw()
else:
root_branch = git.root()
if return_branch != 'HEAD':
print (
"%r was merged with its parent, checking out %r instead."
% (return_branch, root_branch)
)
git.run('checkout', root_branch)
if return_workdir:
os.chdir(return_workdir)
git.set_config(STARTING_BRANCH_KEY, '')
git.set_config(STARTING_WORKDIR_KEY, '')
return retcode
if __name__ == '__main__': # pragma: no cover
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
| mit | 8,658,482,790,569,833,000 | 33.415902 | 79 | 0.641639 | false |
MultiPath/Dep-Compo | execute.py | 1 | 1033 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from structure import DependencyTree
from DepNN import DepNN
# Main class to execute the task.
class Execute(object):
def __init__(self):
self.depnn = DepNN()
self.depnn.extendLookUp('../data/depcheck2')
self.depnn.load_wordvector('../data/word-vec.bin')
self.deptree = DependencyTree()
def build_model(self):
self.depnn.build_AutoEncoder(200, '../data/compilation.bin')
def load_model(self):
self.depnn.load_AutoEncoder('../data/compilation.bin')
def train_sentence(self, lines):
w = self.deptree.read_sent(lines[0])
y = self.deptree.read_dependency(lines[1])
#self.depnn.saveRAE('../data/weights.bin')
#self.depnn.loadRAE('../data/weights.bin')
print ' '.join(x[0] for x in w)
self.depnn.build_DepRNN_Tree(y)
def dump_weights(self, db_file):
self.depnn.saveRAE(db_file)
def load_weights(self, db_file):
self.depnn.loadRAE(db_file)
| gpl-2.0 | -6,566,271,291,311,976,000 | 27.694444 | 68 | 0.619555 | false |
FreeAgent/djangoappengine-starter | djangotoolbox/middleware.py | 85 | 2801 | from django.conf import settings
from django.http import HttpResponseRedirect
from django.utils.cache import patch_cache_control
LOGIN_REQUIRED_PREFIXES = getattr(settings, 'LOGIN_REQUIRED_PREFIXES', ())
NO_LOGIN_REQUIRED_PREFIXES = getattr(settings, 'NO_LOGIN_REQUIRED_PREFIXES', ())
ALLOWED_DOMAINS = getattr(settings, 'ALLOWED_DOMAINS', None)
NON_REDIRECTED_PATHS = getattr(settings, 'NON_REDIRECTED_PATHS', ())
NON_REDIRECTED_BASE_PATHS = tuple(path.rstrip('/') + '/'
for path in NON_REDIRECTED_PATHS)
class LoginRequiredMiddleware(object):
"""
Redirects to login page if request path begins with a
LOGIN_REQUIRED_PREFIXES prefix. You can also specify
NO_LOGIN_REQUIRED_PREFIXES which take precedence.
"""
def process_request(self, request):
for prefix in NO_LOGIN_REQUIRED_PREFIXES:
if request.path.startswith(prefix):
return None
for prefix in LOGIN_REQUIRED_PREFIXES:
if request.path.startswith(prefix) and \
not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.get_full_path())
return None
class RedirectMiddleware(object):
"""
A static redirect middleware. Mostly useful for hosting providers that
automatically setup an alternative domain for your website. You might
not want anyone to access the site via those possibly well-known URLs.
"""
def process_request(self, request):
host = request.get_host().split(':')[0]
# Turn off redirects when in debug mode, running unit tests, or
# when handling an App Engine cron job.
if (settings.DEBUG or host == 'testserver' or
not ALLOWED_DOMAINS or
request.META.get('HTTP_X_APPENGINE_CRON') == 'true' or
request.path.startswith('/_ah/') or
request.path in NON_REDIRECTED_PATHS or
request.path.startswith(NON_REDIRECTED_BASE_PATHS)):
return
if host not in settings.ALLOWED_DOMAINS:
return HttpResponseRedirect('http://' + settings.ALLOWED_DOMAINS[0]
+ request.path)
class NoHistoryCacheMiddleware(object):
"""
If user is authenticated we disable browser caching of pages in history.
"""
def process_response(self, request, response):
if 'Expires' not in response and \
'Cache-Control' not in response and \
hasattr(request, 'session') and \
request.user.is_authenticated():
patch_cache_control(response,
no_store=True, no_cache=True, must_revalidate=True, max_age=0)
return response
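# Illustrative settings sketch (added; the values are assumptions, but the
# setting names are the ones this module reads):
#   LOGIN_REQUIRED_PREFIXES = ('/dashboard/',)
#   NO_LOGIN_REQUIRED_PREFIXES = ('/dashboard/public/',)
#   ALLOWED_DOMAINS = ['www.example.com']
#   NON_REDIRECTED_PATHS = ('/healthz',)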
| bsd-3-clause | -1,271,916,617,337,417,700 | 44.177419 | 80 | 0.640843 | false |
soasme/rio | tests/blueprints/dashboard/test_views.py | 1 | 1243 | # -*- coding: utf-8 -*-
from flask import url_for
from flask_login import login_user
def test_new_project_require_name(client, login):
resp = client.post(url_for('dashboard.new_project'), data={'name': ''})
assert resp.status_code == 400
assert resp.json['errors']['name']
def test_new_project_success(client, login):
resp = client.post(url_for('dashboard.new_project'), data={'name': 'New Project'})
assert resp.status_code == 200
assert resp.json['id']
assert resp.json['name'] == 'New Project'
assert resp.json['slug'] == 'new-project'
assert resp.json['owner_id'] == login.id
def test_new_project_twice(client, login):
resp = client.post(url_for('dashboard.new_project'), data={'name': 'New Project'})
resp = client.post(url_for('dashboard.new_project'), data={'name': 'New Project'})
assert resp.status_code == 400
assert resp.json['errors']['name'] == ['duplicated slug.']
def test_new_project_insensive_slug(client, login):
resp = client.post(url_for('dashboard.new_project'), data={'name': 'New Project'})
assert resp.json['slug'] == 'new-project'
resp = client.post(url_for('dashboard.new_project'), data={'name': 'new project'})
assert resp.status_code == 400
| mit | 4,806,526,466,730,353,000 | 41.862069 | 86 | 0.661303 | false |
teoliphant/numpy-refactor | numpy/lib/twodim_base.py | 5 | 22944 | """ Basic functions for manipulating 2d arrays
"""
__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu',
'tril','vander','histogram2d','mask_indices',
'tril_indices','tril_indices_from','triu_indices','triu_indices_from',
]
from numpy.core.numeric import asanyarray, equal, subtract, arange, \
zeros, greater_equal, multiply, ones, asarray, alltrue, where, \
empty
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Does not require the array to be
two-dimensional.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError, "Input must be >= 2-d."
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError, "Input must be >= 1-d."
return m[::-1,...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError, "Input must be >= 2-d."
k = k % 4
if k == 0: return m
elif k == 1: return fliplr(m).swapaxes(0,1)
elif k == 2: return fliplr(flipud(m))
else: return fliplr(m.swapaxes(0,1)) # k==3
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 refers to the main diagonal, a positive value
refers to an upper diagonal, and a negative value to a lower diagonal.
dtype : dtype, optional
Data-type of the returned array.
Returns
-------
I : ndarray (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
diag : Return a diagonal 2-D array using a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
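# Worked example of the flat-index fill above, for N=3, M=4, k=1: i = 1 and
# m[:M-k].flat[1::M+1] sets flat indices 1, 6, 11, i.e. positions (0, 1),
# (1, 2), (2, 3) -- exactly the k=1 diagonal. For k < 0, i = (-k) * M skips
# the first -k rows before the same stride walks the diagonal.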
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
    tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n,n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
if k >= s[1]:
return empty(0, dtype=v.dtype)
if v.flags.f_contiguous:
# faster slicing
v, k, s = v.T, -k, s[::-1]
if k >= 0:
i = k
else:
i = (-k) * s[1]
return v[:s[1]-k].flat[i::s[1]+1]
else:
raise ValueError, "Input must be 1- or 2-d."
def diagflat(v,k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set. The default is 0.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
    diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n,n), v.dtype)
if (k>=0):
i = arange(0,n-k)
fi = i+k+i*n
else:
i = arange(0,n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
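# Worked example of the index computation above, for v = [1, 2] and k = 1:
# n = 3, i = [0, 1], fi = i + k + i*n = [1, 5], so res.flat[[1, 5]] = v puts
# 1 at (0, 1) and 2 at (1, 2), reproducing the second docstring example.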
def tri(N, M=None, k=0, dtype=float):
"""
Construct an array filled with ones at and below the given diagonal.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
T : (N,M) ndarray
        Array with its lower triangle filled with ones, in other words
        ``T[i,j] == 1`` for ``j <= i + k``, 0 elsewhere.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None: M = N
m = greater_equal(subtract.outer(arange(N), arange(M)),-k)
return m.astype(dtype)
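# Worked example of the outer-difference trick above, for N=2, M=3, k=0:
# subtract.outer(arange(2), arange(3)) = [[0, -1, -2], [1, 0, -1]], and
# comparing >= -k yields [[1, 0, 0], [1, 1, 0]]: ones exactly where
# j <= i + k.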
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int
Diagonal above which to zero elements.
`k = 0` is the main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
L : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=int),m)
return out
def triu(m, k=0):
"""
Upper triangle of an array.
Construct a copy of a matrix with elements below the k-th diagonal zeroed.
Please refer to the documentation for `tril`.
See Also
--------
tril
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
out = multiply((1-tri(m.shape[0], m.shape[1], k-1, int)),m)
return out
# borrowed from John Hunter and matplotlib
def vander(x, N=None):
"""
    Generate a Vandermonde matrix.
The columns of the output matrix are decreasing powers of the input
vector. Specifically, the i-th output column is the input vector to
the power of ``N - i - 1``. Such a matrix with a geometric progression
    in each row is named a Vandermonde matrix, after
    Alexandre-Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Order of (number of columns in) the output. If `N` is not specified,
a square array is returned (``N = len(x)``).
Returns
-------
out : ndarray
        Vandermonde matrix of order `N`. The first column is ``x^(N-1)``,
the second ``x^(N-2)`` and so forth.
References
----------
.. [1] Wikipedia, "Vandermonde matrix",
http://en.wikipedia.org/wiki/Vandermonde_matrix
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if N is None: N=len(x)
X = ones( (len(x),N), x.dtype)
for i in range(N-1):
X[:,i] = x**(N-i-1)
return X
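# Note on the loop above: range(N-1) deliberately stops before i = N-1, so
# the last column keeps the ones it was initialized with -- which is exactly
# x**0, the final power in the decreasing sequence.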
def histogram2d(x,y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape(N,)
A sequence of values to be histogrammed along the first dimension.
y : array_like, shape(M,)
A sequence of values to be histogrammed along the second dimension.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension (nx, ny = bins).
* If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension (x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
the bin density, i.e. the bin count divided by the bin area.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights
are normalized to 1 if `normed` is True. If `normed` is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
    xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
    yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram: 1D histogram
histogramdd: Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample density,
defined such that:
.. math::
\\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1
    where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_j`
    is the area of bin ``{i,j}``.
Please note that the histogram does not follow the Cartesian convention
    where `x` values are on the abscissa and `y` values on the ordinate axis.
Rather, `x` is histogrammed along the first dimension of the array
(vertical), and `y` along the second dimension of the array (horizontal).
This ensures compatibility with `histogramdd`.
Examples
--------
>>> x, y = np.random.randn(2, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8))
>>> H.shape, xedges.shape, yedges.shape
((5, 8), (6,), (9,))
    We can now use Matplotlib to visualize this two-dimensional histogram:
>>> extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
>>> import matplotlib.pyplot as plt
>>> plt.imshow(H, extent=extent)
<matplotlib.image.AxesImage object at ...>
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x,y], bins, range, normed, weights)
return hist, edges[0], edges[1]
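# Sketch of the bin dispatch above (edge values are illustrative): a scalar
# like bins=10 or a pair like bins=[5, 8] (N == 1 or 2) is passed through to
# histogramdd unchanged, while a single edge array such as
# bins=[0.0, 0.5, 1.0] has N == 3, so it is duplicated into [xedges, yedges]
# and shared by both dimensions.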
def mask_indices(n,mask_func,k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n,n),int)
a = mask_func(m,k)
return where(a != 0)
def tril_indices(n,k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
Parameters
----------
n : int
Sets the size of the arrays for which the returned indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return mask_indices(n,tril,k)
def tril_indices_from(arr,k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
See `tril_indices` for full details.
Parameters
----------
    arr : ndarray, shape (N, N)
        Square array for which the returned indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
    if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return tril_indices(arr.shape[0],k)
def triu_indices(n,k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
Parameters
----------
n : int
Sets the size of the arrays for which the returned indices will be valid.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return mask_indices(n,triu,k)
def triu_indices_from(arr,k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
See `triu_indices` for full details.
Parameters
----------
    arr : ndarray, shape (N, N)
        Square array for which the returned indices will be valid.
k : int, optional
Diagonal offset (see `triu` for details).
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
    if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return triu_indices(arr.shape[0],k)
| bsd-3-clause | 8,970,260,017,086,218,000 | 25.161916 | 89 | 0.530073 | false |
ChronoMonochrome/android_external_chromium_org | tools/linux/procfs.py | 23 | 20724 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# A Python library to read and store procfs (/proc) information on Linux.
#
# Each information storage class in this file keeps its data as close to the
# original raw form as reasonably possible. Translation is done only when
# requested, so that the original data can always be probed.
import collections
import logging
import os
import re
import struct
import sys
class _NullHandler(logging.Handler):
def emit(self, record):
pass
_LOGGER = logging.getLogger('procfs')
_LOGGER.addHandler(_NullHandler())
class ProcStat(object):
"""Reads and stores information in /proc/pid/stat."""
_PATTERN = re.compile(r'^'
'(?P<PID>-?[0-9]+) '
'\((?P<COMM>.+)\) '
'(?P<STATE>[RSDZTW]) '
'(?P<PPID>-?[0-9]+) '
'(?P<PGRP>-?[0-9]+) '
'(?P<SESSION>-?[0-9]+) '
'(?P<TTY_NR>-?[0-9]+) '
'(?P<TPGID>-?[0-9]+) '
'(?P<FLAGS>[0-9]+) '
                        '(?P<MINFLT>[0-9]+) '
                        '(?P<CMINFLT>[0-9]+) '
                        '(?P<MAJFLT>[0-9]+) '
                        '(?P<CMAJFLT>[0-9]+) '
'(?P<UTIME>[0-9]+) '
'(?P<STIME>[0-9]+) '
'(?P<CUTIME>[0-9]+) '
'(?P<CSTIME>[0-9]+) '
'(?P<PRIORITY>[0-9]+) '
'(?P<NICE>[0-9]+) '
'(?P<NUM_THREADS>[0-9]+) '
'(?P<ITREALVALUE>[0-9]+) '
'(?P<STARTTIME>[0-9]+) '
'(?P<VSIZE>[0-9]+) '
'(?P<RSS>[0-9]+) '
'(?P<RSSLIM>[0-9]+) '
'(?P<STARTCODE>[0-9]+) '
'(?P<ENDCODE>[0-9]+) '
'(?P<STARTSTACK>[0-9]+) '
'(?P<KSTKESP>[0-9]+) '
'(?P<KSTKEIP>[0-9]+) '
'(?P<SIGNAL>[0-9]+) '
'(?P<BLOCKED>[0-9]+) '
'(?P<SIGIGNORE>[0-9]+) '
'(?P<SIGCATCH>[0-9]+) '
'(?P<WCHAN>[0-9]+) '
'(?P<NSWAP>[0-9]+) '
'(?P<CNSWAP>[0-9]+) '
'(?P<EXIT_SIGNAL>[0-9]+) '
'(?P<PROCESSOR>[0-9]+) '
'(?P<RT_PRIORITY>[0-9]+) '
'(?P<POLICY>[0-9]+) '
'(?P<DELAYACCT_BLKIO_TICKS>[0-9]+) '
'(?P<GUEST_TIME>[0-9]+) '
'(?P<CGUEST_TIME>[0-9]+)', re.IGNORECASE)
def __init__(self, raw, pid, vsize, rss):
self._raw = raw
self._pid = pid
self._vsize = vsize
self._rss = rss
@staticmethod
def load_file(stat_f):
raw = stat_f.readlines()
stat = ProcStat._PATTERN.match(raw[0])
return ProcStat(raw,
stat.groupdict().get('PID'),
stat.groupdict().get('VSIZE'),
stat.groupdict().get('RSS'))
@staticmethod
def load(pid):
with open(os.path.join('/proc', str(pid), 'stat'), 'r') as stat_f:
return ProcStat.load_file(stat_f)
@property
def raw(self):
return self._raw
@property
def pid(self):
return int(self._pid)
@property
def vsize(self):
return int(self._vsize)
@property
def rss(self):
return int(self._rss)
class ProcStatm(object):
"""Reads and stores information in /proc/pid/statm."""
_PATTERN = re.compile(r'^'
'(?P<SIZE>[0-9]+) '
'(?P<RESIDENT>[0-9]+) '
'(?P<SHARE>[0-9]+) '
'(?P<TEXT>[0-9]+) '
'(?P<LIB>[0-9]+) '
'(?P<DATA>[0-9]+) '
'(?P<DT>[0-9]+)', re.IGNORECASE)
def __init__(self, raw, size, resident, share, text, lib, data, dt):
self._raw = raw
self._size = size
self._resident = resident
self._share = share
self._text = text
self._lib = lib
self._data = data
self._dt = dt
@staticmethod
def load_file(statm_f):
raw = statm_f.readlines()
statm = ProcStatm._PATTERN.match(raw[0])
return ProcStatm(raw,
statm.groupdict().get('SIZE'),
statm.groupdict().get('RESIDENT'),
statm.groupdict().get('SHARE'),
statm.groupdict().get('TEXT'),
statm.groupdict().get('LIB'),
statm.groupdict().get('DATA'),
statm.groupdict().get('DT'))
@staticmethod
def load(pid):
with open(os.path.join('/proc', str(pid), 'statm'), 'r') as statm_f:
return ProcStatm.load_file(statm_f)
@property
def raw(self):
return self._raw
@property
def size(self):
return int(self._size)
@property
def resident(self):
return int(self._resident)
@property
def share(self):
return int(self._share)
@property
def text(self):
return int(self._text)
@property
def lib(self):
return int(self._lib)
@property
def data(self):
return int(self._data)
@property
def dt(self):
return int(self._dt)
class ProcStatus(object):
"""Reads and stores information in /proc/pid/status."""
_PATTERN = re.compile(r'^(?P<NAME>[A-Za-z0-9_]+):\s+(?P<VALUE>.*)')
def __init__(self, raw, dct):
self._raw = raw
self._pid = dct.get('Pid')
self._name = dct.get('Name')
self._vm_peak = dct.get('VmPeak')
self._vm_size = dct.get('VmSize')
self._vm_lck = dct.get('VmLck')
self._vm_pin = dct.get('VmPin')
self._vm_hwm = dct.get('VmHWM')
self._vm_rss = dct.get('VmRSS')
self._vm_data = dct.get('VmData')
self._vm_stack = dct.get('VmStk')
self._vm_exe = dct.get('VmExe')
self._vm_lib = dct.get('VmLib')
self._vm_pte = dct.get('VmPTE')
self._vm_swap = dct.get('VmSwap')
@staticmethod
def load_file(status_f):
raw = status_f.readlines()
dct = {}
for line in raw:
status_match = ProcStatus._PATTERN.match(line)
if status_match:
match_dict = status_match.groupdict()
dct[match_dict['NAME']] = match_dict['VALUE']
else:
raise SyntaxError('Unknown /proc/pid/status format.')
return ProcStatus(raw, dct)
@staticmethod
def load(pid):
with open(os.path.join('/proc', str(pid), 'status'), 'r') as status_f:
return ProcStatus.load_file(status_f)
@property
def raw(self):
return self._raw
@property
def pid(self):
return int(self._pid)
@property
def vm_peak(self):
"""Returns a high-water (peak) virtual memory size in kilo-bytes."""
if self._vm_peak.endswith('kB'):
return int(self._vm_peak.split()[0])
raise ValueError('VmPeak is not in kB.')
@property
def vm_size(self):
"""Returns a virtual memory size in kilo-bytes."""
if self._vm_size.endswith('kB'):
return int(self._vm_size.split()[0])
raise ValueError('VmSize is not in kB.')
@property
def vm_hwm(self):
"""Returns a high-water (peak) resident set size (RSS) in kilo-bytes."""
if self._vm_hwm.endswith('kB'):
return int(self._vm_hwm.split()[0])
raise ValueError('VmHWM is not in kB.')
@property
def vm_rss(self):
"""Returns a resident set size (RSS) in kilo-bytes."""
if self._vm_rss.endswith('kB'):
return int(self._vm_rss.split()[0])
raise ValueError('VmRSS is not in kB.')
class ProcMapsEntry(object):
"""A class representing one line in /proc/pid/maps."""
def __init__(
self, begin, end, readable, writable, executable, private, offset,
major, minor, inode, name):
self.begin = begin
self.end = end
self.readable = readable
self.writable = writable
self.executable = executable
self.private = private
self.offset = offset
self.major = major
self.minor = minor
self.inode = inode
self.name = name
def as_dict(self):
return {
'begin': self.begin,
'end': self.end,
'readable': self.readable,
'writable': self.writable,
'executable': self.executable,
'private': self.private,
'offset': self.offset,
'major': self.major,
'minor': self.minor,
'inode': self.inode,
'name': self.name,
}
class ProcMaps(object):
"""Reads and stores information in /proc/pid/maps."""
MAPS_PATTERN = re.compile(
r'^([a-f0-9]+)-([a-f0-9]+)\s+(.)(.)(.)(.)\s+([a-f0-9]+)\s+(\S+):(\S+)\s+'
r'(\d+)\s*(.*)$', re.IGNORECASE)
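  # A representative line that MAPS_PATTERN above is meant to match:
  #   08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm
  # The groups capture, in order: begin/end of the address range, the
  # r/w/x and private/shared flags, the file offset, major:minor device
  # numbers, the inode, and the optional pathname.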
def __init__(self):
self._sorted_indexes = []
self._dictionary = {}
self._sorted = True
def iter(self, condition):
if not self._sorted:
self._sorted_indexes.sort()
self._sorted = True
for index in self._sorted_indexes:
if not condition or condition(self._dictionary[index]):
yield self._dictionary[index]
def __iter__(self):
if not self._sorted:
self._sorted_indexes.sort()
self._sorted = True
for index in self._sorted_indexes:
yield self._dictionary[index]
@staticmethod
def load_file(maps_f):
table = ProcMaps()
for line in maps_f:
table.append_line(line)
return table
@staticmethod
def load(pid):
with open(os.path.join('/proc', str(pid), 'maps'), 'r') as maps_f:
return ProcMaps.load_file(maps_f)
def append_line(self, line):
entry = self.parse_line(line)
if entry:
self._append_entry(entry)
return entry
@staticmethod
def parse_line(line):
matched = ProcMaps.MAPS_PATTERN.match(line)
if matched:
return ProcMapsEntry( # pylint: disable=W0212
int(matched.group(1), 16), # begin
int(matched.group(2), 16), # end
matched.group(3), # readable
matched.group(4), # writable
matched.group(5), # executable
matched.group(6), # private
int(matched.group(7), 16), # offset
matched.group(8), # major
matched.group(9), # minor
int(matched.group(10), 10), # inode
matched.group(11) # name
)
else:
return None
@staticmethod
def constants(entry):
return (entry.writable == '-' and entry.executable == '-' and re.match(
        r'\S+(\.(so|dll|dylib|bundle)|chrome)((\.\d+)+\w*(\.\d+){0,3})?',
entry.name))
@staticmethod
def executable(entry):
return (entry.executable == 'x' and re.match(
        r'\S+(\.(so|dll|dylib|bundle)|chrome)((\.\d+)+\w*(\.\d+){0,3})?',
entry.name))
@staticmethod
def executable_and_constants(entry):
return (((entry.writable == '-' and entry.executable == '-') or
entry.executable == 'x') and re.match(
        r'\S+(\.(so|dll|dylib|bundle)|chrome)((\.\d+)+\w*(\.\d+){0,3})?',
entry.name))
def _append_entry(self, entry):
if self._sorted_indexes and self._sorted_indexes[-1] > entry.begin:
self._sorted = False
self._sorted_indexes.append(entry.begin)
self._dictionary[entry.begin] = entry
class ProcSmaps(object):
"""Reads and stores information in /proc/pid/smaps."""
_SMAPS_PATTERN = re.compile(r'^(?P<NAME>[A-Za-z0-9_]+):\s+(?P<VALUE>.*)')
class VMA(object):
def __init__(self):
self._size = 0
self._rss = 0
self._pss = 0
def append(self, name, value):
dct = {
'Size': '_size',
'Rss': '_rss',
'Pss': '_pss',
'Referenced': '_referenced',
'Private_Clean': '_private_clean',
'Shared_Clean': '_shared_clean',
'KernelPageSize': '_kernel_page_size',
'MMUPageSize': '_mmu_page_size',
}
if name in dct:
self.__setattr__(dct[name], value)
@property
def size(self):
if self._size.endswith('kB'):
return int(self._size.split()[0])
return int(self._size)
@property
def rss(self):
if self._rss.endswith('kB'):
return int(self._rss.split()[0])
return int(self._rss)
@property
def pss(self):
if self._pss.endswith('kB'):
return int(self._pss.split()[0])
return int(self._pss)
def __init__(self, raw, total_dct, maps, vma_internals):
self._raw = raw
self._size = total_dct['Size']
self._rss = total_dct['Rss']
self._pss = total_dct['Pss']
self._referenced = total_dct['Referenced']
self._shared_clean = total_dct['Shared_Clean']
self._private_clean = total_dct['Private_Clean']
self._kernel_page_size = total_dct['KernelPageSize']
self._mmu_page_size = total_dct['MMUPageSize']
self._maps = maps
self._vma_internals = vma_internals
@staticmethod
def load(pid):
with open(os.path.join('/proc', str(pid), 'smaps'), 'r') as smaps_f:
raw = smaps_f.readlines()
vma = None
vma_internals = collections.OrderedDict()
total_dct = collections.defaultdict(int)
maps = ProcMaps()
for line in raw:
maps_match = ProcMaps.MAPS_PATTERN.match(line)
if maps_match:
vma = maps.append_line(line.strip())
vma_internals[vma] = ProcSmaps.VMA()
else:
smaps_match = ProcSmaps._SMAPS_PATTERN.match(line)
if smaps_match:
match_dict = smaps_match.groupdict()
vma_internals[vma].append(match_dict['NAME'], match_dict['VALUE'])
total_dct[match_dict['NAME']] += int(match_dict['VALUE'].split()[0])
return ProcSmaps(raw, total_dct, maps, vma_internals)
@property
def size(self):
return self._size
@property
def rss(self):
return self._rss
@property
def referenced(self):
return self._referenced
@property
def pss(self):
return self._pss
@property
def private_clean(self):
return self._private_clean
@property
def shared_clean(self):
return self._shared_clean
@property
def kernel_page_size(self):
return self._kernel_page_size
@property
def mmu_page_size(self):
return self._mmu_page_size
@property
def vma_internals(self):
return self._vma_internals
class ProcPagemap(object):
"""Reads and stores partial information in /proc/pid/pagemap.
It picks up virtual addresses to read based on ProcMaps (/proc/pid/maps).
See https://www.kernel.org/doc/Documentation/vm/pagemap.txt for details.
"""
_BYTES_PER_PAGEMAP_VALUE = 8
_BYTES_PER_OS_PAGE = 4096
_VIRTUAL_TO_PAGEMAP_OFFSET = _BYTES_PER_OS_PAGE / _BYTES_PER_PAGEMAP_VALUE
_MASK_PRESENT = 1 << 63
_MASK_SWAPPED = 1 << 62
_MASK_FILEPAGE_OR_SHAREDANON = 1 << 61
_MASK_SOFTDIRTY = 1 << 55
_MASK_PFN = (1 << 55) - 1
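  # Decoding sketch for one 64-bit pagemap entry using the masks above
  # (the value is illustrative): for value = (1 << 63) | 0x1a2b3 the page
  # is present (bit 63 set), not swapped (bit 62 clear), and its page
  # frame number is value & _MASK_PFN == 0x1a2b3 (bits 0-54).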
class VMA(object):
def __init__(self, vsize, present, swapped, pageframes):
self._vsize = vsize
self._present = present
self._swapped = swapped
self._pageframes = pageframes
@property
def vsize(self):
return int(self._vsize)
@property
def present(self):
return int(self._present)
@property
def swapped(self):
return int(self._swapped)
@property
def pageframes(self):
return self._pageframes
def __init__(self, vsize, present, swapped, vma_internals, in_process_dup):
self._vsize = vsize
self._present = present
self._swapped = swapped
self._vma_internals = vma_internals
self._in_process_dup = in_process_dup
@staticmethod
def load(pid, maps):
total_present = 0
total_swapped = 0
total_vsize = 0
in_process_dup = 0
vma_internals = collections.OrderedDict()
process_pageframe_set = set()
pagemap_fd = os.open(
os.path.join('/proc', str(pid), 'pagemap'), os.O_RDONLY)
for vma in maps:
present = 0
swapped = 0
vsize = 0
pageframes = collections.defaultdict(int)
begin_offset = ProcPagemap._offset(vma.begin)
chunk_size = ProcPagemap._offset(vma.end) - begin_offset
os.lseek(pagemap_fd, begin_offset, os.SEEK_SET)
buf = os.read(pagemap_fd, chunk_size)
if len(buf) < chunk_size:
        _LOGGER.warning('Failed to read pagemap at 0x%x in %d.' % (vma.begin, pid))
pagemap_values = struct.unpack(
'=%dQ' % (len(buf) / ProcPagemap._BYTES_PER_PAGEMAP_VALUE), buf)
for pagemap_value in pagemap_values:
vsize += ProcPagemap._BYTES_PER_OS_PAGE
if pagemap_value & ProcPagemap._MASK_PRESENT:
if (pagemap_value & ProcPagemap._MASK_PFN) in process_pageframe_set:
in_process_dup += ProcPagemap._BYTES_PER_OS_PAGE
else:
process_pageframe_set.add(pagemap_value & ProcPagemap._MASK_PFN)
if (pagemap_value & ProcPagemap._MASK_PFN) not in pageframes:
present += ProcPagemap._BYTES_PER_OS_PAGE
pageframes[pagemap_value & ProcPagemap._MASK_PFN] += 1
if pagemap_value & ProcPagemap._MASK_SWAPPED:
swapped += ProcPagemap._BYTES_PER_OS_PAGE
vma_internals[vma] = ProcPagemap.VMA(vsize, present, swapped, pageframes)
total_present += present
total_swapped += swapped
total_vsize += vsize
os.close(pagemap_fd)
return ProcPagemap(total_vsize, total_present, total_swapped,
vma_internals, in_process_dup)
@staticmethod
def _offset(virtual_address):
return virtual_address / ProcPagemap._VIRTUAL_TO_PAGEMAP_OFFSET
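  # Worked example for _offset above: every 4096-byte virtual page owns one
  # 8-byte pagemap entry, so the divisor is 4096 / 8 = 512. Virtual address
  # 0x400000 (page number 1024) therefore maps to byte offset
  # 0x400000 / 512 = 8192 in /proc/pid/pagemap.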
@property
def vsize(self):
return int(self._vsize)
@property
def present(self):
return int(self._present)
@property
def swapped(self):
return int(self._swapped)
@property
def vma_internals(self):
return self._vma_internals
class _ProcessMemory(object):
"""Aggregates process memory information from /proc for manual testing."""
def __init__(self, pid):
self._pid = pid
self._maps = None
self._pagemap = None
self._stat = None
self._status = None
self._statm = None
self._smaps = []
def _read(self, proc_file):
lines = []
with open(os.path.join('/proc', str(self._pid), proc_file), 'r') as proc_f:
lines = proc_f.readlines()
return lines
def read_all(self):
self.read_stat()
self.read_statm()
self.read_status()
self.read_smaps()
self.read_maps()
self.read_pagemap(self._maps)
def read_maps(self):
self._maps = ProcMaps.load(self._pid)
def read_pagemap(self, maps):
self._pagemap = ProcPagemap.load(self._pid, maps)
def read_smaps(self):
self._smaps = ProcSmaps.load(self._pid)
def read_stat(self):
self._stat = ProcStat.load(self._pid)
def read_statm(self):
self._statm = ProcStatm.load(self._pid)
def read_status(self):
self._status = ProcStatus.load(self._pid)
@property
def pid(self):
return self._pid
@property
def maps(self):
return self._maps
@property
def pagemap(self):
return self._pagemap
@property
def smaps(self):
return self._smaps
@property
def stat(self):
return self._stat
@property
def statm(self):
return self._statm
@property
def status(self):
return self._status
def main(argv):
"""The main function for manual testing."""
_LOGGER.setLevel(logging.WARNING)
handler = logging.StreamHandler()
handler.setLevel(logging.WARNING)
handler.setFormatter(logging.Formatter(
'%(asctime)s:%(name)s:%(levelname)s:%(message)s'))
_LOGGER.addHandler(handler)
pids = []
for arg in argv[1:]:
try:
pid = int(arg)
except ValueError:
raise SyntaxError("%s is not an integer." % arg)
else:
pids.append(pid)
procs = {}
for pid in pids:
procs[pid] = _ProcessMemory(pid)
procs[pid].read_all()
    print('=== PID: %d ===' % pid)
    print(' stat: %d' % procs[pid].stat.vsize)
    print(' statm: %d' % (procs[pid].statm.size * 4096))
    print(' status: %d (Peak:%d)' % (procs[pid].status.vm_size * 1024,
                                     procs[pid].status.vm_peak * 1024))
    print(' smaps: %d' % (procs[pid].smaps.size * 1024))
    print('pagemap: %d' % procs[pid].pagemap.vsize)
    print(' stat: %d' % (procs[pid].stat.rss * 4096))
    print(' statm: %d' % (procs[pid].statm.resident * 4096))
    print(' status: %d (Peak:%d)' % (procs[pid].status.vm_rss * 1024,
                                     procs[pid].status.vm_hwm * 1024))
    print(' smaps: %d' % (procs[pid].smaps.rss * 1024))
    print('pagemap: %d' % procs[pid].pagemap.present)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | 4,213,629,520,312,965,600 | 27.427984 | 80 | 0.557711 | false |
sorgerlab/indra | indra/sources/isi/experiments.py | 6 | 2074 | import pickle
import time
import os
from indra.sources.isi.api import process_preprocessed
from indra.sources.isi.preprocessor import IsiPreprocessor
def abstracts_runtime():
pfile = '/Users/daniel/Downloads/text_content_sample.pkl'
dump = pickle.load(open(pfile, 'rb'))
all_abstracts = dump['pubmed']
# abstract_counts = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
# abstract_counts = [20, 40, 60, 80, 100]
# abstract_counts = [20, 50, 100, 1000, None]
# abstract_counts = [20, 50, 100, 200, 300, 400, 500]
abstract_counts = [10]
times = []
for count in abstract_counts:
print('==============================================================')
print('Count:', count)
print('==============================================================')
with open('isi_experiment_log.txt', 'a') as f:
f.write('Reading and processing ' + str(count) + 'abstracts\n')
start_time = time.time()
if count is None:
abstract_subset = all_abstracts
else:
abstract_subset = all_abstracts[:count]
assert(len(abstract_subset) == count)
preprocessed_dir = 'preprocessed_' + str(count)
os.mkdir(preprocessed_dir)
preprocessor = IsiPreprocessor(preprocessed_dir)
preprocessor.preprocess_abstract_list(abstract_subset)
output_dir = 'output_' + str(count)
os.mkdir(output_dir)
ip = process_preprocessed(preprocessor, 8, output_dir)
print('Statements:', ip.statements)
output_pickle = 'isi_processed_abstracts_' + str(count) + '.pkl'
pickle.dump(ip, open(output_pickle, 'wb'))
# How long did that take?
        elapsed_s = time.time() - start_time
        elapsed_min = elapsed_s / 60.0
        times.append(elapsed_min)
with open('isi_experiment_log.txt', 'a') as f:
f.write('Times so far: ')
f.write(repr(times))
f.write('\n')
print('Times:')
print(times)
if __name__ == '__main__':
abstracts_runtime()
| bsd-2-clause | 634,808,397,518,998,700 | 33 | 79 | 0.559306 | false |