repo_name (string) | path (string) | copies (string class) | size (string) | content (string) | license (string class) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool class) |
---|---|---|---|---|---|---|---|---|---|---|
town-hall-pinball/project-omega | tests/machine/test_playfield.py | 1 | 2243 |
# Copyright (c) 2014 - 2016 townhallpinball.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import unittest
from pin.lib import p
from tests import fixtures
import logging
log = logging.getLogger("pin")
class TestPlayfield(unittest.TestCase):
def setUp(self):
fixtures.reset()
self.playfield = p.modes["playfield"]
self.playfield.enable(children=True)
p.modes["simulator"].enable()
def test_live(self):
p.modes["trough"].eject()
p.now = 1
fixtures.loop()
p.switches["ball_launch_button"].activate()
p.now = 2
fixtures.loop()
p.now = 3
fixtures.loop()
p.now = 4
fixtures.loop()
self.assertTrue(self.playfield.live)
def test_dead(self):
p.modes["trough"].eject()
p.now = 1
fixtures.loop()
p.switches["ball_launch_button"].activate()
p.now = 2
fixtures.loop()
p.now = 3
fixtures.loop()
p.now = 4
fixtures.loop()
self.assertTrue(self.playfield.live)
p.switches["trough_4"].activate()
fixtures.loop()
p.now = 5
fixtures.loop()
self.assertFalse(self.playfield.live)
| mit | 6,205,678,714,841,098,000 | 31.507246 | 77 | 0.670085 | false |
umlfri/umlfri2 | umlfri2/qtgui/base/contextmenu.py | 1 | 1680 |
from functools import partial
from PyQt5.QtGui import QIcon, QKeySequence
from PyQt5.QtWidgets import QMenu, QAction
from umlfri2.application import Application
from umlfri2.qtgui.base import image_loader
class ContextMenu(QMenu):
def _add_menu_item(self, icon, label, shortcut, action=None, sub_menu=None):
ret = QAction(label, sub_menu or self)
if shortcut is not None:
ret.setShortcut(QKeySequence(shortcut))
if isinstance(icon, str):
ret.setIcon(QIcon.fromTheme(icon))
elif isinstance(icon, QIcon):
ret.setIcon(icon)
if action is None:
ret.setEnabled(False)
else:
ret.triggered.connect(action)
(sub_menu or self).addAction(ret)
return ret
def _add_type_menu_item(self, type, action=None, sub_menu=None, format="{0}"):
translation = type.metamodel.get_translation(Application().language.current_language)
ret = QAction(format.format(translation.translate(type)), sub_menu or self)
ret.setIcon(image_loader.load_icon(type.icon))
if action is None:
ret.setEnabled(False)
else:
ret.triggered.connect(partial(action, type))
(sub_menu or self).addAction(ret)
return ret
def _add_sub_menu_item(self, label, enabled=True, sub_menu=None):
ret = QAction(label, sub_menu or self)
menu = QMenu()
ret.setMenu(menu)
ret.setEnabled(enabled)
(sub_menu or self).addAction(ret)
return menu
| gpl-3.0 | 3,216,306,957,850,884,000 | 28.473684 | 93 | 0.591667 | false |
piotrmaslanka/satella | tests/test_coding/test_monitor.py | 1 | 5062 |
import unittest
from queue import Queue
from threading import Thread
from time import sleep
from satella.coding import Monitor
class MonitorTest(unittest.TestCase):
def test_synchronize_on(self):
class TestedMasterClass(Monitor):
def __init__(self):
self.value = 0
super().__init__()
def get_locking_class(self):
class LockingClass:
@Monitor.synchronize_on(self)
def get_value(self2):
self.value += 1
return LockingClass()
msc = TestedMasterClass()
lc = msc.get_locking_class()
class TesterThread(Thread):
def run(self):
lc.get_value()
with Monitor.acquire(msc):
TesterThread().start()
sleep(0.1)
self.assertEqual(msc.value, 0)
with Monitor.release(msc):
sleep(0.1)
self.assertEqual(msc.value, 1)
def test_release_contextmanager(self):
class TestedClass(Monitor):
def __init__(self, cqueue):
self.cqueue = cqueue
Monitor.__init__(self)
@Monitor.synchronized
def execute(self):
self.cqueue.put(1)
sleep(1)
self.cqueue.get()
class TesterThread(Thread):
def __init__(self, tc):
self.tc = tc
Thread.__init__(self)
def run(self):
self.tc.execute()
cq = Queue()
cq.put(1)
tc = TestedClass(cq)
tt = TesterThread(tc)
with Monitor.acquire(tc):
with Monitor.release(tc):
tt.start()
sleep(0.4)
self.assertEqual(cq.qsize(), 2)
def test_release_contextmanager_syntax(self):
class TestedClass(Monitor):
def __init__(self, cqueue):
self.cqueue = cqueue
Monitor.__init__(self)
@Monitor.synchronized
def execute(self):
self.cqueue.put(1)
sleep(1)
self.cqueue.get()
class TesterThread(Thread):
def __init__(self, tc):
self.tc = tc
Thread.__init__(self)
def run(self):
self.tc.execute()
cq = Queue()
cq.put(1)
tc = TestedClass(cq)
tt = TesterThread(tc)
with tc:
with Monitor.release(tc):
tt.start()
sleep(0.4)
self.assertEqual(cq.qsize(), 2)
def test_acquire_contextmanager(self):
class TestedClass(Monitor):
def __init__(self, cqueue):
self.cqueue = cqueue
Monitor.__init__(self)
@Monitor.synchronized
def execute(self):
self.cqueue.put(1)
sleep(1)
self.cqueue.get()
class TesterThread(Thread):
def __init__(self, tc):
self.tc = tc
Thread.__init__(self)
def run(self):
self.tc.execute()
cq = Queue()
cq.put(1)
tc = TestedClass(cq)
tt = TesterThread(tc)
with Monitor.acquire(tc):
tt.start()
sleep(0.4)
self.assertEqual(cq.qsize(), 1)
def test_monitoring(self):
class TestedClass(Monitor):
def __init__(self, cqueue):
self.cqueue = cqueue
Monitor.__init__(self)
@Monitor.synchronized
def execute(self):
self.cqueue.put(1)
sleep(1)
self.cqueue.get()
class TesterThread(Thread):
def __init__(self, tc):
self.tc = tc
Thread.__init__(self)
def run(self):
self.tc.execute()
q = Queue()
tc = TestedClass(q)
a, b = TesterThread(tc), TesterThread(tc)
a.start(), b.start()
while a.is_alive() or b.is_alive():
sleep(0.1)
self.assertNotEqual(q.qsize(), 2)
def test_monitoring_synchronize_on_attribute(self):
class TestedClass:
def __init__(self, cqueue):
self.cqueue = cqueue
self.monitor = Monitor()
@Monitor.synchronize_on_attribute('monitor')
def execute(self):
self.cqueue.put(1)
sleep(1)
self.cqueue.get()
class TesterThread(Thread):
def __init__(self, tc):
self.tc = tc
Thread.__init__(self)
def run(self):
self.tc.execute()
q = Queue()
tc = TestedClass(q)
a, b = TesterThread(tc), TesterThread(tc)
a.start(), b.start()
while a.is_alive() or b.is_alive():
sleep(0.1)
self.assertNotEqual(q.qsize(), 2)
| bsd-3-clause | 3,409,629,692,295,850,500 | 26.069519 | 56 | 0.467404 | false |
JJMinton/conferenceTimer | file_change_handler.py | 1 | 2937 |
import path
import asyncio
from datetime import datetime, timedelta
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from read_schedule import read_schedule
import config
from config import logging
class FileChangeHandler(PatternMatchingEventHandler):
def __init__(self, watch_file, controller_function, args=[], loop=None):
PatternMatchingEventHandler.__init__(self, patterns=[watch_file])
self.controller_function = controller_function
self.args = args
self.loop = asyncio.SelectorEventLoop() if loop is None else loop
self.async_task = None
self.watch_file = watch_file
def process(self, schedule_file_name=None):
if schedule_file_name is None:
schedule_file_name = self.watch_file
logging.debug('FileChangeHandler.process: Processing {}'.format(schedule_file_name))
schedule = read_schedule(schedule_file_name)
#Stop current run_schedule
if self.async_task is not None:
logging.debug('Stopping previous async_task')
self.async_task.cancel()
asyncio.wait_for(self.async_task, 100, loop=self.loop)
del self.async_task
self.async_task = None
#Start new run_schedule
logging.debug('FileChangeHandler.process: Starting new async_task')
self.async_task = asyncio.ensure_future(self.controller_function(schedule, self.loop, *self.args), loop=self.loop)
logging.debug('FileChangeHandler.process: Return from processing')
return
#ensure immediate return
def on_created(self, event):
logging.info('FileChangeHandler.on_created: File creation detected')
self.process(event.src_path)
def on_modified(self, event):
logging.info('FileChangeHandler.on_modified: File change detected')
self.process(event.src_path)
if __name__=="__main__":
if config.LIGHT_DEBUG:
from light_controls import debug
debug()
from schedule_handler import Schedule_Runner
schedule_runner = Schedule_Runner()
loop = schedule_runner.controller.loop
file_change_handler = FileChangeHandler(config.SCHEDULE_FILE, schedule_runner.run_schedule, loop=loop)
obs = Observer();
obs.schedule(file_change_handler, path.Path(config.SCHEDULE_FILE).abspath().dirname()) #Define what file to watch and how
obs.start() #start watching file
file_change_handler.process() #start schedule running
try:
while True:
#This does nothing except step through the loops (why is this necessary?)
file_change_handler.loop.run_until_complete(asyncio.ensure_future(asyncio.sleep(0.1, loop=file_change_handler.loop), loop=file_change_handler.loop)) #arbitrary sleep time here I think. Could it be forever?
except KeyboardInterrupt:
obs.stop();
#finally:
# obs.join();
| gpl-3.0 | -4,415,477,771,282,434,000 | 39.232877 | 217 | 0.688798 | false |
FNST-OpenStack/cloudkitty | cloudkitty/tenant_fetcher/__init__.py | 1 | 1212 |
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
import abc
from oslo.config import cfg
import six
fetchers_opts = [
cfg.StrOpt('backend',
default='keystone',
help='Driver used to fetch tenant list.')
]
cfg.CONF.register_opts(fetchers_opts, 'tenant_fetcher')
@six.add_metaclass(abc.ABCMeta)
class BaseFetcher(object):
"""CloudKitty tenants fetcher.
Provides Cloudkitty integration with a backend announcing ratable tenants.
"""
@abc.abstractmethod
def get_tenants(self):
"""Get a list a tenants to rate."""
| apache-2.0 | 2,087,861,238,708,588,500 | 27.833333 | 78 | 0.69199 | false |
HPI-SWA-Lab/RSqueak | rsqueakvm/test/test_socket_primitives.py | 1 | 6424 |
import py
import time
from rsqueakvm import constants
from rsqueakvm.model.compiled_methods import W_PreSpurCompiledMethod
from rsqueakvm.model.variable import W_BytesObject
from rsqueakvm.primitives import prim_table
from rsqueakvm.primitives.constants import EXTERNAL_CALL
from rsqueakvm.error import PrimitiveFailedError
from rsqueakvm.plugins import socket_plugin as socket
from .util import create_space, copy_to_module, cleanup_module
from .test_primitives import mock
def setup_module():
space = create_space(bootstrap = True)
space.set_system_attribute(constants.SYSTEM_ATTRIBUTE_IMAGE_NAME_INDEX, "IMAGENAME")
wrap = space.w
bootstrap_class = space.bootstrap_class
new_frame = space.make_frame
copy_to_module(locals(), __name__)
def teardown_module():
cleanup_module(__name__)
IMAGENAME = "anImage.image"
def _prim(space, name, module, stack, context = None):
interp, w_frame, argument_count = mock(space, stack, context)
orig_stack = list(w_frame.as_context_get_shadow(space).stack())
prim_meth = W_PreSpurCompiledMethod(space, 0, header=17045052)
prim_meth._primitive = EXTERNAL_CALL
prim_meth.argsize = argument_count - 1
descr = space.wrap_list([space.wrap_string(module), space.wrap_string(name)])
prim_meth.literalatput0(space, 1, descr)
def call():
prim_table[EXTERNAL_CALL](interp, w_frame.as_context_get_shadow(space), argument_count-1, prim_meth)
return w_frame, orig_stack, call
def prim(name, module=None, stack = None, context = None):
if module is None: module = "SocketPlugin"
if stack is None: stack = [space.w_nil]
w_frame, orig_stack, call = _prim(space, name, module, stack, context)
call()
res = w_frame.as_context_get_shadow(space).pop()
s_frame = w_frame.as_context_get_shadow(space)
assert not s_frame.stackdepth() - s_frame.tempsize() # check args are consumed
return res
def prim_fails(name, module, stack):
w_frame, orig_stack, call = _prim(space, name, module, stack)
with py.test.raises(PrimitiveFailedError):
call()
assert w_frame.as_context_get_shadow(space).stack() == orig_stack
def test_vmdebugging():
assert prim("isRSqueak", "VMDebugging") is space.w_true
def test_resolver_start_lookup():
assert prim("primitiveResolverStartNameLookup", "SocketPlugin",
[space.w_nil, space.wrap_string("google.com")]) == space.w_nil
def test_resolver_lookup_result():
assert prim("primitiveResolverStartNameLookup", "SocketPlugin",
[space.w_nil, space.wrap_string("google.com")]) == space.w_nil
w_res = prim("primitiveResolverNameLookupResult", "SocketPlugin")
assert isinstance(w_res, W_BytesObject)
def test_socket_create():
assert isinstance(prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 2, 0, 8000, 8000, 13, 14, 15]), socket.W_SocketHandle)
assert isinstance(prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 0, 0, 8000, 8000, 13, 14, 15]), socket.W_SocketHandle)
def test_socket_status():
handle = prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 2, 0, 8000, 8000, 13, 14, 15])
assert prim("primitiveSocketConnectionStatus", "SocketPlugin",
[space.w_nil, handle]).value == 0
assert prim("primitiveSocketConnectionStatus", "SocketPlugin",
[space.w_nil, 3200]).value == -1
def test_socket_connect():
handle = prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 2, 0, 8000, 8000, 13, 14, 15])
prim("primitiveResolverStartNameLookup", "SocketPlugin",
[space.w_nil, space.wrap_string("google.com")])
w_host = prim("primitiveResolverNameLookupResult", "SocketPlugin")
assert prim("primitiveSocketConnectToPort", "SocketPlugin",
[space.w_nil, handle, w_host, space.wrap_int(80)])
assert prim("primitiveSocketConnectionStatus", "SocketPlugin",
[space.w_nil, handle]).value == 2
def test_socket_ready():
handle = prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 2, 0, 8000, 8000, 13, 14, 15])
prim("primitiveResolverStartNameLookup", "SocketPlugin",
[space.w_nil, space.wrap_string("google.com")])
w_host = prim("primitiveResolverNameLookupResult", "SocketPlugin")
assert prim("primitiveSocketConnectToPort", "SocketPlugin",
[space.w_nil, handle, w_host, space.wrap_int(80)])
assert prim("primitiveSocketConnectionStatus", "SocketPlugin",
[space.w_nil, handle]).value == 2
time.sleep(0.5)
assert prim("primitiveSocketReceiveDataAvailable", "SocketPlugin",
[space.w_nil, handle]) == space.w_false
_http_get = """
GET / HTTP/1.1
User-Agent: curl/7.37.1
Host: www.google.de
Accept: */*
"""
def test_socket_send_and_read_into():
handle = prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 2, 0, 8000, 8000, 13, 14, 15])
prim("primitiveResolverStartNameLookup", "SocketPlugin",
[space.w_nil, space.wrap_string("google.com")])
w_host = prim("primitiveResolverNameLookupResult", "SocketPlugin")
assert prim("primitiveSocketConnectToPort", "SocketPlugin",
[space.w_nil, handle, w_host, space.wrap_int(80)])
assert prim("primitiveSocketConnectionStatus", "SocketPlugin",
[space.w_nil, handle]).value == 2
assert prim("primitiveSocketSendDataBufCount", "SocketPlugin",
[space.w_nil, handle, space.wrap_string(_http_get),
space.wrap_int(1), space.wrap_int(len(_http_get))]).value == len(_http_get)
time.sleep(0.5)
assert prim("primitiveSocketReceiveDataAvailable", "SocketPlugin",
[space.w_nil, handle]) == space.w_true
w_str = space.wrap_string("_hello")
assert prim("primitiveSocketReceiveDataBufCount", "SocketPlugin",
[space.w_nil, handle, w_str, space.wrap_int(2), space.wrap_int(5)]).value == 5
assert w_str.unwrap_string(None) == "_HTTP/"
def test_socket_destroy():
handle = prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 2, 0, 8000, 8000, 13, 14, 15])
assert prim("primitiveSocketDestroy", "SocketPlugin",
[space.w_nil, handle]).value == -1
| bsd-3-clause | -5,544,249,213,781,126,000 | 43.611111 | 108 | 0.669988 | false |
mlabru/ptracks | view/piloto/dlg_aproximacao_ui.py | 1 | 2854 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './dlg_aproximacao.ui'
#
# Created: Tue Dec 6 11:23:22 2016
# by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CDlgAproximacao(object):
def setupUi(self, CDlgAproximacao):
CDlgAproximacao.setObjectName(_fromUtf8("CDlgAproximacao"))
CDlgAproximacao.resize(259, 151)
self.verticalLayout_2 = QtGui.QVBoxLayout(CDlgAproximacao)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.gbx_aproximacao = QtGui.QGroupBox(CDlgAproximacao)
self.gbx_aproximacao.setObjectName(_fromUtf8("gbx_aproximacao"))
self.verticalLayout = QtGui.QVBoxLayout(self.gbx_aproximacao)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.cbx_apx = QtGui.QComboBox(self.gbx_aproximacao)
self.cbx_apx.setObjectName(_fromUtf8("cbx_apx"))
self.verticalLayout.addWidget(self.cbx_apx)
self.verticalLayout_2.addWidget(self.gbx_aproximacao)
self.lbl_comando = QtGui.QLabel(CDlgAproximacao)
self.lbl_comando.setStyleSheet(_fromUtf8("background-color:rgb(0, 0, 0);\n"
"color:rgb(0, 190, 0)"))
self.lbl_comando.setObjectName(_fromUtf8("lbl_comando"))
self.verticalLayout_2.addWidget(self.lbl_comando)
self.bbx_aproximacao = QtGui.QDialogButtonBox(CDlgAproximacao)
self.bbx_aproximacao.setOrientation(QtCore.Qt.Horizontal)
self.bbx_aproximacao.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.bbx_aproximacao.setObjectName(_fromUtf8("bbx_aproximacao"))
self.verticalLayout_2.addWidget(self.bbx_aproximacao)
self.retranslateUi(CDlgAproximacao)
QtCore.QObject.connect(self.bbx_aproximacao, QtCore.SIGNAL(_fromUtf8("accepted()")), CDlgAproximacao.accept)
QtCore.QObject.connect(self.bbx_aproximacao, QtCore.SIGNAL(_fromUtf8("rejected()")), CDlgAproximacao.reject)
QtCore.QMetaObject.connectSlotsByName(CDlgAproximacao)
def retranslateUi(self, CDlgAproximacao):
CDlgAproximacao.setWindowTitle(_translate("CDlgAproximacao", "Aproximação", None))
self.gbx_aproximacao.setTitle(_translate("CDlgAproximacao", "Aproximações", None))
self.lbl_comando.setText(_translate("CDlgAproximacao", "APX 1001", None))
| gpl-3.0 | -5,954,425,699,437,925,000 | 46.5 | 116 | 0.72386 | false |
matthieu-meaux/DLLM | examples/Wrapper_Multipoint/test_multipoint_analysis_AoA.py | 1 | 2570 |
# -*-mode: python; py-indent-offset: 4; tab-width: 8; coding: iso-8859-1 -*-
# DLLM (non-linear Differentiated Lifting Line Model, open source software)
#
# Copyright (C) 2013-2015 Airbus Group SAS
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# https://github.com/matthieu-meaux/DLLM.git
#
# @author : Matthieu MEAUX
#
from DLLM.DLLMEval.DLLMMP import DLLMMP
import os
from glob import glob
config_dict={}
config_dict['Case.nb_conditions']=3
config_dict['Case.condition_name']='cond'
config_dict['Case.AoA_id_list']=['AoA1','AoA2','AoA3']
# cond1 Operating condition information
config_dict['Case.cond1.OC.Mach']=0.8
config_dict['Case.cond1.OC.AoA']=3.5
config_dict['Case.cond1.OC.altitude']=10000.
# cond2 Operating condition information
config_dict['Case.cond2.OC.Mach']=0.6
config_dict['Case.cond2.OC.AoA']=4.5
config_dict['Case.cond2.OC.altitude']=5000.
# cond3 Operating condition information
config_dict['Case.cond3.OC.Mach']=0.4
config_dict['Case.cond3.OC.AoA']=6.
config_dict['Case.cond3.OC.altitude']=1000.
# Parameterisation configuration
config_dict['Case.param.geom_type']='Broken'
config_dict['Case.param.n_sect']=20
config_dict['Case.param.BCfilename']='input_parameters_AoA.par'
config_dict['Case.param.airfoil.type']='simple'
config_dict['Case.param.airfoil.AoA0']=-2.
config_dict['Case.param.airfoil.Cm0']=-0.1
# DLLM configuration
config_dict['Case.DLLM.type']='Solver'
config_dict['Case.DLLM.method']='inhouse'
config_dict['Case.DLLM.relax_factor']=0.99
config_dict['Case.DLLM.stop_residual']=1e-9
config_dict['Case.DLLM.max_iterations']=100
config_dict['Case.cond1.DLLM.gamma_file_name']='cond1_gamma.dat'
config_dict['Case.cond2.DLLM.gamma_file_name']='cond2_gamma.dat'
config_dict['Case.cond3.DLLM.gamma_file_name']='cond3_gamma.dat'
list_log=glob('*.log')
for log in list_log:
os.remove(log)
MP=DLLMMP('Case')
MP.configure(config_dict)
MP.analysis()
| gpl-2.0 | -4,441,377,056,161,138,000 | 34.694444 | 82 | 0.739689 | false |
sharad/calibre | src/calibre/gui2/dialogs/confirm_delete_location.py | 1 | 1511 |
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal [email protected]' \
'2010, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'
from functools import partial
from calibre.gui2.dialogs.confirm_delete_location_ui import Ui_Dialog
from PyQt5.Qt import QDialog, Qt, QPixmap, QIcon
class Dialog(QDialog, Ui_Dialog):
def __init__(self, msg, name, parent):
QDialog.__init__(self, parent)
self.setupUi(self)
self.loc = None
self.msg.setText(msg)
self.name = name
self.buttonBox.setFocus(Qt.OtherFocusReason)
self.button_lib.clicked.connect(partial(self.set_loc, 'lib'))
self.button_device.clicked.connect(partial(self.set_loc, 'dev'))
self.button_both.clicked.connect(partial(self.set_loc, 'both'))
def set_loc(self, loc):
self.loc = loc
self.accept()
def choice(self):
return self.loc
def break_cycles(self):
for x in ('lib', 'device', 'both'):
b = getattr(self, 'button_'+x)
try:
b.clicked.disconnect()
except:
pass
def confirm_location(msg, name, parent=None, pixmap='dialog_warning.png'):
d = Dialog(msg, name, parent)
d.label.setPixmap(QPixmap(I(pixmap)))
d.setWindowIcon(QIcon(I(pixmap)))
d.resize(d.sizeHint())
ret = d.exec_()
d.break_cycles()
if ret == d.Accepted:
return d.choice()
return None
| gpl-3.0 | -5,930,206,761,042,180,000 | 28.627451 | 74 | 0.606883 | false |
neiljdo/readysaster-icannhas-web | readysaster-icannhas-web/users/views.py | 1 | 2241 |
# -*- coding: utf-8 -*-
# Import the reverse lookup function
from django.core.urlresolvers import reverse
# view imports
from django.views.generic import DetailView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from django.views.generic import ListView
# Only authenticated users can access views using this.
from braces.views import LoginRequiredMixin
# Import the form from users/forms.py
from .forms import UserForm
# Import the customized User model
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
form_class = UserForm
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class FetchFloodMapView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
def get_context_data(self, **kwargs):
context_data = super(FetchFloodMapView, self).get_context_data(**kwargs)
# fetch flood maps using NOAH API
municipality = self.object.lgu.municipality
floodmaps = municipality.get_floodmaps()
# add newly fetched floodmaps to context
context_data.update({
'floodmaps': floodmaps
})
return context_data
| bsd-3-clause | -6,875,853,870,313,985,000 | 28.103896 | 80 | 0.707274 | false |
SKIRT/PTS | magic/plot/imagegrid.py | 1 | 106384 |
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.plot.imagegrid Contains the ImageGridPlotter classes.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import aplpy
from abc import ABCMeta, abstractproperty
import matplotlib.pyplot as plt
from matplotlib import cm
from collections import OrderedDict, defaultdict
# Import the relevant PTS classes and modules
from ..tools.plotting import get_vmin_vmax
from ...core.tools import filesystem as fs
from ..core.frame import Frame
from ...core.basics.log import log
from ...core.basics.configurable import Configurable
from ...core.tools.utils import lazyproperty, memoize_method
from ...core.tools import sequences
from ..core.image import Image
from ...core.basics.distribution import Distribution
from ...core.basics.plot import MPLFigure
from ...core.basics.composite import SimplePropertyComposite
from ...core.basics.plot import normal_colormaps
from ..core.list import uniformize
from ...core.tools import numbers
from ...core.tools import types
# ------------------------------------------------------------------------------
light_theme = "light"
dark_theme = "dark"
themes = [light_theme, dark_theme]
# ------------------------------------------------------------------------------
default_cmap = "inferno"
default_residual_cmap = 'RdBu'
default_absolute_residual_cmap = "OrRd"
# ------------------------------------------------------------------------------
# Initialize dictionary for light theme settings
light_theme_settings = OrderedDict()
# Set parameters
light_theme_settings['axes.facecolor'] = 'white'
light_theme_settings['savefig.facecolor'] = 'white'
light_theme_settings['axes.edgecolor'] = 'black'
light_theme_settings['xtick.color'] = 'black'
light_theme_settings['ytick.color'] = 'black'
light_theme_settings["axes.labelcolor"] = 'black'
light_theme_settings["text.color"] = 'black'
# light_theme_settings["axes.titlecolor"]='black'
# ------------------------------------------------------------------------------
# Initialize dictionary for dark theme settings
dark_theme_settings = OrderedDict()
# Set parameters
dark_theme_settings['axes.facecolor'] = 'black'
dark_theme_settings['savefig.facecolor'] = 'black'
dark_theme_settings['axes.edgecolor'] = 'white'
dark_theme_settings['xtick.color'] = 'white'
dark_theme_settings['ytick.color'] = 'white'
dark_theme_settings["axes.labelcolor"] ='white'
dark_theme_settings["text.color"] = 'white'
#plt.rcParams["axes.titlecolor"] = 'white'
# ------------------------------------------------------------------------------
class ImagePlotSettings(SimplePropertyComposite):
"""
This class ...
"""
__metaclass__ = ABCMeta
# ------------------------------------------------------------------------------
def __init__(self, **kwargs):
"""
The constructor ...
"""
# Call the constructor of the base class
super(ImagePlotSettings, self).__init__()
# Define properties
self.add_property("label", "string", "label for the image", None)
self.add_property("vmin", "real", "plotting minimum")
self.add_property("vmax", "real", "plotting maximum")
self.add_boolean_property("soft_vmin", "soft vmin", False) #, None) # use None as default to use plotter config if not defined
self.add_boolean_property("soft_vmax", "soft vmax", False) #, None) # use None as default to use plotter config if not defined
self.add_property("cmap", "string", "colormap", choices=normal_colormaps)
# ------------------------------------------------------------------------------
class ImageGridPlotter(Configurable):
"""
This class ...
"""
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
"""
# Call the constructor of the base class
super(ImageGridPlotter, self).__init__(*args, **kwargs)
# The figure
self.figure = None
# The grid
self.grid = None
# The plots
self.plots = None
# The settings
self.settings = defaultdict(self.image_settings_class)
# -----------------------------------------------------------------
@abstractproperty
def image_settings_class(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@abstractproperty
def names(self):
"""
This function ...
:return:
"""
pass
# ------------------------------------------------------------------------------
@property
def light(self):
return self.config.theme == light_theme
# -----------------------------------------------------------------
@property
def dark(self):
return self.config.theme == dark_theme
# -----------------------------------------------------------------
@lazyproperty
def text_color(self):
"""
This function ...
:return:
"""
# Set light theme
if self.light: return "black"
# Dark theme
elif self.dark: return "white"
# Invalid
else: raise ValueError("Invalid theme")
# -----------------------------------------------------------------
@lazyproperty
def frame_color(self):
"""
This function ...
:return:
"""
# Set light theme
if self.light: return "black"
# Dark theme
elif self.dark: return "white"
# Invalid
else: raise ValueError("Invalid theme")
# -----------------------------------------------------------------
@lazyproperty
def background_color(self):
"""
This function ...
:return:
"""
# Set light theme
if self.light: return "white"
# Dark theme
elif self.dark: return "black"
# Invalid
else: raise ValueError("Invalid theme")
# -----------------------------------------------------------------
@abstractproperty
def first_frame(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@lazyproperty
def center(self):
"""
This function ...
:return:
"""
# Center coordinate is defined
if self.config.center is not None: return self.config.center
# Not defined?
return self.first_frame.center_sky
# -----------------------------------------------------------------
@property
def ra_center(self):
return self.center.ra
# ------------------------------------------------------------------------------
@property
def dec_center(self):
return self.center.dec
# ------------------------------------------------------------------------------
@lazyproperty
def ra_center_deg(self):
return self.ra_center.to("deg").value
# ------------------------------------------------------------------------------
@lazyproperty
def dec_center_deg(self):
return self.dec_center.to("deg").value
# ------------------------------------------------------------------------------
@lazyproperty
def spacing_deg(self):
return self.config.spacing.to("deg").value
# ------------------------------------------------------------------------------
@lazyproperty
def radius_deg(self):
return self.config.radius.to("deg").value
# ------------------------------------------------------------------------------
@lazyproperty
def colormap(self):
return cm.get_cmap(self.config.cmap)
# -----------------------------------------------------------------
@lazyproperty
def nan_color(self):
if self.config.nan_color is not None: return self.config.nan_color
else: return self.colormap(0)
# -----------------------------------------------------------------
@lazyproperty
def theme_settings(self):
if self.light: return light_theme_settings
elif self.dark: return dark_theme_settings
else: raise ValueError("Invalid theme")
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(ImageGridPlotter, self).setup(**kwargs)
# plt.rcParams.update({'font.size':20})
plt.rcParams["axes.labelsize"] = self.config.axes_label_size # 16 #default 20
plt.rcParams["xtick.labelsize"] = self.config.ticks_label_size # 10 #default 16
plt.rcParams["ytick.labelsize"] = self.config.ticks_label_size # 10 #default 16
plt.rcParams["legend.fontsize"] = self.config.legend_fontsize # 10 #default 14
plt.rcParams["legend.markerscale"] = self.config.legend_markers_cale
plt.rcParams["lines.markersize"] = self.config.lines_marker_size # 4 #default 4
plt.rcParams["axes.linewidth"] = self.config.linewidth
# Set theme-specific settings
for label in self.theme_settings: plt.rcParams[label] = self.theme_settings[label]
# plt.rcParams['xtick.major.size'] = 5
# plt.rcParams['xtick.major.width'] = 2
# plt.rcParams['ytick.major.size'] = 5
# plt.rcParams['ytick.major.width'] = 2
# ------------------------------------------------------------------------------
def plot_images(images, **kwargs):
"""
This function ...
:param images:
:param kwargs:
:return:
"""
# Create the plotter
plotter = StandardImageGridPlotter(**kwargs)
# Run the plotter
plotter.run(images=images)
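# Usage sketch (illustrative; the file names below are hypothetical): 'images' is a
# mapping from image name to Frame (or Image) instances.
#   from pts.magic.core.frame import Frame
#   frames = {"FUV": Frame.from_file("fuv.fits"), "NUV": Frame.from_file("nuv.fits")}
#   plot_images(frames)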
# -----------------------------------------------------------------
class StandardImagePlotSettings(ImagePlotSettings):
"""
This class ...
"""
def __init__(self, **kwargs):
"""
This function ...
:param kwargs:
"""
# Call the constructor of the base class
super(StandardImagePlotSettings, self).__init__(**kwargs)
# Set properties
self.set_properties(kwargs)
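# Example (hypothetical values): per-image settings are passed as the properties
# defined above, e.g. StandardImagePlotSettings(label="FUV", cmap="inferno", vmin=0.)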
# -----------------------------------------------------------------
class StandardImageGridPlotter(ImageGridPlotter):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
This function ...
:param args:
:param kwargs:
"""
# Call the constructor of the base class
super(StandardImageGridPlotter, self).__init__(*args, **kwargs)
# The image frames
self.frames = OrderedDict()
# The error frames
self.errors = OrderedDict()
# The masks
self.masks = OrderedDict()
# The regions
self.regions = OrderedDict()
# ------------------------------------------------------------------------------
@property
def image_settings_class(self):
"""
This function ...
:return:
"""
return StandardImagePlotSettings
# ------------------------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Show stuff
if self.config.show: self.show()
# Write
self.write()
# Plot
self.plot()
# ------------------------------------------------------------------------------
@property
def names(self):
"""
This function ...
:return:
"""
return self.frames.keys()
# ------------------------------------------------------------------------------
def add_image(self, name, image, errors=None, mask=None, regions=None, replace=False, settings=None):
"""
This function ...
:param name:
:param image:
:param errors:
:param mask:
:param regions:
:param replace:
:param settings:
:return:
"""
# Check if name already exists
if not replace and name in self.names: raise ValueError("Already an image with name '" + name + "' added")
# Image is passed
if isinstance(image, Image):
# Get the frame
frame = image.primary
# Get errors?
# Get mask?
# Get regions?
# Frame is passed
elif isinstance(image, Frame): frame = image
# Invalid
else: raise ValueError("Invalid value for 'image': must be Frame or Image")
# Add frame
self.frames[name] = frame
# Add errors
if errors is not None: self.errors[name] = errors
# Add regions
if regions is not None: self.regions[name] = regions
# Add mask
if mask is not None: self.masks[name] = mask
# Set settings
if settings is not None: self.settings[name].set_properties(settings)
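# Usage sketch (hypothetical names and values):
#   plotter = StandardImageGridPlotter()
#   plotter.add_image("FUV", fuv_frame, errors=fuv_errors,
#                     settings=dict(label="GALEX FUV", cmap="inferno"))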
# ------------------------------------------------------------------------------
def show(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing ...")
# ------------------------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Images
if self.config.write_images: self.write_images()
# Frames
if self.config.write_frames: self.write_frames()
# Masks
if self.config.write_masks: self.write_masks()
# Regions
if self.config.write_regions: self.write_regions()
# ------------------------------------------------------------------------------
def write_images(self):
"""
This function ...
:return:
"""
# ------------------------------------------------------------------------------
def write_frames(self):
"""
This function ...
:return:
"""
# ------------------------------------------------------------------------------
def write_masks(self):
"""
This function ...
:return:
"""
# ------------------------------------------------------------------------------
def write_regions(self):
"""
This function ...
:return:
"""
# ------------------------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# ------------------------------------------------------------------------------
images_name = "images"
observations_name = "observations"
models_name = "models"
errors_name = "errors"
model_errors_name = "model_errors"
residuals_name = "residuals"
distributions_name = "distributions"
settings_name = "settings"
# ------------------------------------------------------------------------------
observation_name = "observation"
model_name = "model"
observation_or_model = [observation_name, model_name]
# ------------------------------------------------------------------------------
horizontal_mode, vertical_mode = "horizontal", "vertical"
default_direction = vertical_mode
directions = [horizontal_mode, vertical_mode]
# ------------------------------------------------------------------------------
class ResidualImagePlotSettings(ImagePlotSettings):
"""
This class ...
"""
def __init__(self, **kwargs):
"""
The constructor ...
"""
# Call the constructor of the base class
super(ResidualImagePlotSettings, self).__init__()
# Define properties
self.add_property("residual_amplitude", "percentage", "amplitude of the residual plots")
self.add_boolean_property("soft_residual_amplitude", "soft residual amplitude", False) #, None) # use None as default to use plotter config if not defined
self.add_property("residual_cmap", "string", "colormap for the residual plots") # no choices because can be absolute or not
# Set properties
self.set_properties(kwargs)
# ------------------------------------------------------------------------------
def plot_residuals(observations, models, **kwargs):
"""
This function ...
:param observations:
:param models:
:param kwargs:
:return:
"""
# Create the plotter
plotter = ResidualImageGridPlotter(**kwargs)
# Run the plotter
plotter.run(observations=observations, models=models)
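# Usage sketch (illustrative; names are hypothetical): 'observations' and 'models'
# map image names to Frame/Image instances and are matched by name.
#   plot_residuals({"FUV": observed_frame}, {"FUV": model_frame})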
# -----------------------------------------------------------------
class ResidualImageGridPlotter(ImageGridPlotter):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
"""
# Call the constructor of the base class
super(ResidualImageGridPlotter, self).__init__(*args, **kwargs)
# The image frames
self.observations = OrderedDict()
self.errors = OrderedDict()
self.models = OrderedDict()
self.model_errors = OrderedDict()
self.residuals = OrderedDict()
# The residual distributions
self.distributions = OrderedDict()
# ------------------------------------------------------------------------------
@property
def image_settings_class(self):
return ResidualImagePlotSettings
# ------------------------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Create the residual frames
self.create_residuals()
# Create the residual distributions
self.create_distributions()
# Show stuff
if self.config.show: self.show()
# Write
self.write()
# Plot
self.plot()
# ------------------------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(ResidualImageGridPlotter, self).setup(**kwargs)
# Load the images
if kwargs.get(images_name, None) is not None: self.add_images(kwargs.pop(images_name))
if kwargs.get(observations_name, None) is not None: self.add_observations(kwargs.pop(observations_name))
if kwargs.get(models_name, None) is not None: self.add_models(kwargs.pop(models_name))
if kwargs.get(errors_name, None) is not None: self.add_error_maps(kwargs.pop(errors_name))
if kwargs.get(residuals_name, None) is not None: self.add_residual_maps(kwargs.pop(residuals_name))
# Nothing added
if self.config.from_directory is not None: self.load_from_directory(self.config.from_directory)
elif not self.has_images: self.load_from_directory(self.config.path)
# Initialize the figure
self.initialize_figure()
# ------------------------------------------------------------------------------
@property
def figsize(self):
return (15,10)
# ------------------------------------------------------------------------------
@property
def horizontal(self):
return self.config.direction == horizontal_mode
# ------------------------------------------------------------------------------
@property
def vertical(self):
return self.config.direction == vertical_mode
# ------------------------------------------------------------------------------
@lazyproperty
def npanels(self):
if self.config.distributions: return 4 # observation, model, residual, distribution
else: return 3 # observation, model, residual
# ------------------------------------------------------------------------------
@lazyproperty
def nrows(self):
if self.horizontal: return self.npanels
elif self.vertical: return self.nimages
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
@lazyproperty
def ncolumns(self):
if self.horizontal: return self.nimages
elif self.vertical: return self.npanels
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
@property
def share_x(self):
return True
# ------------------------------------------------------------------------------
@property
def share_y(self):
return True
# ------------------------------------------------------------------------------
def initialize_figure(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Initializing the figure with size " + str(self.figsize) + " ...")
# Create the plot
self.figure = MPLFigure(size=self.figsize)
# Create plots
#self.plots = self.figure.create_grid(self.nrows, self.ncolumns, sharex=self.share_x, sharey=self.share_y)
# Create grid
self.grid = self.figure.create_gridspec(self.nrows, self.ncolumns, hspace=0.0, wspace=0.0)
# Initialize structure to contain the plots
#print("NCOLUMNS", self.ncolumns)
#print("NROWS", self.nrows)
self.plots = [[None for i in range(self.ncolumns)] for j in range(self.nrows)]
# ------------------------------------------------------------------------------
@property
def all_names(self):
return sequences.combine_unique(self.observation_names, self.model_names, self.errors_names, self.residuals_names)
# ------------------------------------------------------------------------------
@property
def observation_names(self):
return self.observations.keys()
# ------------------------------------------------------------------------------
def has_observation(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.observation_names
# ------------------------------------------------------------------------------
@property
def model_names(self):
return self.models.keys()
# ------------------------------------------------------------------------------
def has_model(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.model_names
# ------------------------------------------------------------------------------
@property
def errors_names(self):
return self.errors.keys()
# ------------------------------------------------------------------------------
def has_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.errors
# ------------------------------------------------------------------------------
@property
def model_errors_names(self):
return self.model_errors.keys()
# ------------------------------------------------------------------------------
def has_model_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.model_errors
# ------------------------------------------------------------------------------
@property
def residuals_names(self):
return self.residuals.keys()
# ------------------------------------------------------------------------------
def has_residuals(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.residuals
# ------------------------------------------------------------------------------
@property
def distribution_names(self):
return self.distributions.keys()
# ------------------------------------------------------------------------------
def has_distribution(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.distributions
# ------------------------------------------------------------------------------
@property
def settings_names(self):
return self.settings.keys()
# ------------------------------------------------------------------------------
def has_settings(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.settings_names
# ------------------------------------------------------------------------------
@property
def names(self):
return self.observation_names
# ------------------------------------------------------------------------------
@property
def first_name(self):
return self.names[0]
# ------------------------------------------------------------------------------
@property
def first_observation(self):
return self.get_observation(self.first_name)
# ------------------------------------------------------------------------------
@property
def first_frame(self):
return self.first_observation
# ------------------------------------------------------------------------------
@property
def nimages(self):
return len(self.names)
# ------------------------------------------------------------------------------
@property
def has_images(self):
return self.nimages > 0
# ------------------------------------------------------------------------------
def add_image(self, name, observation, model=None, errors=None, model_errors=None, residuals=None, replace=False,
settings=None):
"""
This function ...
:param name:
:param observation:
:param model:
:param errors:
:param model_errors:
:param residuals:
:param replace:
:param settings:
:return:
"""
# Check if name already exists
if not replace and name in self.names: raise ValueError("Already an image with name '" + name + "' added")
# Check type of the image
if isinstance(observation, Image):
# Get observation frame
if observation_name in observation.frame_names: observation = observation.frames[observation_name]
else: observation = observation.primary
# Get model frame
if model_name in observation.frame_names:
if model is not None: raise ValueError("Cannot pass model frame if image contains model frame")
model = observation.frames[model_name]
# Get errors frame
if errors_name in observation.frame_names:
if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
errors = observation.frames[errors_name]
# Get model errors frame
if model_errors_name in observation.frame_names:
if model_errors is not None: raise ValueError("Cannot pass model error map if image contains model error map")
model_errors = observation.frames[model_errors_name]
# Get residuals frame
if residuals_name in observation.frame_names:
if residuals is not None: raise ValueError("Cannot pass residual map if image contains residual map")
residuals = observation.frames[residuals_name]
# Check the type of the model image
if model is not None and isinstance(model, Image):
# Get the model frame
if model_name in model.frame_names: model = model.frames[model_name]
else: model = model.primary
# Get the model errors frame
if model_errors_name in model.frame_names:
if errors_name in model.frame_names: raise ValueError("Model image contains both 'errors' and 'model_errors' frame")
if model_errors is not None: raise ValueError("Cannot pass model error map if model image contains model error map")
model_errors = model.frames[model_errors_name]
elif errors_name in model.frame_names:
if model_errors is not None: raise ValueError("Cannot pass model error map if model image contains error map")
model_errors = model.frames[errors_name]
# Add observation
self.observations[name] = observation
# Add model
if model is not None: self.models[name] = model
# Add errors
if errors is not None: self.errors[name] = errors
# Add model errors
if model_errors is not None: self.model_errors[name] = model_errors
# Add residuals
if residuals is not None: self.residuals[name] = residuals
# Set settings
if settings is not None: self.settings[name].set_properties(settings)
# ------------------------------------------------------------------------------
def add_observation(self, name, frame, errors=None):
"""
This function ...
:param name:
:param frame:
:param errors:
:return:
"""
# Check the type of the image
if isinstance(frame, Image):
# Get observation frame
if observation_name in frame.frame_names: frame = frame.frames[observation_name]
else: frame = frame.primary
# Get error map
if errors_name in frame.frame_names:
if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
errors = frame.frames[errors_name]
# Check whether there are no other frames
if sequences.contains_more(frame.frame_names, ["primary", observation_name, errors_name]): raise ValueError("Observation image contains too many frames")
# Add observation frame
self.observations[name] = frame
# Add error map
if errors is not None: self.errors[name] = errors
# ------------------------------------------------------------------------------
def add_model(self, name, frame, errors=None):
"""
This function ...
:param name:
:param frame:
:param errors:
:return:
"""
# Check the type of the image
if isinstance(frame, Image):
# Get model frame
if model_name in frame.frame_names: frame = frame.frames[model_name]
else: frame = frame.primary
# Get error map
if errors_name in frame.frame_names:
if model_errors_name in frame.frame_names: raise ValueError("Model image contains both 'errors' and 'model_errors' frame")
if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
errors = frame.frames[errors_name]
elif model_errors_name in frame.frame_names:
if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
errors = frame.frames[model_errors_name]
# Check whether there are no other frames
if sequences.contains_more(frame.frame_names, ["primary", model_name, errors_name, model_errors_name]): raise ValueError("Model image contains too many frames")
# Add model frame
self.models[name] = frame
# Add error map
if errors is not None: self.model_errors[name] = errors
# ------------------------------------------------------------------------------
def add_errors(self, name, frame):
"""
This function ...
:param name:
:param frame:
:return:
"""
# Add
self.errors[name] = frame
# ------------------------------------------------------------------------------
def add_model_errors(self, name, frame):
"""
Thisn function ...
:param name:
:param frame:
:return:
"""
# Add
self.model_errors[name] = frame
# ------------------------------------------------------------------------------
def add_residuals(self, name, frame):
"""
This function ...
:param name:
:param frame:
:return:
"""
# Add
self.residuals[name] = frame
# ------------------------------------------------------------------------------
def add_distribution(self, name, distribution):
"""
This function ...
:param name:
:param distribution:
:return:
"""
# Add
self.distributions[name] = distribution
# -----------------------------------------------------------------
def add_settings(self, name, **settings):
"""
This function ...
:param name:
:param settings:
:return:
"""
# Set settings
self.settings[name].set_properties(settings)
# ------------------------------------------------------------------------------
def set_settings(self, name, settings):
"""
This function ...
:param name:
:param settings:
:return:
"""
# Set settings
self.settings[name] = settings
# ------------------------------------------------------------------------------
def set_setting(self, name, setting_name, value):
"""
This function ...
:param name:
:param setting_name:
:param value:
:return:
"""
# Set
self.settings[name][setting_name] = value
# ------------------------------------------------------------------------------
def add_images(self, images):
"""
This function ...
:param images:
:return:
"""
# Debugging
log.debug("Adding images ...")
# Loop over the images
for name in images:
# Get the image
image = images[name]
# Add
self.add_image(name, image)
# ------------------------------------------------------------------------------
def add_observations(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding observations ...")
# Loop over the frames
for name in frames:
# Get the frames
frame = frames[name]
# Add
self.add_observation(name, frame)
# ------------------------------------------------------------------------------
def add_models(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding models ...")
# Loop over the frames
for name in frames:
# Get the frames
frame = frames[name]
# Add
self.add_model(name, frame)
# ------------------------------------------------------------------------------
def add_error_maps(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding error maps ...")
# Loop over the frames
for name in frames:
# Get the frame
frame = frames[name]
# Add
self.add_errors(name, frame)
# ------------------------------------------------------------------------------
def add_model_error_maps(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding model error maps ...")
# Loop over the frames
for name in frames:
# Get the frame
frame = frames[name]
# Add
self.add_model_errors(name, frame)
# ------------------------------------------------------------------------------
def add_residual_maps(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding residual maps ...")
# Loop over the frames
for name in frames:
# Get the frame
frame = frames[name]
# Add
self.add_residuals(name, frame)
# ------------------------------------------------------------------------------
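# Note (sketch, inferred from the loaders defined below, not an authoritative spec):
# 'path' is expected to contain either FITS files directly, or subdirectories named
# after images_name, observations_name, models_name, residuals_name and settings_name,
# holding FITS files (images/frames) and dat files (plotting settings) respectively.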
def load_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Are there FITS files in the directory?
if fs.has_files_in_path(path, extension="fits"): self.load_images_from_directory(path)
# Are there subdirectories?
elif fs.has_directories_in_path(path):
# Determine paths
images_path = fs.join(path, images_name)
observations_path = fs.join(path, observations_name)
models_path = fs.join(path, models_name)
residuals_path = fs.join(path, residuals_name)
settings_path = fs.join(path, settings_name)
# Load from the subdirectories
if fs.is_directory(images_path): self.load_images_from_directory(images_path)
if fs.is_directory(observations_path): self.load_observations_from_directory(observations_path)
if fs.is_directory(models_path): self.load_models_from_directory(models_path)
if fs.is_directory(residuals_path): self.load_residuals_from_directory(residuals_path)
if fs.is_directory(settings_path): self.load_settings_from_directory(settings_path)
# No FITS files nor subdirectories
else: raise IOError("No image files nor subdirectories found in '" + path + "'")
# ------------------------------------------------------------------------------
def load_images_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading image files from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading '" + name + "' image ...")
# Load the image
image = Image.from_file(filepath, always_call_first_primary=False)
# Add the image
self.add_image(name, image)
# ------------------------------------------------------------------------------
def load_observations_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading observed image frames from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' observed image ...")
# Get header
#header = get_header(filepath)
# Get the filter
#fltr = get_filter(name, header=header)
# Check whether the filter is in the list of filters to be plotted
#if fltr not in config.filters: continue
# Get the index for this filter
#index = config.filters.index(fltr)
# Load the image
#frame = Frame.from_file(filepath)
image = Image.from_file(filepath, always_call_first_primary=False)
# Replace zeroes and negatives
image.primary.replace_zeroes_by_nans()
image.primary.replace_negatives_by_nans()
# Add the image
self.add_observation(name, image)
# ------------------------------------------------------------------------------
def load_models_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading model image frames from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' model image ...")
# Load the image
image = Image.from_file(filepath, always_call_first_primary=False)
# Replace zeroes and negatives
image.primary.replace_zeroes_by_nans()
image.primary.replace_negatives_by_nans()
# Add the image
self.add_model(name, image)
# ------------------------------------------------------------------------------
def load_residuals_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading residual image frames from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' residual map ...")
# Load the frame
frame = Frame.from_file(filepath)
# Add the map
self.add_residuals(name, frame)
# ------------------------------------------------------------------------------
def load_settings_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading plotting settings from '" + path + "' ...")
# Loop over the dat files
for name, filepath in fs.files_in_path(path, extension="dat", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' settings ...")
# Load the settings
settings = ImagePlotSettings.from_file(filepath)
# Set the settings
self.set_settings(name, settings)
# ------------------------------------------------------------------------------
def get_observation_or_model(self, name):
"""
This function ...
:param name:
:return:
"""
if self.has_observation(name): return self.get_observation(name)
elif self.has_model(name): return self.get_model(name)
else: raise ValueError("Doesn't have observation or model for name '" + name + "'")
# ------------------------------------------------------------------------------
def get_filter(self, name):
"""
This function ...
:param name:
:return:
"""
return self.get_observation_or_model(name).filter
# ------------------------------------------------------------------------------
def get_wcs(self, name):
"""
This function ...
:param name:
:return:
"""
return self.get_observation_or_model(name).wcs
# ------------------------------------------------------------------------------
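# Residual conventions computed below (sketch of the formulas, for reference):
# weighed: (model - observation) / errors; relative: (model - observation) / observation;
# otherwise the plain difference model - observation is used, and with config.absolute
# the absolute value is taken afterwards.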
def calculate_residuals(self, name):
"""
This function ...
:param name:
:return:
"""
# Get the frames
#observation = self.observations[name]
#model = self.models[name]
# Uniformize
observation, model = uniformize(self.observations[name], self.models[name])
# Error-weighed residuals
if self.config.weighed:
if self.config.weighing_reference == observation_name:
if not self.has_errors(name): raise ValueError("No errors for the '" + name + "' image")
errors = self.get_errors(name)
elif self.config.weighing_reference == model_name:
if not self.has_model_errors(name): raise ValueError("No model errors for the '" + name + "' image")
errors = self.get_model_errors(name)
else: raise ValueError("Invalid value for 'weighing_reference'")
# Calculate
res = Frame((model - observation) / errors, wcs=observation.wcs)
# Relative residuals
elif self.config.relative: res = Frame((model - observation) / observation, wcs=observation.wcs)
# Absolute residuals
else: res = Frame(model - observation, wcs=observation.wcs)
# Take absolute values?
if self.config.absolute: res = res.absolute
# Return the residual
return res
# ------------------------------------------------------------------------------
def create_residuals(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the residual frames ...")
# Loop over the observed images
for name in self.names:
# Checks
if not self.has_model(name): continue
if self.has_residuals(name): continue
# Debugging
log.debug("Creating residual frame for the '" + name + "' image ...")
# Create
res = self.calculate_residuals(name)
# Add the residuals frame
self.residuals[name] = res
# ------------------------------------------------------------------------------
def create_distributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the residual distributions ...")
# Loop over the residual maps
for name in self.residuals_names:
# Checks
if self.has_distribution(name): continue
# Debugging
log.debug("Creating distribution for the '" + name + "' residuals ...")
# Get the residual map
residuals = self.get_residuals(name)
# Create the distribution
distribution = Distribution.from_data("Residual", residuals, sigma_clip=self.config.sigma_clip_distributions, sigma_level=self.config.sigma_clip_level)
# Add the distribution
self.distributions[name] = distribution
# ------------------------------------------------------------------------------
def get_observation(self, name):
"""
This function ...
:param name:
:return:
"""
return self.observations[name]
# ------------------------------------------------------------------------------
@memoize_method
def get_observation_image(self, name):
"""
This function ...
:param name:
:return:
"""
# Create image
image = Image(name=name)
# Add observation frame
image.add_frame(self.get_observation(name), observation_name)
# Add error map
if self.has_errors(name): image.add_frame(self.get_errors(name), errors_name)
# Return the image
return image
# ------------------------------------------------------------------------------
def get_model(self, name):
"""
This function ...
:param name:
:return:
"""
return self.models[name]
# ------------------------------------------------------------------------------
@memoize_method
def get_model_image(self, name):
"""
This function ...
:param name:
:return:
"""
# Create image
image = Image(name=name)
# Add model frame
image.add_frame(self.get_model(name), model_name)
# Add error map
if self.has_model_errors(name): image.add_frame(self.get_model_errors(name), errors_name)
# Return the image
return image
# ------------------------------------------------------------------------------
def get_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return self.errors[name]
# ------------------------------------------------------------------------------
def get_model_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return self.model_errors[name]
# ------------------------------------------------------------------------------
def get_residuals(self, name):
"""
This function ...
:param name:
:return:
"""
return self.residuals[name]
# ------------------------------------------------------------------------------
def get_distribution(self, name):
"""
This function ...
:param name:
:return:
"""
return self.distributions[name]
# ------------------------------------------------------------------------------
@memoize_method
def get_image(self, name):
"""
This function ...
:param name:
:return:
"""
# Create the image
image = Image(name=name)
# Add the observation
if self.has_observation(name): image.add_frame(self.get_observation(name), observation_name)
# Add the model
if self.has_model(name): image.add_frame(self.get_model(name), model_name)
# Add the errors
if self.has_errors(name): image.add_frame(self.get_errors(name), errors_name)
# Add the model errors
if self.has_model_errors(name): image.add_frame(self.get_model_errors(name), model_errors_name)
# Add the residuals
if self.has_residuals(name): image.add_frame(self.get_residuals(name), residuals_name)
# Return the image
return image
# ------------------------------------------------------------------------------
def get_settings(self, name):
"""
This function ...
:param name:
:return:
"""
return self.settings[name]
# ------------------------------------------------------------------------------
def show(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing ...")
# ------------------------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write observations
if self.config.write_observations: self.write_observations()
# Write models
if self.config.write_models: self.write_models()
# Write residual frames
if self.config.write_residuals: self.write_residuals()
# Write the images
if self.config.write_images: self.write_images()
# Write the distributions
if self.config.write_distributions: self.write_distributions()
# Write the settings
if self.config.write_settings: self.write_settings()
# ------------------------------------------------------------------------------
@lazyproperty
def images_path(self):
return self.output_path_directory(images_name)
# ------------------------------------------------------------------------------
@lazyproperty
def observations_path(self):
return self.output_path_directory(observations_name)
# ------------------------------------------------------------------------------
@lazyproperty
def models_path(self):
return self.output_path_directory(models_name)
# ------------------------------------------------------------------------------
@lazyproperty
def residuals_path(self):
return self.output_path_directory(residuals_name)
# ------------------------------------------------------------------------------
@lazyproperty
def distributions_path(self):
return self.output_path_directory(distributions_name)
# ------------------------------------------------------------------------------
@lazyproperty
def settings_path(self):
return self.output_path_directory(settings_name)
# ------------------------------------------------------------------------------
def write_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the images ...")
# Loop over all images
for name in self.all_names:
# Determine path
path = fs.join(self.images_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' image ...")
# Get image
image = self.get_image(name)
# Save the image
image.saveto(path)
# ------------------------------------------------------------------------------
def write_observations(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the observed frames ...")
# Loop over the observed images
for name in self.observation_names:
# Determine the path
path = fs.join(self.observations_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' observed image ...")
# Get the frame
frame = self.get_observation_image(name)
# Save the frame
frame.saveto(path)
# ------------------------------------------------------------------------------
def write_models(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the model frames ...")
# Loop over the model images
for name in self.model_names:
# Determine the path
path = fs.join(self.models_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' model image ...")
# Get the frame
frame = self.get_model_image(name)
# Save the frame
frame.saveto(path)
# ------------------------------------------------------------------------------
def write_residuals(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the residual frames ...")
# Loop over the residual maps
for name in self.residuals_names:
# Determine the path
path = fs.join(self.residuals_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' residual frame ...")
# Get the residual map
frame = self.get_residuals(name)
# Save the frame
frame.saveto(path)
# ------------------------------------------------------------------------------
def write_distributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the residual distributions ...")
# Loop over the distributions
for name in self.distribution_names:
# Determine the path
path = fs.join(self.distributions_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' residual distribution ...")
# Get the distribution
distribution = self.get_distribution(name)
# Save
distribution.saveto(path)
# ------------------------------------------------------------------------------
def write_settings(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the plotting settings ...")
# Loop over the settings
for name in self.settings_names:
# Determine the path
path = fs.join(self.settings_path, name + ".dat")
# Debugging
log.debug("Writing the '" + name + "' plotting settings ...")
# Get the settings
settings = self.get_settings(name)
# Save
settings.saveto(path)
# ------------------------------------------------------------------------------
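# Typical flow (sketch; assumes observations/models and optional error maps were added
# through the add_* methods above, and residuals/distributions were created with
# create_residuals() and create_distributions()): plot() renders all panels and
# finish() saves or shows the figure.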
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# Plot observations
self.plot_observations()
# Plot models
self.plot_models()
# Plot residuals
self.plot_residuals()
# Plot distributions
if self.config.distributions: self.plot_distributions()
# Finish the plot
self.finish()
# ------------------------------------------------------------------------------
def get_label(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings?
if not self.has_settings(name): return name
# Get the settings
settings = self.get_settings(name)
# Return
if settings.label is not None: return settings.label
else: return name
# ------------------------------------------------------------------------------
def get_colormap(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings?
if not self.has_settings(name): return self.config.cmap
# Get the settings
settings = self.get_settings(name)
# Return
if settings.cmap is not None: return settings.cmap
else: return self.config.cmap
# ------------------------------------------------------------------------------
@property
def config_residual_cmap(self):
"""
This function ...
:return:
"""
if self.config.absolute: return self.config.absolute_residual_cmap
else: return self.config.residual_cmap
# ------------------------------------------------------------------------------
def get_residual_colormap(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings
if not self.has_settings(name): return self.config_residual_cmap
# Get the settings
settings = self.get_settings(name)
# Return
if settings.residual_cmap is not None: return settings.residual_cmap
else: return self.config_residual_cmap
# ------------------------------------------------------------------------------
def get_limits(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings
if not self.has_settings(name): return self.config.vmin, self.config.vmax, False, False
# Get the settings
settings = self.get_settings(name)
# Get limits
vmin = settings.vmin if settings.vmin is not None else self.config.vmin
vmax = settings.vmax if settings.vmax is not None else self.config.vmax
# Get flags
soft_vmin = settings.soft_vmin if settings.vmin is not None else False # don't use True flag if vmin is not set in settings
soft_vmax = settings.soft_vmax if settings.vmax is not None else False # don't use True flag if vmax is not set in settings
# Return
return vmin, vmax, soft_vmin, soft_vmax
# ------------------------------------------------------------------------------
def get_residual_amplitude(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings
if not self.has_settings(name): return self.config.residual_amplitude, False
# Get the settings
settings = self.get_settings(name)
# Get amplitude
amplitude = settings.residual_amplitude if settings.residual_amplitude is not None else self.config.residual_amplitude
# Get flag
soft_amplitude = settings.soft_residual_amplitude if settings.residual_amplitude is not None else False # don't use True flag if amplitude is not set in settings
# Return
return amplitude, soft_amplitude
# ------------------------------------------------------------------------------
def set_limits(self, name, vmin, vmax, soft_vmin=None, soft_vmax=None):
"""
This function ...
:param name:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Set vmin and vmax
self.add_settings(name, vmin=vmin, vmax=vmax)
# Set flags
if soft_vmin is not None: self.set_setting(name, "soft_vmin", soft_vmin)
if soft_vmax is not None: self.set_setting(name, "soft_vmax", soft_vmax)
# ------------------------------------------------------------------------------
def get_vmin_vmax(self, frame, vmin=None, vmax=None, soft_vmin=False, soft_vmax=False):
"""
This function ...
:param frame:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Defined?
has_vmin = vmin is not None
has_vmax = vmax is not None
# Vmin and vmax don't have to be calculated
if has_vmin and has_vmax and (not soft_vmin) and (not soft_vmax): return vmin, vmax
# Calculate vmin and or vmax
return get_vmin_vmax(frame.data, interval=self.config.interval, zmin=vmin, zmax=vmax, soft_zmin=soft_vmin, soft_zmax=soft_vmax)
# ------------------------------------------------------------------------------
def get_residual_vmin_vmax(self, frame, amplitude=None, soft_amplitude=False):
"""
This function ...
:param frame:
:param amplitude:
:param soft_amplitude:
:return:
"""
# Defined?
if amplitude is not None and not soft_amplitude:
if self.config.absolute: return 0., amplitude
else: return -amplitude, amplitude
# Calculate vmin and or vmax
if self.config.absolute: return get_vmin_vmax(frame.data, interval=self.config.residual_interval, zmin=0, zmax=amplitude, soft_zmin=False, soft_zmax=soft_amplitude)
else:
zmin = -amplitude if amplitude is not None else None
zmax = amplitude
return get_vmin_vmax(frame.data, interval=self.config.residual_interval, zmin=zmin, zmax=zmax, soft_zmin=soft_amplitude, soft_zmax=soft_amplitude, around_zero=True, symmetric=True)
# ------------------------------------------------------------------------------
def get_observation_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 0
if self.horizontal: return 0, index
# Vertical
#elif self.vertical: return 0, index
elif self.vertical: return index, 0
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_model_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 1
if self.horizontal: return 1, index
# Vertical
#elif self.vertical: return 1, index
elif self.vertical: return index, 1
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_residuals_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 2
if self.horizontal: return 2, index
# Vertical
#elif self.vertical: return 2, index
elif self.vertical: return index, 2
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_distribution_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 3
if self.horizontal: return 3, index
# Vertical
#elif self.vertical: return 3, index
elif self.vertical: return index, 3
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_observation_spec(self, index, return_row_col=False):
"""
This function ...
:param index:
:param return_row_col:
:return:
"""
# Get row and col
row, col = self.get_observation_row_col(index)
#print(self.grid.get_geometry())
#print(self.grid.get_height_ratios())
# Return the grid spec
#if return_row_col: return self.grid[row, col], row, col
#else: return self.grid[row, col]
#if return_row_col: return self.grid[index], row, col
#else: return self.grid[index]
# No, no, this was a mistake with 'get_observation_row_col'
#if return_row_col: return self.grid[col, row], row, col # WHY?
#else: return self.grid[col, row] # WHY?
# This was right after all
if return_row_col: return self.grid[row, col], row, col
else: return self.grid[row, col]
# ------------------------------------------------------------------------------
def get_model_spec(self, index, return_row_col=False):
"""
This function ...
:param index:
:param return_row_col:
:return:
"""
# Get row and col
row, col = self.get_model_row_col(index)
# Return the grid spec
if return_row_col: return self.grid[row, col], row, col
else: return self.grid[row, col]
# ------------------------------------------------------------------------------
def get_residuals_spec(self, index, return_row_col=False):
"""
This function ...
:param index:
:param return_row_col:
:return:
"""
# Get row and col
row, col = self.get_residuals_row_col(index)
# Return the grid spec
if return_row_col: return self.grid[row, col], row, col
else: return self.grid[row, col]
# ------------------------------------------------------------------------------
def get_distribution_spec(self, index, return_row_col=False):
"""
This function ...
:param index:
:param return_row_col:
:return:
"""
# Get row and col
row, col = self.get_distribution_row_col(index)
# Return the grid spec
if return_row_col: return self.grid[row, col], row, col
else: return self.grid[row, col]
# ------------------------------------------------------------------------------
def create_observation_plot(self, index, frame):
"""
This function ...
:param index:
:param frame:
:return:
"""
# Get the subplot spec
spec, row, col = self.get_observation_spec(index, return_row_col=True)
#print(spec)
#print("ROW", row, "COL", col)
# Get coordinates of the subplot
#points = spec.get_position(self.figure.figure).get_points()
bbox = spec.get_position(self.figure.figure)
coordinates = [bbox.x0, bbox.y0, bbox.width, bbox.height]
# Create the plot
# needs [xmin, ymin, dx, dy]
plot = aplpy.FITSFigure(frame.to_hdu(), figure=self.figure.figure, subplot=coordinates)
# Add the plot
self.plots[row][col] = plot
# Return the plot
return plot
# ------------------------------------------------------------------------------
def create_model_plot(self, index, frame):
"""
This function ...
:param index:
:param frame:
:return:
"""
# Get the subplot spec
spec, row, col = self.get_model_spec(index, return_row_col=True)
bbox = spec.get_position(self.figure.figure)
coordinates = [bbox.x0, bbox.y0, bbox.width, bbox.height]
# Create the plot
plot = aplpy.FITSFigure(frame.to_hdu(), figure=self.figure.figure, subplot=coordinates)
# Add the plot
self.plots[row][col] = plot
# Return the plot
return plot
# ------------------------------------------------------------------------------
def create_residuals_plot(self, index, frame):
"""
This function ...
:param index:
:param frame:
:return:
"""
# Get the subplot spec
spec, row, col = self.get_residuals_spec(index, return_row_col=True)
bbox = spec.get_position(self.figure.figure)
coordinates = [bbox.x0, bbox.y0, bbox.width, bbox.height]
# Create the plot
plot = aplpy.FITSFigure(frame.to_hdu(), figure=self.figure.figure, subplot=coordinates)
# Add the plot
self.plots[row][col] = plot
# Return the plot
return plot
# ------------------------------------------------------------------------------
def _plot_observation(self, index, frame, cmap, label=None, vmin=None, vmax=None, soft_vmin=False, soft_vmax=False):
"""
This function ...
:param index:
:param frame:
:param cmap:
:param label:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Create the plot
plot = self.create_observation_plot(index, frame)
# Get vmin and vmax
vmin, vmax = self.get_vmin_vmax(frame, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# Set colorscale
plot.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, stretch=self.config.scale)
# Set tick label font
plot.tick_labels.set_font(size='small')
# Set center, radius and spacing
plot.recenter(self.ra_center_deg, self.dec_center_deg, radius=self.radius_deg)
plot.ticks.set_xspacing(self.spacing_deg)
# Set color or frame
plot.frame.set_color(self.frame_color)
# FOR FIRST
#f1._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
#f1._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
# Tick settings
plot._ax2.tick_params(direction='in', which='major', length=self.config.major_tick_length, top=True, right=True, bottom=True, left=True)
plot._ax2.tick_params(direction='in', which='minor', length=self.config.minor_tick_length, top=True, right=True, bottom=True, left=True)
# Set image background color
plot.set_nan_color(self.nan_color)
# FOR FIRST
#f1._ax1.scatter(ra, dec, marker='.', label='Observation')
# FOR FIRST
#legend1 = f1._ax1.legend(loc='upper right', fontsize=12, fancybox=True, framealpha=0, numpoints=None)
#plt.setp(legend1.get_texts(), color=config.text_color_in)
# Set title
if label is not None: plot._ax1.set_title(label, fontsize=self.config.label_fontsize)
# Return the vmin and vmax
return vmin, vmax
# ------------------------------------------------------------------------------
def _plot_model(self, index, frame, cmap, vmin=None, vmax=None, soft_vmin=None, soft_vmax=None):
"""
This function ...
:param index:
:param frame:
:param cmap:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Create the plot
plot = self.create_model_plot(index, frame)
# Get vmin and vmax
vmin, vmax = self.get_vmin_vmax(frame, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# Set colorscale
plot.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, stretch=self.config.scale)
# Set tick label font
plot.tick_labels.set_font(size='small')
# Set center, radius and spacing
plot.recenter(self.ra_center_deg, self.dec_center_deg, radius=self.radius_deg)
plot.ticks.set_xspacing(self.spacing_deg)
# Set color for frame
plot.frame.set_color(self.frame_color)
# Set ticks
plot._ax1.tick_params(direction='in', which='major', length=self.config.major_tick_length, top=True, right=True, bottom=True, left=True)
plot._ax1.tick_params(direction='in', which='minor', length=self.config.minor_tick_length, top=True, right=True, bottom=True, left=True)
# FOR FIRST
#f6._ax1.scatter(ra, dec, marker='.', label='Model')
#legend6 = f6._ax1.legend(loc='upper right', fontsize=12, fancybox=False, framealpha=0, numpoints=None)
#plt.setp(legend6.get_texts(), color=config.text_color_in)
# Set image background color
plot.set_nan_color(self.nan_color)
# ------------------------------------------------------------------------------
def _plot_residuals(self, index, frame, cmap, amplitude=None, soft_amplitude=False):
"""
This function ...
:param index:
:param frame:
:param cmap:
:param amplitude:
:param soft_amplitude:
:return:
"""
# Create the plot
plot = self.create_residuals_plot(index, frame)
# Get vmin and vmax
vmin, vmax = self.get_residual_vmin_vmax(frame, amplitude=amplitude, soft_amplitude=soft_amplitude)
# Set colorscale
plot.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap)
# Set tick label font
plot.tick_labels.set_font(size='small')
# Set center, radius and spacing
plot.recenter(self.ra_center_deg, self.dec_center_deg, radius=self.radius_deg)
plot.ticks.set_xspacing(self.spacing_deg)
# Set color for frame
plot.frame.set_color(self.frame_color)
# Set ticks
plot._ax1.tick_params(direction='in', which='major', length=self.config.major_tick_length, top=True, right=True, bottom=True, left=True)
plot._ax1.tick_params(direction='in', which='minor', length=self.config.minor_tick_length, top=True, right=True, bottom=True, left=True)
# FOR FIRST
# f11._ax1.scatter(ra, dec, marker='.', label='Relative \nResidual')
# FOR FIRST
# Set legend
#legend11 = f11._ax1.legend(loc='lower right', fontsize=12, fancybox=False, framealpha=0, numpoints=None)
#plt.setp(legend11.get_texts(), color=config.text_color_in)
# Set background color
plot.set_nan_color(self.background_color)
# ------------------------------------------------------------------------------
def _plot_distribution(self, index, distribution):
"""
This function ...
:param index:
:param distribution:
:return:
"""
pass
# ------------------------------------------------------------------------------
def plot_observations(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the observed image frames ...")
# Loop over the names
#print(self.names)
#print(self.nimages)
#print(len(self.names))
for index, name in enumerate(self.names):
# Debugging
log.debug("Plotting the observed frame of the '" + name + "' image (panel " + str(index+1) + " of " + str(self.nimages) + ") ...")
# Get the observation
frame = self.get_observation(name)
# Get the label for this image
label = self.get_label(name)
# Get the colormap for this image
cmap = self.get_colormap(name)
# Get the limits
vmin, vmax, soft_vmin, soft_vmax = self.get_limits(name)
# Plot
vmin, vmax = self._plot_observation(index, frame, cmap, label=label, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# Set new vmin and vmax (for corresponding model)
self.set_limits(name, vmin, vmax, soft_vmin=False, soft_vmax=False)
# ------------------------------------------------------------------------------
def plot_models(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the model image frames ...")
# Loop over the names
for index, name in enumerate(self.names):
# Check
if not self.has_model(name): continue
# Debugging
log.debug("Plotting the model frame of the '" + name + "' image (panel " + str(index+1) + " of " + str(self.nimages) + ") ...")
# Get the model
frame = self.get_model(name)
# Get the colormap for this image
cmap = self.get_colormap(name)
# Get the limits
vmin, vmax, soft_vmin, soft_vmax = self.get_limits(name)
# Plot
self._plot_model(index, frame, cmap, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# ------------------------------------------------------------------------------
def plot_residuals(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the residual image frames ...")
# Loop over the names
for index, name in enumerate(self.names):
# Check
if not self.has_residuals(name): continue
# Debugging
log.debug("Plotting the residuals frame of the '" + name + "' image (panel " + str(index+1) + " of " + str(self.nimages) + ") ...")
# Get the residuals
frame = self.get_residuals(name)
# Get the colormap for this residual map
cmap = self.get_residual_colormap(name)
# Get the amplitude
amplitude, soft_amplitude = self.get_residual_amplitude(name)
# Plot
# index, frame, cmap, amplitude=None, soft_amplitude=False
self._plot_residuals(index, frame, cmap, amplitude=amplitude, soft_amplitude=soft_amplitude)
# ------------------------------------------------------------------------------
def plot_distributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the residual distributions ...")
# Loop over the names
for index, name in enumerate(self.names):
# Check
if not self.has_distribution(name): continue
# Debugging
log.debug("Plotting the residual distribution of the '" + name + "' image (panel " + str(index+1) + " of " + str(self.nimages) + " ) ...")
# Get the distribution
distribution = self.get_distribution(name)
# ------------------------------------------------------------------------------
def finish(self):
"""
This function ...
:return:
"""
# Draw
self.figure.draw()
# Save to file
if self.config.path is not None: self.figure.figure.savefig(self.config.path, dpi=self.config.dpi)
# Show
else: plt.show()
# Close
#plt.close(fig)
plt.close()
# ------------------------------------------------------------------------------
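# Example usage (sketch; 'frames' is a dict of label -> Frame, 'center' a sky coordinate
# and 'radius' an angular quantity as used below -- these names are illustrative):
# plot_images_aplpy(frames, filepath="images.pdf", center=center, radius=radius,
#                   ncols=3, scale="log", colormap="inferno", dark=False)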
def plot_images_aplpy(frames, filepath=None, center=None, radius=None, xy_ratio=None, dark=False, scale="log",
colormap="inferno", nrows=None, ncols=None, orientation="horizontal", plotsize=3., distance=None,
share_scale=None, descriptions=None, minmax_scaling=0.5):
"""
This function ...
:param frames:
:param filepath:
:param center:
:param radius:
:param xy_ratio:
:param dark:
:param scale:
:param colormap:
:param nrows:
:param ncols:
:param orientation:
:param plotsize:
:param distance:
:param share_scale:
:param descriptions:
:param minmax_scaling: factor passed on to plotting.get_vmin_vmax for the automatic interval determination (default 0.5)
:return:
"""
import matplotlib.gridspec as gridspec
#from matplotlib.colorbar import ColorbarBase
#from matplotlib.colors import LinearSegmentedColormap
#from matplotlib.colors import Normalize
from pts.magic.tools import plotting
# Set
set_theme(dark=dark)
nimages = len(frames)
xsize = plotsize
#if xy_ratio is None: ysize = 3.5
#else: ysize = xsize / xy_ratio
if xy_ratio is None: xy_ratio = 0.85
ysize = xsize / xy_ratio
#print("plotsize", xsize, ysize)
# Determine the number of columns and rows
if nrows is None and ncols is None:
if orientation == "horizontal": ncols, nrows = nimages, 1
elif orientation == "vertical": ncols, nrows = 1, nimages
else: raise ValueError("Invalid orientation: '" + orientation + "'")
# Nrows is none but ncols is not
elif nrows is None: nrows = numbers.round_up_to_int(nimages/ncols)
# Ncols is none but nrows is not
elif ncols is None: ncols = numbers.round_up_to_int(nimages/nrows)
# Set figure size
figxsize = xsize * ncols
figysize = ysize * nrows
#print("figsize", figxsize, figysize)
# Create figure with appropriate size
fig = plt.figure(figsize=(figxsize, figysize))
# Create grid
gs1 = gridspec.GridSpec(nrows, ncols) # nimages ROWS, 4 COLUMNS
# gs1.update(wspace=0.01, hspace=0.3)
gs1.update(wspace=0., hspace=0.)
plot_idx = 0
# Get frame labels
if types.is_dictionary(frames):
labels = frames.keys()
frames = frames.values()
else: labels = [frame.filter_name for frame in frames]
# Set scale for each image
scales = dict()
if types.is_string_type(scale):
for label in labels: scales[label] = scale
elif types.is_sequence(scale):
for label, scalei in zip(labels, scale): scales[label] = scalei
elif types.is_dictionary(scale): scales = scale
else: raise ValueError("Invalid type for 'scale'")
# Initialize dict for intervals
intervals = dict()
# Set descriptions
if descriptions is None:
descriptions = dict()
for label in labels: descriptions[label] = None
elif types.is_sequence(descriptions):
descrpts = descriptions
descriptions = dict()
for label, descr in zip(labels, descrpts): descriptions[label] = descr
elif types.is_dictionary(descriptions): pass # OK
else: raise ValueError("Invalid type for 'descriptions'")
# Set minmax scaling
if types.is_real_type(minmax_scaling):
factor = minmax_scaling
minmax_scaling = dict()
for label in labels: minmax_scaling[label] = factor
elif types.is_dictionary(minmax_scaling):
minmax_scaling_orig = minmax_scaling
minmax_scaling = dict()
for label in labels:
if label in minmax_scaling_orig: minmax_scaling[label] = minmax_scaling_orig[label]
else: minmax_scaling[label] = 0.5
elif types.is_sequence(minmax_scaling):
minmax_scaling_orig = minmax_scaling
minmax_scaling = dict()
for label, factor in zip(labels, minmax_scaling_orig): minmax_scaling[label] = factor
else: raise ValueError("Invalid type for 'minmax_scaling'")
# Loop over the frames
for label, frame, index in zip(labels, frames, range(nimages)):
rowi = index // ncols
coli = index % ncols
is_first_row = rowi == 0
is_last_row = rowi == nrows - 1
is_first_col = coli == 0
is_last_col = coli == ncols - 1
#print("row", rowi)
#print("col", coli)
# IS FIRST OR LAST IMAGE?
is_first = index == 0
is_last = index == nimages - 1
# Debugging
log.debug("Plotting the '" + label + "' image ...")
# Get HDU
hdu = frame.to_hdu()
# Get interval
if share_scale is not None and label in share_scale:
share_with = share_scale[label]
vmin, vmax = intervals[share_with]
scalei = scales[share_with]
else:
# Get scale
scalei = scales[label]
is_logscale = scalei == "log"
#print(label, minmax_scaling[label])
vmin, vmax = plotting.get_vmin_vmax(frame.data, logscale=is_logscale, minmax_scaling=minmax_scaling[label])
# Set interval
intervals[label] = (vmin, vmax,)
# Set title
if descriptions[label] is not None: title = descriptions[label]
else: title = label.replace("_", "\_").replace("um", "$\mu$m")
# Has sky coordinate system?
has_wcs = frame.has_wcs and frame.wcs.is_sky
# OBSERVATION
figi = aplpy.FITSFigure(hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
setup_map_plot(figi, colormap, vmin=vmin, vmax=vmax, label=r'' + str(title), center=center, radius=radius, scale=scalei, has_wcs=has_wcs)
set_ticks(figi, is_first_row, is_last_row)
# FIRST COLUMN
if is_first_col:
figi.tick_labels.show_y()
figi.axis_labels.show_y()
# LAST ROW
if is_last_row:
figi.tick_labels.show_x()
figi.axis_labels.show_x()
# Increment
plot_idx += 1
# Save the figure
if filepath is not None: plt.savefig(filepath, bbox_inches='tight', dpi=300)
else: plt.show()
# Close
plt.close()
# Reset
reset_theme()
# ------------------------------------------------------------------------------
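# Example usage (sketch; 'observation' and 'model' are Frame objects on the same pixel
# grid; when 'residual' is not given it is computed as (model - observation) / observation):
# plot_one_residual_aplpy(observation, model, path="residual.pdf", center=center, radius=radius)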
def plot_one_residual_aplpy(observation, model, residual=None, path=None, scale="log", plotsize=3., dark=False,
center=None, radius=None, xy_ratio=None, first_label="Observation", second_label="Model",
residual_label="Residual", filter_label=True):
"""
This function ...
:param observation:
:param model:
:param residual:
:param path:
:param scale:
:param plotsize:
:param dark:
:param center:
:param radius:
:param xy_ratio:
:param first_label:
:param second_label:
:param residual_label:
:param filter_label:
:return:
"""
# Make residual?
if residual is None: residual = (model - observation) / observation
# Colormaps
colormap = "inferno"
residual_colormap = "RdBu"
import matplotlib.gridspec as gridspec
from pts.magic.tools import plotting
# Set theme
set_theme(dark=dark)
nrows = 1
ncols = 3
xsize = plotsize
if xy_ratio is None: xy_ratio = 0.85
ysize = xsize / xy_ratio
# Set figure size
figxsize = xsize * ncols
figysize = ysize * nrows
# Create figure with appropriate size
#fig = plt.figure(figsize=(figxsize, figysize))
figure = MPLFigure(size=(figxsize,figysize))
# Create grid
gs1 = gridspec.GridSpec(1, 4) # nimages ROWS, 4 COLUMNS
gs1.update(wspace=0., hspace=0.)
plot_idx = 0
# Percentual residuals
residual = residual * 100.
# Set title
if filter_label and observation.has_filter: title = str(observation.filter).replace("um", " $\mu$m")
else: title = first_label
# Create HDU's for Aplpy
observation_hdu = observation.to_hdu()
model_hdu = model.to_hdu()
residual_hdu = residual.to_hdu()
# Get interval
vmin, vmax = plotting.get_vmin_vmax(observation.data, logscale=scale=="log")
# OBSERVATION
fig1 = aplpy.FITSFigure(observation_hdu, figure=figure.figure, subplot=list(gs1[plot_idx].get_position(figure.figure).bounds))
setup_map_plot(fig1, colormap, vmin=vmin, vmax=vmax, label=r'' + str(title), center=center, radius=radius, scale=scale, has_wcs=observation.has_celestial_wcs)
set_ticks(fig1, True, True)
# Enable y ticks and axis labels BECAUSE OBSERVATION IS THE FIRST COLUMN
fig1.tick_labels.show_y()
fig1.axis_labels.show_y()
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
fig1.tick_labels.show_x()
fig1.axis_labels.show_x()
# Increment
plot_idx += 1
# MODEL
fig2 = aplpy.FITSFigure(model_hdu, figure=figure.figure, subplot=list(gs1[plot_idx].get_position(figure.figure).bounds))
setup_map_plot(fig2, colormap, vmin=vmin, vmax=vmax, label=second_label, center=center, radius=radius, scale=scale, has_wcs=model.has_celestial_wcs)
set_ticks(fig2, True, True)
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
fig2.tick_labels.show_x()
fig2.axis_labels.show_x()
# Increment
plot_idx += 1
# RESIDUAL
fig3 = aplpy.FITSFigure(residual_hdu, figure=figure.figure, subplot=list(gs1[plot_idx].get_position(figure.figure).bounds))
setup_map_plot(fig3, residual_colormap, vmin=-100, vmax=100, label=residual_label + ' (\%)', center=center, radius=radius, has_wcs=residual.has_celestial_wcs)
set_ticks(fig3, True, True)
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
fig3.tick_labels.show_x()
fig3.axis_labels.show_x()
# Show or save
if path is None: figure.show()
else: figure.saveto(path)
# Reset theme
reset_theme()
# ------------------------------------------------------------------------------
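# Example usage (sketch; 'observations', 'models' and 'residuals' are equally long
# sequences of Frame objects, one per filter; 'distance' is used for the unit conversion):
# plot_residuals_aplpy(observations, models, residuals, filepath="residuals.pdf",
#                      center=center, radius=radius, distance=distance)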
def plot_residuals_aplpy(observations, models, residuals, filepath=None, center=None, radius=None, xy_ratio=None,
dark=False, scale="log", plotsize=3., distance=None, mask_simulated=False, masks=None):
"""
This function ...
:param observations:
:param models:
:param residuals:
:param filepath:
:param center:
:param radius:
:param xy_ratio:
:param dark:
:param scale:
:param plotsize:
:param distance:
:param mask_simulated:
:param masks: if passed, both observations, models and residuals are masked
:return:
"""
import numpy as np
import matplotlib.gridspec as gridspec
from matplotlib.colorbar import ColorbarBase
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import Normalize
import seaborn as sns
# Set theme
set_theme(dark=dark)
nimages = len(observations)
ncols = 4
nrows = nimages
# Colormaps
colormap = "inferno"
residual_colormap = "RdBu"
# Set individual map plot size
xsize = plotsize
#if xy_ratio is None: ysize = 3.5
#else: ysize = xsize / xy_ratio
#print("individual size", xsize, ysize)
if xy_ratio is None: xy_ratio = 0.85
ysize = xsize / xy_ratio
# Set figure size
figxsize = xsize * ncols
figysize = ysize * nrows
#print("figure size", figxsize, figysize)
# Create figure with appropriate size
fig = plt.figure(figsize=(figxsize, figysize))
# Create grid
gs1 = gridspec.GridSpec(nimages, 4) # nimages ROWS, 4 COLUMNS
#gs1.update(wspace=0.01, hspace=0.3)
gs1.update(wspace=0., hspace=0.)
plot_idx = 0
# Loop over the filters
if masks is None: masks = [None] * nimages
for observation, model, residual, mask, index in zip(observations, models, residuals, masks, range(nimages)):
#print("units:")
#print(observation.unit)
#print(model.unit)
observation.convert_to("mJy/sr", distance=distance)
model.convert_to("mJy/sr", distance=distance)
# MASK MODEL
if mask_simulated:
model.rebin(observation.wcs)
model.apply_mask_nans(observation.nans)
# MASK ALL?
if mask is not None:
observation.apply_mask_nans(mask)
model.apply_mask_nans(mask)
residual.apply_mask_nans(mask)
# IS FIRST OR LAST IMAGE?
is_first = index == 0
is_last = index == nimages - 1
# Debugging
log.debug("Plotting the observation, model and residuals for the " + str(observation.filter) + " filter ...")
# Percentual residuals
residual = residual * 100.
# Set title
title = str(observation.filter).replace("um", " $\mu$m")
# Create HDU's for Aplpy
observation_hdu = observation.to_hdu()
model_hdu = model.to_hdu()
residual_hdu = residual.to_hdu()
from pts.magic.tools import plotting
vmin, vmax = plotting.get_vmin_vmax(observation.data, logscale=scale=="log")
#vmax = 0.7 * vmax
#print("VMIN", vmin)
#print("VMAX", vmax)
# ------------------------------------------------------------------------------
# Plot obs, model and residual
# ------------------------------------------------------------------------------
# OBSERVATION
fig1 = aplpy.FITSFigure(observation_hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
setup_map_plot(fig1, colormap, vmin=vmin, vmax=vmax, label=r'' + str(title), center=center, radius=radius, scale=scale)
set_ticks(fig1, is_first, is_last)
# Enable y ticks and axis labels BECAUSE OBSERVATION IS THE FIRST COLUMN
fig1.tick_labels.show_y()
fig1.axis_labels.show_y()
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
if is_last: fig1.tick_labels.show_x()
if is_last: fig1.axis_labels.show_x()
# Increment
plot_idx += 1
# ------------------------------------------------------------------------------
# MODEL
fig2 = aplpy.FITSFigure(model_hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
setup_map_plot(fig2, colormap, vmin=vmin, vmax=vmax, label='Model', center=center, radius=radius, scale=scale)
set_ticks(fig2, is_first, is_last)
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
if is_last: fig2.tick_labels.show_x()
if is_last: fig2.axis_labels.show_x()
# Increment
plot_idx += 1
# ------------------------------------------------------------------------------
# RESIDUAL
fig3 = aplpy.FITSFigure(residual_hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
setup_map_plot(fig3, residual_colormap, vmin=-100, vmax=100, label='Residual (\%)', center=center, radius=radius)
set_ticks(fig3, is_first, is_last)
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
if is_last: fig3.tick_labels.show_x()
if is_last: fig3.axis_labels.show_x()
# ------------------------------------------------------------------------------
# COLORBAR
colorbar_start_x = gs1[plot_idx].get_position(fig).bounds[0] + 0.025
colorbar_start_y = gs1[plot_idx].get_position(fig).bounds[1] + 0.085 / (nimages)
colorbar_x_width = gs1[plot_idx].get_position(fig).bounds[2] - 0.05
colorbar_y_height = gs1[plot_idx].get_position(fig).bounds[3]
cb_ax = fig.add_axes([colorbar_start_x, colorbar_start_y, colorbar_x_width, (0.02 + 0.002) / (nimages + 1)])
# Colourbar
cb = ColorbarBase(cb_ax, cmap=residual_colormap, norm=Normalize(vmin=-100, vmax=100), orientation='horizontal')
cb.ax.xaxis.set_ticks_position('bottom')
cb.ax.xaxis.set_label_position('bottom')
cb.ax.zorder = 99
cb.ax.xaxis.set_tick_params(color='white')
cb.outline.set_edgecolor('white')
plt.setp(plt.getp(cb.ax.axes, 'yticklabels'), color='white')
plt.setp(plt.getp(cb.ax.axes, 'xticklabels'), color='white')
cb.set_ticks([-100, -50, 0, 50, 100])
# Increment
plot_idx += 1
# ------------------------------------------------------------------------------
# KDE Plot of residuals
residual = residual_hdu.data
fig4 = plt.subplot(gs1[plot_idx])
residuals_to_kde = np.where((residual <= 200) & (residual >= -200))
if dark:
sns.kdeplot(residual[residuals_to_kde], bw='silverman', c='white', shade=True)
fig4.axes.set_facecolor("black")
else:
sns.kdeplot(residual[residuals_to_kde], bw='silverman', c='k', shade=True)
fig4.axes.set_facecolor("white")
fig4.tick_params(labelleft='off')
plt.xlim([-150, 150])
fig4.tick_params(direction='in', which='major', length=7, top=True, right=False, bottom=True, left=False)
fig4.tick_params(direction='in', which='minor', length=4, top=True, right=False, bottom=True, left=False)
# Hide tick labels except for the last (bottom) plot
if not is_last: fig4.tick_params(labelbottom=False)
if dark: plt.axvline(0, c='white', ls='--', lw=2)
else: plt.axvline(0, c='k', ls='--', lw=2)
# Label for kde
plt.xlabel('Residual (\%)')
# Increment
plot_idx += 1
# Save the figure
if filepath is not None: plt.savefig(filepath, bbox_inches='tight', dpi=300)
else: plt.show()
# Close
plt.close()
# Reset theme
reset_theme()
# ------------------------------------------------------------------------------
def setup_map_plot(figure, colormap, vmin, vmax, label, smooth=None, text_x=0.05, text_y=0.95, center=None,
radius=None, scale="linear", has_wcs=True):
"""
This function ...
:param figure:
:param colormap:
:param vmin:
:param vmax:
:param label:
:param smooth:
:param text_x:
:param text_y:
:param center:
:param radius:
:param scale:
:param has_wcs:
:return:
"""
figure.show_colorscale(cmap=colormap, vmin=vmin, vmax=vmax, smooth=smooth, stretch=scale)
#figure.set_tick_labels_format(xformat='hh:mm:ss',yformat='dd:mm:ss')
if has_wcs:
figure.tick_labels.set_xformat('hh:mm:ss')
figure.tick_labels.set_yformat('dd:mm:ss')
figure._ax1.set_facecolor('black')
figure.set_nan_color('black')
# RECENTER
if center is not None:
if radius is None: raise ValueError("Cannot specify center without radius")
if has_wcs: figure.recenter(center.ra.to("deg").value, center.dec.to("deg").value, radius=radius.to("deg").value)
else: figure.recenter(center.x, center.y, radius=radius)
# Hide axes labels and tick labels by default (enable for y for first column and for x for last row)
figure.axis_labels.hide()
figure.tick_labels.hide()
# Axes spines
figure._ax1.spines['bottom'].set_color('white')
figure._ax1.spines['top'].set_color('white')
figure._ax1.spines["left"].set_color("white")
figure._ax1.spines["right"].set_color("white")
# TICKS
#figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
#figure._ax2.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
#figure._ax2.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
# SET LABEL
figure.add_label(text_x, text_y, r'' + str(label), relative=True, size=13, weight='bold', color='white',
horizontalalignment='left', verticalalignment='top',
bbox=dict(facecolor='black', edgecolor='none', alpha=0.5))
# ------------------------------------------------------------------------------
def set_ticks(figure, is_first_row, is_last_row):
"""
This function ...
:param figure:
:param is_first_row:
:param is_last_row:
:return:
"""
# ONLY ROW?
is_only_row = is_first_row and is_last_row
# ONLY
if is_only_row:
# IN EVERYWHERE
figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
# FIRST
elif is_first_row:
# LEFT, RIGHT AND TOP
figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=False, left=True)
figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=False, left=True)
# LAST
elif is_last_row:
# TOP
figure._ax1.tick_params(direction='inout', which='major', length=14, top=True, right=False, bottom=False, left=False)
figure._ax1.tick_params(direction='inout', which='minor', length=8, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='out', which='major', length=7, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='out', which='minor', length=4, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=False, bottom=False, left=False)
# BOTTOM, LEFT AND RIGHT
figure._ax1.tick_params(direction='in', which='major', length=7, right=True, bottom=True, left=True)
figure._ax1.tick_params(direction='in', which='minor', length=4, right=True, bottom=True, left=True)
#figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
# In between
else:
# TOP
figure._ax1.tick_params(direction='inout', which='major', length=14, top=True, right=False, bottom=False, left=False)
figure._ax1.tick_params(direction='inout', which='minor', length=8, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='out', which='major', length=7, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='out', which='minor', length=4, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=False, bottom=False, left=False)
# LEFT AND RIGHT
figure._ax1.tick_params(direction='in', which='major', length=7, right=True, bottom=False, left=True)
figure._ax1.tick_params(direction='in', which='minor', length=4, right=True, bottom=False, left=True)
#figure._ax1.tick_params(direction='in', which='major', length=7, top=False, right=True, bottom=False, left=True)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=False, right=True, bottom=False, left=True)
# ------------------------------------------------------------------------------
def set_theme(dark=False):
"""
This function ...
:param dark:
:return:
"""
# General settings
plt.rcParams["axes.labelsize"] = 14 # 16 #default 20
plt.rcParams["xtick.labelsize"] = 8 # 10 #default 16
plt.rcParams["ytick.labelsize"] = 8 # 10 #default 16
plt.rcParams["legend.fontsize"] = 14 # 10 #default 14
plt.rcParams["legend.markerscale"] = 0
plt.rcParams["lines.markersize"] = 2.5 # 4 #default 4
plt.rcParams["axes.linewidth"] = 1
# Colors
if dark:
plt.rcParams['axes.facecolor'] = 'black'
plt.rcParams['savefig.facecolor'] = 'black'
plt.rcParams['axes.edgecolor'] = 'white'
plt.rcParams['xtick.color'] = 'white'
plt.rcParams['ytick.color'] = 'white'
plt.rcParams["axes.labelcolor"] = 'white'
plt.rcParams["text.color"] = 'white'
else:
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['savefig.facecolor'] = 'white'
plt.rcParams['axes.edgecolor'] = 'black'
plt.rcParams['xtick.color'] = 'black'
plt.rcParams['ytick.color'] = 'black'
plt.rcParams["axes.labelcolor"] = 'black'
plt.rcParams["text.color"] = 'black'
# ------------------------------------------------------------------------------
def reset_theme():
"""
This function ...
:return:
"""
# Back to original settings
plt.rcParams.update(plt.rcParamsDefault)
# ------------------------------------------------------------------------------
| agpl-3.0 | 52,418,295,662,389,330 | 28.47714 | 192 | 0.500916 | false |
glamp/coffe2py | main.py | 1 | 1282 | import sys
from IPython.core.interactiveshell import InteractiveShell
import pandasjson as json
import StringIO
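# Protocol sketch (inferred from the loop below, not a formal specification): each
# request arrives on stdin as one JSON object per line with a "code" field and an
# optional "autocomplete" flag; the captured output is echoed back on stdout in the
# "result" field of the same object.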
if __name__=="__main__":
mode = "ipython"
line = sys.stdin.readline()
shell = InteractiveShell()
while line:
# explicitly write to stdout
sys.stdout.write(line)
sys.stdout.flush()
# handle incoming data, parse it, and redirect
# stdout so it doesn't interfere
line = sys.stdin.readline()
data = json.loads(line)
codeOut = StringIO.StringIO()
sys.stdout = codeOut
try:
code = data["code"]
if data.get("autocomplete")==True:
_, completions = shell.complete(code)
print json.dumps(completions)
elif code.startswith("print"):
#exec(code)
shell.ex(code)
else:
try:
#print repr(eval(code))
print repr(shell.ev(code))
except:
#exec(code)
shell.ex(code)
except Exception, e:
pass
sys.stdout = sys.__stdout__
data["result"] = codeOut.getvalue()
sys.stdout.write(json.dumps(data) + "\n")
sys.stdout.flush() | bsd-2-clause | 5,789,231,768,680,157,000 | 30.292683 | 58 | 0.522621 | false |
capntransit/carfree-council | cfcensus2010.py | 1 | 1828 | import sys, os, json, time
import pandas as pd
BOROCODE = {'61' : '1', '05' : '2', '47': '3', '81' : '4', '85': '5'}
if (len(sys.argv) < 2):
print ("Usage: cfcensus.py census.csv districts.json")
exit()
censusfile = sys.argv[1]
councilfile = sys.argv[2]
TRACTCOL = 'BoroCT' # rename this for 2000 census
def boroCT (id2):
boro = BOROCODE[str(id2)[3:5]]
tract = str(id2)[5:]
return boro + tract
for (f) in ([censusfile, councilfile]):
if (not os.path.isfile(f)):
print ("File " + f + " is not readable")
exit()
try:
vehDf = pd.read_csv(
censusfile,
skiprows=[1]
)
except Exception as e:
print ("Unable to read census file " + censusfile + ": {0}".format(e))
exit()
try:
with open(councilfile) as councilfo:
councilData = json.load(councilfo)
except Exception as e:
print ("Unable to read council file " + councilfile+": {0}".format(e))
exit()
vehDf['pctNoVeh'] = vehDf['HD01_VD03'].astype('int') / vehDf['HD01_VD01'].astype('int')
vehDf[TRACTCOL] = vehDf['GEO.id2'].apply(boroCT)
vehDf2 = pd.DataFrame(vehDf[[TRACTCOL, 'HD01_VD01', 'HD01_VD03', 'pctNoVeh']]).set_index(TRACTCOL)
f = 0
total = {}
noVeh = {}
councilDistricts = set()
for (t, c) in councilData.items():
for (d) in c:
councilDistricts.add(d)
try:
total[d] = total.get(d, 0) + c[d] * vehDf2.loc[str(t)]['HD01_VD01']
noVeh[d] = noVeh.get(d, 0) + c[d] * vehDf2.loc[str(t)]['HD01_VD03']
except KeyError as e:
print("No entry for census tract " + str(t))
for (d) in sorted(councilDistricts, key=int):
print (','.join([
d,
str(int(total[d])),
str(int(noVeh[d])),
str(round((noVeh[d] / total[d]), 3))
]))
| gpl-3.0 | 7,753,463,791,986,384,000 | 26.283582 | 98 | 0.555252 | false |
eharney/cinder | cinder/scheduler/filters/capacity_filter.py | 1 | 8982 | # Copyright (c) 2012 Intel
# Copyright (c) 2012 OpenStack Foundation
# Copyright (c) 2015 EMC Corporation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
from oslo_log import log as logging
from cinder.scheduler import filters
LOG = logging.getLogger(__name__)
class CapacityFilter(filters.BaseBackendFilter):
"""Capacity filters based on volume backend's capacity utilization."""
def backend_passes(self, backend_state, filter_properties):
"""Return True if host has sufficient capacity."""
volid = None
# If the volume already exists on this host, don't fail it for
# insufficient capacity (e.g., if we are retyping)
if backend_state.backend_id == filter_properties.get('vol_exists_on'):
return True
spec = filter_properties.get('request_spec')
if spec:
volid = spec.get('volume_id')
grouping = 'cluster' if backend_state.cluster_name else 'host'
if filter_properties.get('new_size'):
# If new_size is passed, we are allocating space to extend a volume
requested_size = (int(filter_properties.get('new_size')) -
int(filter_properties.get('size')))
LOG.debug('Checking if %(grouping)s %(grouping_name)s can extend '
'the volume %(id)s in %(size)s GB',
{'grouping': grouping,
'grouping_name': backend_state.backend_id, 'id': volid,
'size': requested_size})
else:
requested_size = filter_properties.get('size')
LOG.debug('Checking if %(grouping)s %(grouping_name)s can create '
'a %(size)s GB volume (%(id)s)',
{'grouping': grouping,
'grouping_name': backend_state.backend_id, 'id': volid,
'size': requested_size})
# requested_size is 0 means that it's a manage request.
if requested_size == 0:
return True
if backend_state.free_capacity_gb is None:
# Fail Safe
LOG.error("Free capacity not set: "
"volume node info collection broken.")
return False
free_space = backend_state.free_capacity_gb
total_space = backend_state.total_capacity_gb
reserved = float(backend_state.reserved_percentage) / 100
if free_space in ['infinite', 'unknown']:
# NOTE(zhiteng) for those back-ends cannot report actual
# available capacity, we assume it is able to serve the
# request. Even if it was not, the retry mechanism is
# able to handle the failure by rescheduling
return True
elif total_space in ['infinite', 'unknown']:
# If total_space is 'infinite' or 'unknown' and reserved
# is 0, we assume the back-ends can serve the request.
# If total_space is 'infinite' or 'unknown' and reserved
# is not 0, we cannot calculate the reserved space.
# float(total_space) will throw an exception. total*reserved
# also won't work. So the back-ends cannot serve the request.
if reserved == 0:
return True
LOG.debug("Cannot calculate GB of reserved space (%s%%) with "
"backend's reported total capacity '%s'",
backend_state.reserved_percentage, total_space)
return False
total = float(total_space)
if total <= 0:
LOG.warning("Insufficient free space for volume creation. "
"Total capacity is %(total).2f on %(grouping)s "
"%(grouping_name)s.",
{"total": total,
"grouping": grouping,
"grouping_name": backend_state.backend_id})
return False
# Calculate how much free space is left after taking into account
# the reserved space.
free = free_space - math.floor(total * reserved)
# NOTE(xyang): If 'provisioning:type' is 'thick' in extra_specs,
# we will not use max_over_subscription_ratio and
# provisioned_capacity_gb to determine whether a volume can be
# provisioned. Instead free capacity will be used to evaluate.
thin = True
vol_type = filter_properties.get('volume_type', {}) or {}
provision_type = vol_type.get('extra_specs', {}).get(
'provisioning:type')
if provision_type == 'thick':
thin = False
# Only evaluate using max_over_subscription_ratio if
# thin_provisioning_support is True. Check if the ratio of
# provisioned capacity over total capacity has exceeded over
# subscription ratio.
if (thin and backend_state.thin_provisioning_support and
backend_state.max_over_subscription_ratio >= 1):
provisioned_ratio = ((backend_state.provisioned_capacity_gb +
requested_size) / total)
if provisioned_ratio > backend_state.max_over_subscription_ratio:
msg_args = {
"provisioned_ratio": provisioned_ratio,
"oversub_ratio": backend_state.max_over_subscription_ratio,
"grouping": grouping,
"grouping_name": backend_state.backend_id,
}
LOG.warning(
"Insufficient free space for thin provisioning. "
"The ratio of provisioned capacity over total capacity "
"%(provisioned_ratio).2f has exceeded the maximum over "
"subscription ratio %(oversub_ratio).2f on %(grouping)s "
"%(grouping_name)s.", msg_args)
return False
else:
# Thin provisioning is enabled and projected over-subscription
# ratio does not exceed max_over_subscription_ratio. The host
# passes if "adjusted" free virtual capacity is enough to
# accommodate the volume. Adjusted free virtual capacity is
# the currently available free capacity (taking into account
# of reserved space) which we can over-subscribe.
adjusted_free_virtual = (
free * backend_state.max_over_subscription_ratio)
res = adjusted_free_virtual >= requested_size
if not res:
msg_args = {"available": adjusted_free_virtual,
"size": requested_size,
"grouping": grouping,
"grouping_name": backend_state.backend_id}
LOG.warning("Insufficient free virtual space "
"(%(available)sGB) to accommodate thin "
"provisioned %(size)sGB volume on %(grouping)s"
" %(grouping_name)s.", msg_args)
return res
elif thin and backend_state.thin_provisioning_support:
LOG.warning("Filtering out %(grouping)s %(grouping_name)s "
"with an invalid maximum over subscription ratio "
"of %(oversub_ratio).2f. The ratio should be a "
"minimum of 1.0.",
{"oversub_ratio":
backend_state.max_over_subscription_ratio,
"grouping": grouping,
"grouping_name": backend_state.backend_id})
return False
msg_args = {"grouping_name": backend_state.backend_id,
"grouping": grouping,
"requested": requested_size,
"available": free}
if free < requested_size:
LOG.warning("Insufficient free space for volume creation "
"on %(grouping)s %(grouping_name)s (requested / "
"avail): %(requested)s/%(available)s",
msg_args)
return False
LOG.debug("Space information for volume creation "
"on %(grouping)s %(grouping_name)s (requested / avail): "
"%(requested)s/%(available)s", msg_args)
return True
| apache-2.0 | 4,721,007,963,419,278,000 | 46.273684 | 79 | 0.560009 | false |
mganeva/mantid | Framework/PythonInterface/test/python/plugins/algorithms/GetNegMuMuonicXRDTest.py | 1 | 7298 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.kernel import *
from mantid.api import *
from mantid.simpleapi import *
class GetNegMuMuonicXRDTest(unittest.TestCase):
au_muonic_xr = [8135.2,8090.6,8105.4,8069.4,5764.89,5594.97,3360.2,
3206.8,2474.22,2341.21,2304.44,1436.05,1391.58,1104.9,
899.14,869.98,405.654,400.143]
as_muonic_xr = [1866.9,1855.8,436.6,427.5]
#TESTING FOR ONE WORKSPACE IN GROUP WORKSPACE
def test_muonic_xrd_single_ws_in_group(self):
self.au_muonic_xr.sort()
self.as_muonic_xr.sort()
#Setting up the work space manually
au_peak_values = self.au_muonic_xr
y_position = -0.001 #same as default used by GetNegMuMuonic
y_pos_ws = [y_position]*len(au_peak_values)
au_muon_xr_ws = CreateWorkspace(au_peak_values[:], y_pos_ws[:])
#Check that au_muon_xr_ws is not null
self.assertFalse(au_muon_xr_ws==None)
au_muon_group = GroupWorkspaces(au_muon_xr_ws)
#Check that au_muon_group is not null
self.assertFalse(au_muon_group==None)
#Get the algorithm to produce the same workspace
neg_mu_xr_group = GetNegMuMuonicXRD("Au") #testing default y-Axis position value
#Check that neg_mu_xr_ws is not null
self.assertFalse(neg_mu_xr_group==None)
#Test number of workspaces in group
self.assertEqual(au_muon_group.getNumberOfEntries(),
neg_mu_xr_group.getNumberOfEntries())
self.assertTrue(au_muon_group.size() == 1)
self.assertTrue(neg_mu_xr_group.size() == 1)
#now testing the one workspace in the workspace group
neg_mu_xr_ws = neg_mu_xr_group[0]
au_muon_ws = au_muon_group[0]
#check number of histograms are equal
self.assertEqual(neg_mu_xr_ws.getNumberHistograms(), au_muon_ws.getNumberHistograms())
#check number of bins is equal
self.assertEqual(au_muon_ws.blocksize(), neg_mu_xr_ws.blocksize())
#check length of XValues is the same
self.assertEqual(len(au_muon_ws.readX(0)), len(neg_mu_xr_ws.readX(0)))
#check all the XValues are the same
#For RHEL6 (running an older version of python) this assert is not yet implemented:
#self.assertItemsEqual(au_muon_ws.readX(0),neg_mu_xr_ws.readX(0))
#INSTEAD we will use a simple for loop
for x_value in range(len(au_muon_ws.readX(0))):
self.assertEqual(au_muon_ws.readX(0)[x_value], neg_mu_xr_ws.readX(0)[x_value])
#check length of YValues is the same
self.assertEqual(len(au_muon_ws.readY(0)), len(neg_mu_xr_ws.readY(0)))
#check all the YValues are the same
#For RHEL6 (running an older version of python) this assert is not yet implemented:
#self.assertItemsEqual(au_muon_ws.readY(0),neg_mu_xr_ws.readY(0))
#INSTEAD we will use a simple for loop
for y_value in range(len(au_muon_ws.readY(0))):
self.assertEqual(au_muon_ws.readY(0)[y_value], neg_mu_xr_ws.readY(0)[y_value])
#TESTING FOR MORE THAN ONE WORKSPACE IN GROUP WORKSPACE
def test_muonic_xrd_more_than_one_ws_in_group(self):
self.au_muonic_xr.sort()
self.as_muonic_xr.sort()
y_position = 0.2
#Setting up au_muonic workspace
au_peak_values = self.au_muonic_xr
#check to see if workspace has been set to non-None value
self.assertFalse(au_peak_values == None)
au_y_pos_ws = [y_position]*len(au_peak_values)
#setting up as_muonic workspace
as_peak_values = self.as_muonic_xr
#check to see if workspace has been set to non-None value
self.assertFalse(as_peak_values == None)
as_y_pos_ws = [y_position]*len(as_peak_values)
au_muon_xr_ws = CreateWorkspace(au_peak_values,au_y_pos_ws[:])
#check to see if workspace creation was successful
self.assertFalse(au_muon_xr_ws == None)
as_muon_xr_ws = CreateWorkspace(as_peak_values, as_y_pos_ws[:])
#check to see if workspace creation was successful
self.assertFalse(as_muon_xr_ws == None)
ws_list = [au_muon_xr_ws,as_muon_xr_ws]
grouped_muon_ws = GroupWorkspaces(ws_list)
#check to see whether grouping workspaces was successful
self.assertFalse(grouped_muon_ws == None)
#Run algorithm that creates muonic_xr group workspace
group_muonic_xr_ws = GetNegMuMuonicXRD("Au,As", 0.2)
#check that this has assigned value correctly
self.assertFalse(group_muonic_xr_ws == None)
#Compare histograms for each of the workspaces in GroupWorkspaces created
self.assertEqual(grouped_muon_ws[0].getNumberHistograms(), group_muonic_xr_ws[0].getNumberHistograms())
self.assertEqual(grouped_muon_ws[1].getNumberHistograms(), group_muonic_xr_ws[1].getNumberHistograms())
#Compare length of X values read from each workspace in grouped workspace
self.assertEqual(len(grouped_muon_ws[0].readX(0)), len(group_muonic_xr_ws[0].readX(0)))
self.assertEqual(len(grouped_muon_ws[1].readX(0)), len(group_muonic_xr_ws[1].readX(0)))
#Compare X values read from each workspace in grouped workspace
#For RHEL6 (running an older version of python) this assert is not yet implemented:
#self.assertItemsEqual(grouped_muon_ws[0].readX(0), group_muonic_xr_ws[0].readX(0))
#self.assertItemsEqual(grouped_muon_ws[1].readX(0), group_muonic_xr_ws[1].readX(0))
#INSTEAD we will use a simple for loop
for x_value in range(len(grouped_muon_ws[0].readX(0))):
self.assertEqual(grouped_muon_ws[0].readX(0)[x_value], group_muonic_xr_ws[0].readX(0)[x_value])
for x_value in range(len(grouped_muon_ws[1].readX(0))):
self.assertEqual(grouped_muon_ws[1].readX(0)[x_value], group_muonic_xr_ws[1].readX(0)[x_value])
#Compare length of Y values read from each workspace in grouped workspace
self.assertEqual(len(grouped_muon_ws[0].readY(0)), len(group_muonic_xr_ws[0].readY(0)))
self.assertEqual(len(grouped_muon_ws[1].readY(0)), len(group_muonic_xr_ws[1].readY(0)))
#Compare Y values read from each workspace in grouped workspace
#For RHEL6 (running an older version of python) this assert is not yet implemented:
#self.assertItemsEqual(grouped_muon_ws[0].readY(0), group_muonic_xr_ws[0].readY(0))
#self.assertItemsEqual(grouped_muon_ws[1].readY(0), group_muonic_xr_ws[1].readY(0))
#INSTEAD we will use a simple for loop
for y_value in range(len(grouped_muon_ws[0].readY(0))):
self.assertEqual(grouped_muon_ws[0].readY(0)[y_value], group_muonic_xr_ws[0].readY(0)[y_value])
for y_value in range(len(grouped_muon_ws[1].readY(0))):
self.assertEqual(grouped_muon_ws[1].readY(0)[y_value], group_muonic_xr_ws[1].readY(0)[y_value])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 6,650,701,605,140,535,000 | 50.394366 | 111 | 0.65744 | false |
vorushin/FamilyFeed | sources/facebook.py | 1 | 1748 | from datetime import datetime
import json
from urllib2 import urlopen, HTTPError
from django.db.models import Max
from sources.models import FacebookPost
def time(s):
return datetime.strptime(s, '%Y-%m-%dT%H:%M:%S+0000')
def post_text(item):
return item.get('message', u'') + item.get('description', u'')
def list_posts(access_token):
latest_created_time = FacebookPost.objects\
.filter(access_token=access_token)\
.aggregate(Max('created_time'))['created_time__max']
'''for post in new_posts(access_token, latest_created_time):
if not FacebookPost.objects.filter(
access_token=access_token,
created_time=time(post['created_time'])).exists():
FacebookPost.objects.create(
access_token=access_token,
created_time=time(post['created_time']),
data=post)'''
return [p.data for p in FacebookPost.objects \
.filter(access_token=access_token) \
.order_by('-created_time')]
def new_posts(access_token, older_than=None):
graph_url = 'https://graph.facebook.com/me/feed?access_token=%s' % \
access_token
graph_url += '&limit=1000'
if older_than:
graph_url += '&since=' + older_than.isoformat()
resp = json.loads(urlopen(graph_url).read())
while resp['data']:
for item in resp['data']:
if older_than:
if time(item['created_time']) <= older_than:
return
if item.get('message'):
yield item
try:
resp = json.loads(urlopen(resp['paging']['next']).read())
except HTTPError:
break
| mit | -6,442,804,977,084,851,000 | 33.96 | 76 | 0.57151 | false |
bytedance/fedlearner | web_console_v2/api/test/fedlearner_webconsole/utils/file_manager_test.py | 1 | 9062 | # Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import shutil
import tempfile
import unittest
from collections import namedtuple
from pathlib import Path
from tensorflow.io import gfile
from fedlearner_webconsole.utils.file_manager import GFileFileManager, FileManager, File
FakeFileStatistics = namedtuple('FakeFileStatistics', ['length', 'mtime_nsec'])
class GFileFileManagerTest(unittest.TestCase):
_F1_SIZE = 3
_F2_SIZE = 4
_S1_SIZE = 55
_F1_MTIME = 1613982390
_F2_MTIME = 1613982391
_S1_MTIME = 1613982392
def _get_file_stat(self, orig_os_stat, path):
gfile_stat = FakeFileStatistics(2, 1613982390 * 1e9)
if path == self._get_temp_path('f1.txt') or \
path == self._get_temp_path('subdir/f1.txt'):
gfile_stat = FakeFileStatistics(self._F1_SIZE,
self._F1_MTIME * 1e9)
return gfile_stat
elif path == self._get_temp_path('f2.txt') or \
path == self._get_temp_path('f3.txt'):
gfile_stat = FakeFileStatistics(self._F2_SIZE,
self._F2_MTIME * 1e9)
return gfile_stat
elif path == self._get_temp_path('subdir/s1.txt'):
gfile_stat = FakeFileStatistics(self._S1_SIZE,
self._S1_MTIME * 1e9)
return gfile_stat
else:
return orig_os_stat(path)
def setUp(self):
# Create a temporary directory
self._test_dir = tempfile.mkdtemp()
subdir = Path(self._test_dir).joinpath('subdir')
subdir.mkdir(exist_ok=True)
Path(self._test_dir).joinpath('f1.txt').write_text('xxx')
Path(self._test_dir).joinpath('f2.txt').write_text('xxx')
subdir.joinpath('s1.txt').write_text('xxx')
# Mocks os.stat
self._orig_os_stat = os.stat
def fake_stat(path, *arg, **kwargs):
return self._get_file_stat(self._orig_os_stat, path)
gfile.stat = fake_stat
self._fm = GFileFileManager()
def tearDown(self):
os.stat = self._orig_os_stat
# Remove the directory after the test
shutil.rmtree(self._test_dir)
def _get_temp_path(self, file_path: str = None) -> str:
return str(Path(self._test_dir, file_path or '').absolute())
def test_can_handle(self):
self.assertTrue(self._fm.can_handle('/data/abc'))
self.assertFalse(self._fm.can_handle('data'))
def test_ls(self):
# List file
self.assertEqual(self._fm.ls(self._get_temp_path('f1.txt')), [
File(path=self._get_temp_path('f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME)
])
# List folder
self.assertEqual(
sorted(self._fm.ls(self._get_temp_path()),
key=lambda file: file.path),
sorted([
File(path=self._get_temp_path('f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME),
File(path=self._get_temp_path('f2.txt'),
size=self._F2_SIZE,
mtime=self._F2_MTIME)
],
key=lambda file: file.path))
# List folder recursively
self.assertEqual(
sorted(self._fm.ls(self._get_temp_path(), recursive=True),
key=lambda file: file.path),
sorted([
File(path=self._get_temp_path('f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME),
File(path=self._get_temp_path('f2.txt'),
size=self._F2_SIZE,
mtime=self._F2_MTIME),
File(path=self._get_temp_path('subdir/s1.txt'),
size=self._S1_SIZE,
mtime=self._S1_MTIME),
],
key=lambda file: file.path))
def test_move(self):
# Moves to another folder
self._fm.move(self._get_temp_path('f1.txt'),
self._get_temp_path('subdir/'))
self.assertEqual(
sorted(self._fm.ls(self._get_temp_path('subdir')),
key=lambda file: file.path),
sorted([
File(path=self._get_temp_path('subdir/s1.txt'),
size=self._S1_SIZE,
mtime=self._S1_MTIME),
File(path=self._get_temp_path('subdir/f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME),
],
key=lambda file: file.path))
# Renames
self._fm.move(self._get_temp_path('f2.txt'),
self._get_temp_path('f3.txt'))
with self.assertRaises(ValueError):
self._fm.ls(self._get_temp_path('f2.txt'))
self.assertEqual(self._fm.ls(self._get_temp_path('f3.txt')), [
File(path=self._get_temp_path('f3.txt'),
size=self._F2_SIZE,
mtime=self._F2_MTIME)
])
def test_remove(self):
self._fm.remove(self._get_temp_path('f1.txt'))
self._fm.remove(self._get_temp_path('subdir'))
self.assertEqual(self._fm.ls(self._get_temp_path(), recursive=True), [
File(path=self._get_temp_path('f2.txt'),
size=self._F2_SIZE,
mtime=self._F2_MTIME)
])
def test_copy(self):
self._fm.copy(self._get_temp_path('f1.txt'),
self._get_temp_path('subdir'))
self.assertEqual(self._fm.ls(self._get_temp_path('f1.txt')), [
File(path=self._get_temp_path('f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME)
])
self.assertEqual(self._fm.ls(self._get_temp_path('subdir/f1.txt')), [
File(path=self._get_temp_path('subdir/f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME)
])
def test_mkdir(self):
self._fm.mkdir(os.path.join(self._get_temp_path(), 'subdir2'))
self.assertTrue(os.path.isdir(self._get_temp_path('subdir2')))
def test_read(self):
content = self._fm.read(self._get_temp_path('f1.txt'))
self.assertEqual('xxx', content)
class FileManagerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
fake_fm = 'testing.fake_file_manager:FakeFileManager'
os.environ['CUSTOMIZED_FILE_MANAGER'] = fake_fm
@classmethod
def tearDownClass(cls):
del os.environ['CUSTOMIZED_FILE_MANAGER']
def setUp(self):
self._fm = FileManager()
def test_can_handle(self):
self.assertTrue(self._fm.can_handle('fake://123'))
# Falls back to default manager
self.assertTrue(self._fm.can_handle('/data/123'))
self.assertFalse(self._fm.can_handle('unsupported:///123'))
def test_ls(self):
self.assertEqual(self._fm.ls('fake://data'), [{
'path': 'fake://data/f1.txt',
'size': 0
}])
def test_move(self):
self.assertTrue(self._fm.move('fake://move/123', 'fake://move/234'))
self.assertFalse(
self._fm.move('fake://do_not_move/123', 'fake://move/234'))
# No file manager can handle this
self.assertRaises(RuntimeError,
lambda: self._fm.move('hdfs://123', 'fake://abc'))
def test_remove(self):
self.assertTrue(self._fm.remove('fake://remove/123'))
self.assertFalse(self._fm.remove('fake://do_not_remove/123'))
# No file manager can handle this
self.assertRaises(RuntimeError,
lambda: self._fm.remove('unsupported://123'))
def test_copy(self):
self.assertTrue(self._fm.copy('fake://copy/123', 'fake://copy/234'))
self.assertFalse(
self._fm.copy('fake://do_not_copy/123', 'fake://copy/234'))
# No file manager can handle this
self.assertRaises(RuntimeError,
lambda: self._fm.copy('hdfs://123', 'fake://abc'))
def test_mkdir(self):
self.assertTrue(self._fm.mkdir('fake://mkdir/123'))
self.assertFalse(self._fm.mkdir('fake://do_not_mkdir/123'))
# No file manager can handle this
self.assertRaises(RuntimeError,
lambda: self._fm.mkdir('unsupported:///123'))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -8,069,600,054,312,134,000 | 36.446281 | 88 | 0.550541 | false |
Phixyn/ZoeyBot | modules/utils.py | 1 | 1119 | """
utils.py - Utilities module
ZoeyBot - Python IRC Bot
Copyright 2012-2014 (c) Phixyn
This file is part of ZoeyBot.
ZoeyBot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ZoeyBot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ZoeyBot. If not, see <http://www.gnu.org/licenses/>.
"""
import os, subprocess
from datetime import datetime as dt
def timestamp():
""" Documentation pending """
return dt.strftime(dt.now(), "(%H:%M:%S)")
def clear_screen():
""" Documentation pending """
# TODO try...except block here maybe?
if (os.name == 'nt'):
subprocess.call('cls', shell=True)
elif (os.name == 'posix'):
subprocess.call('clear')
else:
print(chr(27) + "[2J")
| gpl-3.0 | 1,913,154,595,719,887,000 | 25.023256 | 68 | 0.726542 | false |
doraemonext/DEOnlineJudge | lib/tools/validator.py | 1 | 1501 | # -*- coding: utf-8 -*-
import re
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
class MinValue(object):
"""
最小长度验证
"""
def __init__(self, name, length):
self.name = name
self.length = length
def __call__(self, value, *args, **kwargs):
if len(value) < self.length:
raise ValidationError(u'%s最小长度为%d个字符' % (self.name, self.length))
class MaxValue(object):
"""
最大长度验证
"""
def __init__(self, name, length):
self.name = name
self.length = length
def __call__(self, value, *args, **kwargs):
if len(value) > self.length:
raise ValidationError(u'%s最大长度为%d个字符' % (self.name, self.length))
class SafeValue(object):
"""
安全字符验证
仅允许包含汉字、数字、字母、下划线及短横线
"""
def __init__(self, name):
self.name = name
def __call__(self, value, *args, **kwargs):
if not re.search(u'^[_a-zA-Z0-9\u4e00-\u9fa5\-]+$', value):
raise ValidationError(u'%s包含非法字符' % self.name)
class EmailValue(object):
"""
电子邮件验证
"""
def __init__(self, name):
self.name = name
def __call__(self, value, *args, **kwargs):
try:
validate_email(value)
except ValidationError:
raise ValidationError(u'%s不合法' % self.name)
| mit | 8,311,261,738,640,830,000 | 21.683333 | 77 | 0.563556 | false |
vhaupert/mitmproxy | mitmproxy/proxy/config.py | 1 | 3244 | import os
import re
import typing
from OpenSSL import crypto
from mitmproxy import certs
from mitmproxy import exceptions
from mitmproxy import options as moptions
from mitmproxy.net import server_spec
class HostMatcher:
def __init__(self, handle, patterns=tuple()):
self.handle = handle
self.patterns = list(patterns)
self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]
def __call__(self, address):
if not address:
return False
host = "%s:%s" % address
if self.handle in ["ignore", "tcp"]:
return any(rex.search(host) for rex in self.regexes)
else: # self.handle == "allow"
return not any(rex.search(host) for rex in self.regexes)
def __bool__(self):
return bool(self.patterns)
class ProxyConfig:
def __init__(self, options: moptions.Options) -> None:
self.options = options
self.certstore: certs.CertStore
self.check_filter: typing.Optional[HostMatcher] = None
self.check_tcp: typing.Optional[HostMatcher] = None
self.upstream_server: typing.Optional[server_spec.ServerSpec] = None
self.configure(options, set(options.keys()))
options.changed.connect(self.configure)
def configure(self, options: moptions.Options, updated: typing.Any) -> None:
if options.allow_hosts and options.ignore_hosts:
raise exceptions.OptionsError("--ignore-hosts and --allow-hosts are mutually "
"exclusive; please choose one.")
if options.ignore_hosts:
self.check_filter = HostMatcher("ignore", options.ignore_hosts)
elif options.allow_hosts:
self.check_filter = HostMatcher("allow", options.allow_hosts)
else:
self.check_filter = HostMatcher(False)
if "tcp_hosts" in updated:
self.check_tcp = HostMatcher("tcp", options.tcp_hosts)
certstore_path = os.path.expanduser(options.confdir)
if not os.path.exists(os.path.dirname(certstore_path)):
raise exceptions.OptionsError(
"Certificate Authority parent directory does not exist: %s" %
os.path.dirname(certstore_path)
)
key_size = options.key_size
self.certstore = certs.CertStore.from_store(
certstore_path,
moptions.CONF_BASENAME,
key_size
)
for c in options.certs:
parts = c.split("=", 1)
if len(parts) == 1:
parts = ["*", parts[0]]
cert = os.path.expanduser(parts[1])
if not os.path.exists(cert):
raise exceptions.OptionsError(
"Certificate file does not exist: %s" % cert
)
try:
self.certstore.add_cert_file(parts[0], cert)
except crypto.Error:
raise exceptions.OptionsError(
"Invalid certificate format: %s" % cert
)
m = options.mode
if m.startswith("upstream:") or m.startswith("reverse:"):
_, spec = server_spec.parse_with_mode(options.mode)
self.upstream_server = spec
| mit | 2,039,066,290,307,979,000 | 35.044444 | 90 | 0.589704 | false |
jjo31/ATHAM-Fluidity | python/fluidity/microphysics/FortranMicrophysicsWrapper.py | 1 | 4818 | import os
path=os.path.dirname(__file__)
def MakeWrapperFiles(field_dict,call_str,pointwise):
write_to_file(field_dict,call_str,pointwise)
def allocate_str(field_dict):
s=""" subroutine allocate_storage(number_of_tracers,n)
integer :: n
!f2py integer, intent(hide), depend(number_of_tracers) :: n=shape(number_of_tracers,0)
integer :: number_of_tracers(n)
"""
for n,k in enumerate(field_dict):
s+=" if (allocated(%s)) deallocate(%s)\n"%(k,k)
s+=" allocate(%s(number_of_tracers(%d)))\n"%(k,n+1)
s+=" end subroutine allocate_storage\n\n"
return s
def finalize_str(field_dict):
s=" subroutine finalize\n"
for k in field_dict:
s+="deallocate(%s)\n"%k
s+=" end subroutine finalize\n\n"
return s
def set_field_str(fname):
s=""" subroutine set_%s(i,new_val,n,old_val,source,m)
integer :: m,n
!f2py integer, intent(hide), depend(new_val) :: n=shape(new_val,0)
!f2py integer, intent(hide), depend(new_val) :: m=shape(source,0)
real, intent(in), dimension(n), target :: new_val, old_val
real, intent(in), dimension(n), target, optional ::source
!f2py real, intent(inplace), dimension(n) :: new_val, old_val
!f2py real, intent(inplace), dimension(n), optional :: source
integer :: i
%s(i)%%new=>new_val
%s(i)%%old=>old_val
print*, present(source), m
if (present(source) .and. m==n)&
%s(i)%%source=>source
end subroutine set_%s
"""%(fname,fname,fname,fname,fname)
return s
def run_str(field_dict,call_string):
s="""subroutine run_microphysics(current_time,dt)
real, intent(in) :: current_time, dt
interface
subroutine %s(time,timestep"""%call_string
for n,k in enumerate(field_dict):
s+=',&\n t%d'%n
s+=')\n'
s+=' use FW_data_type\n'
s+=' real, intent(in) :: time, timestep\n'
for n,k in enumerate(field_dict):
s+=' type(basic_scalar), intent(inout), dimension(:) :: t%d\n'%n
s+=' end subroutine %s\n'%call_string
s+=""" end interface
call %s(current_time,dt"""%call_string
for k in field_dict:
s+=',&\n %s'%k
s+=')\n\n'
s+=' end subroutine run_microphysics\n\n'
return s
def run_str_pointwise(field_dict,call_string):
s="""subroutine run_microphysics(current_time,dt)
real, intent(in) :: current_time, dt
integer :: i,j\n
"""
for n,k in enumerate(field_dict):
s+=' real, dimension(size(%s),3) :: tracer%d\n'%(k,n)
s+=""" interface\n
subroutine %s(time,timestep"""%call_string
for n,k in enumerate(field_dict):
s+=',&\n t%d'%n
s+=')\n'
s+=' use FW_data_type\n'
s+=' real, intent(in) :: time, timestep\n'
for n,k in enumerate(field_dict):
s+=' real, intent(inout), dimension(:,:) :: t%d\n'%n
s+=' end subroutine %s\n'%call_string
s+=" end interface\n"
s+=" do i=1, size(%s(0)%%new)\n"%(field_dict.keys()[0])
for n,k in enumerate(field_dict):
s+=' do j=1,size(%s)\n'%k
s+=' tracer%d(j,1)=%s(j)%%new(i)\n'%(n,k)
s+=' tracer%d(j,2)=%s(j)%%old(i)\n'%(n,k)
s+=' if (associated(%s(j)%%source))&\n tracer%d(j,3)=%s(j)%%source(i)\n'%(k,n,k)
s+=' end do\n\n'
s+=" call %s(current_time,dt"%call_string
for k in range(len(field_dict)):
s+=',&\n tracer%d'%n
s+=')\n\n'
for n,k in enumerate(field_dict):
s+=' do j=1,size(%s)\n'%k
s+=' %s(j)%%new(i)=tracer%d(j,1)\n'%(k,n)
s+=' %s(j)%%old(i)=tracer%d(j,2)\n'%(k,n)
s+=' if (associated(%s(j)%%source))&\n %s(j)%%source(i)=tracer%d(j,3)\n'%(k,k,n)
s+=' end do\n\n'
s+=' end do\n\n'
s+=' end subroutine run_microphysics\n\n'
return s
def write_to_file(field_dict={},
call_string='',
pointwise=False,
dirname=path+'/src',
src_name='FW_auto',
data_name='FW_data'):
f=open(dirname+'/'+src_name+'.F90','w')
s="""module FW_auto
use FW_data
implicit none
contains
"""
f.write(s)
f.write(allocate_str(field_dict))
f.write(finalize_str(field_dict))
for k in field_dict:
f.write(set_field_str(k))
if pointwise:
f.write(run_str_pointwise(field_dict,call_string))
else:
f.write(run_str(field_dict,call_string))
f.write("end module FW_Auto\n")
f.close()
f=open(dirname+'/'+data_name+'.F90','w')
f.write("""module %s
use FW_data_type
"""%data_name)
for k in field_dict:
f.write(' type(basic_scalar), dimension(:), allocatable :: %s\n'%k)
f.write('end module %s\n'%data_name)
f.close()
| lgpl-2.1 | 37,133,814,416,774,880 | 32.458333 | 100 | 0.545247 | false |
CartoDB/cartoframes | cartoframes/io/managers/context_manager.py | 1 | 22518 | import time
import pandas as pd
from warnings import warn
from carto.auth import APIKeyAuthClient
from carto.datasets import DatasetManager
from carto.exceptions import CartoException, CartoRateLimitException
from carto.sql import SQLClient, BatchSQLClient, CopySQLClient
from pyrestcli.exceptions import NotFoundException
from ..dataset_info import DatasetInfo
from ... import __version__
from ...auth.defaults import get_default_credentials
from ...utils.logger import log
from ...utils.geom_utils import encode_geometry_ewkb
from ...utils.utils import (is_sql_query, check_credentials, encode_row, map_geom_type, PG_NULL, double_quote,
create_tmp_name)
from ...utils.columns import (get_dataframe_columns_info, get_query_columns_info, obtain_converters, date_columns_names,
normalize_name)
DEFAULT_RETRY_TIMES = 3
BATCH_API_PAYLOAD_THRESHOLD = 12000
def retry_copy(func):
def wrapper(*args, **kwargs):
m_retry_times = kwargs.get('retry_times', DEFAULT_RETRY_TIMES)
while m_retry_times >= 1:
try:
return func(*args, **kwargs)
except CartoRateLimitException as err:
m_retry_times -= 1
if m_retry_times <= 0:
warn(('Read call was rate-limited. '
'This usually happens when there are multiple queries being read at the same time.'))
raise err
warn('Read call rate limited. Waiting {s} seconds'.format(s=err.retry_after))
time.sleep(err.retry_after)
warn('Retrying...')
return func(*args, **kwargs)
return wrapper
def not_found(func):
def decorator_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except CartoException as e:
if hasattr(e, 'args') and isinstance(e.args, (list, tuple)) and type(e.args[0]) == NotFoundException:
raise Exception('Resource not found') from None
else:
raise e
return decorator_func
class ContextManager:
def __init__(self, credentials):
self.credentials = credentials or get_default_credentials()
check_credentials(self.credentials)
self.auth_client = _create_auth_client(self.credentials)
self.sql_client = SQLClient(self.auth_client)
self.copy_client = CopySQLClient(self.auth_client)
self.batch_sql_client = BatchSQLClient(self.auth_client)
@not_found
def execute_query(self, query, parse_json=True, do_post=True, format=None, **request_args):
return self.sql_client.send(query.strip(), parse_json, do_post, format, **request_args)
@not_found
def execute_long_running_query(self, query):
return self.batch_sql_client.create_and_wait_for_completion(query.strip())
def copy_to(self, source, schema=None, limit=None, retry_times=DEFAULT_RETRY_TIMES):
query = self.compute_query(source, schema)
columns = self._get_query_columns_info(query)
copy_query = self._get_copy_query(query, columns, limit)
return self._copy_to(copy_query, columns, retry_times)
def copy_from(self, gdf, table_name, if_exists='fail', cartodbfy=True,
retry_times=DEFAULT_RETRY_TIMES):
schema = self.get_schema()
table_name = self.normalize_table_name(table_name)
df_columns = get_dataframe_columns_info(gdf)
if self.has_table(table_name, schema):
if if_exists == 'replace':
table_query = self._compute_query_from_table(table_name, schema)
table_columns = self._get_query_columns_info(table_query)
if self._compare_columns(df_columns, table_columns):
# Equal columns: truncate table
self._truncate_table(table_name, schema)
else:
# Diff columns: truncate table and drop + add columns
self._truncate_and_drop_add_columns(
table_name, schema, df_columns, table_columns)
elif if_exists == 'fail':
raise Exception('Table "{schema}.{table_name}" already exists in your CARTO account. '
'Please choose a different `table_name` or use '
'if_exists="replace" to overwrite it.'.format(
table_name=table_name, schema=schema))
else: # 'append'
cartodbfy = False
else:
self._create_table_from_columns(table_name, schema, df_columns)
self._copy_from(gdf, table_name, df_columns, retry_times)
if cartodbfy is True:
cartodbfy_query = _cartodbfy_query(table_name, schema)
self.execute_long_running_query(cartodbfy_query)
return table_name
def create_table_from_query(self, query, table_name, if_exists):
schema = self.get_schema()
table_name = self.normalize_table_name(table_name)
if self.has_table(table_name, schema):
if if_exists == 'replace':
# TODO: review logic copy_from
self._drop_create_table_from_query(table_name, schema, query)
elif if_exists == 'fail':
raise Exception('Table "{schema}.{table_name}" already exists in your CARTO account. '
'Please choose a different `table_name` or use '
'if_exists="replace" to overwrite it.'.format(
table_name=table_name, schema=schema))
else: # 'append'
pass
else:
self._drop_create_table_from_query(table_name, schema, query)
return table_name
def list_tables(self, schema=None):
datasets = DatasetManager(self.auth_client).filter(
show_table_size_and_row_count='false',
show_table='false',
show_stats='false',
show_likes='false',
show_liked='false',
show_permission='false',
show_uses_builder_features='false',
show_synchronization='false',
load_totals='false'
)
datasets.sort(key=lambda x: x.updated_at, reverse=True)
return pd.DataFrame([dataset.name for dataset in datasets], columns=['tables'])
def has_table(self, table_name, schema=None):
query = self.compute_query(table_name, schema)
return self._check_exists(query)
def delete_table(self, table_name):
query = _drop_table_query(table_name)
output = self.execute_query(query)
return not('notices' in output and 'does not exist' in output['notices'][0])
def _delete_function(self, function_name):
query = _drop_function_query(function_name)
self.execute_query(query)
return function_name
def _create_function(self, schema, statement,
function_name=None, columns_types=None, return_value='VOID', language='plpgsql'):
function_name = function_name or create_tmp_name(base='tmp_func')
safe_schema = double_quote(schema)
query, qualified_func_name = _create_function_query(
schema=safe_schema,
function_name=function_name,
statement=statement,
columns_types=columns_types or '',
return_value=return_value,
language=language)
self.execute_query(query)
return qualified_func_name
def rename_table(self, table_name, new_table_name, if_exists='fail'):
new_table_name = self.normalize_table_name(new_table_name)
if table_name == new_table_name:
raise ValueError('Table names are equal. Please choose a different table name.')
if not self.has_table(table_name):
raise Exception('Table "{table_name}" does not exist in your CARTO account.'.format(
table_name=table_name))
if self.has_table(new_table_name):
if if_exists == 'replace':
log.debug('Removing table "{}"'.format(new_table_name))
self.delete_table(new_table_name)
elif if_exists == 'fail':
raise Exception('Table "{new_table_name}" already exists in your CARTO account. '
'Please choose a different `new_table_name` or use '
'if_exists="replace" to overwrite it.'.format(
new_table_name=new_table_name))
self._rename_table(table_name, new_table_name)
return new_table_name
def update_privacy_table(self, table_name, privacy=None):
DatasetInfo(self.auth_client, table_name).update_privacy(privacy)
def get_privacy(self, table_name):
return DatasetInfo(self.auth_client, table_name).privacy
def get_schema(self):
"""Get user schema from current credentials"""
query = 'SELECT current_schema()'
result = self.execute_query(query, do_post=False)
schema = result['rows'][0]['current_schema']
log.debug('schema: {}'.format(schema))
return schema
def get_geom_type(self, query):
"""Fetch geom type of a remote table or query"""
distict_query = '''
SELECT distinct ST_GeometryType(the_geom) AS geom_type
FROM ({}) q
LIMIT 5
'''.format(query)
response = self.execute_query(distict_query, do_post=False)
if response and response.get('rows') and len(response.get('rows')) > 0:
st_geom_type = response.get('rows')[0].get('geom_type')
if st_geom_type:
return map_geom_type(st_geom_type[3:])
return None
def get_num_rows(self, query):
"""Get the number of rows in the query"""
result = self.execute_query('SELECT COUNT(*) FROM ({query}) _query'.format(query=query))
return result.get('rows')[0].get('count')
def get_bounds(self, query):
extent_query = '''
SELECT ARRAY[
ARRAY[st_xmin(geom_env), st_ymin(geom_env)],
ARRAY[st_xmax(geom_env), st_ymax(geom_env)]
] bounds FROM (
SELECT ST_Extent(the_geom) geom_env
FROM ({}) q
) q;
'''.format(query)
response = self.execute_query(extent_query, do_post=False)
if response and response.get('rows') and len(response.get('rows')) > 0:
return response.get('rows')[0].get('bounds')
return None
def get_column_names(self, source, schema=None, exclude=None):
query = self.compute_query(source, schema)
columns = [c.name for c in self._get_query_columns_info(query)]
if exclude and isinstance(exclude, list):
columns = list(set(columns) - set(exclude))
return columns
def is_public(self, query):
# Used to detect public tables in queries in the publication,
# because privacy only works for tables.
public_auth_client = _create_auth_client(self.credentials, public=True)
public_sql_client = SQLClient(public_auth_client)
exists_query = 'EXPLAIN {}'.format(query)
try:
public_sql_client.send(exists_query, do_post=False)
return True
except CartoException:
return False
def get_table_names(self, query):
# Used to detect tables in queries in the publication.
query = 'SELECT CDB_QueryTablesText($q${}$q$) as tables'.format(query)
result = self.execute_query(query)
tables = []
if result['total_rows'] > 0 and result['rows'][0]['tables']:
# Dataset_info only works with tables without schema
tables = [table.split('.')[1] if '.' in table else table for table in result['rows'][0]['tables']]
return tables
def _compare_columns(self, a, b):
a_copy = [i for i in a if _not_reserved(i.name)]
b_copy = [i for i in b if _not_reserved(i.name)]
a_copy.sort()
b_copy.sort()
return a_copy == b_copy
def _drop_create_table_from_query(self, table_name, schema, query):
log.debug('DROP + CREATE table "{}"'.format(table_name))
query = 'BEGIN; {drop}; {create}; COMMIT;'.format(
drop=_drop_table_query(table_name),
create=_create_table_from_query_query(table_name, query))
self.execute_long_running_query(query)
def _create_table_from_columns(self, table_name, schema, columns):
log.debug('CREATE table "{}"'.format(table_name))
query = 'BEGIN; {create}; COMMIT;'.format(
create=_create_table_from_columns_query(table_name, columns))
self.execute_query(query)
def _truncate_table(self, table_name, schema):
log.debug('TRUNCATE table "{}"'.format(table_name))
query = 'BEGIN; {truncate}; COMMIT;'.format(
truncate=_truncate_table_query(table_name))
self.execute_query(query)
def _truncate_and_drop_add_columns(self, table_name, schema, df_columns, table_columns):
log.debug('TRUNCATE AND DROP + ADD columns table "{}"'.format(table_name))
drop_columns = _drop_columns_query(table_name, table_columns)
add_columns = _add_columns_query(table_name, df_columns)
drop_add_columns = 'ALTER TABLE {table_name} {drop_columns},{add_columns};'.format(
table_name=table_name, drop_columns=drop_columns, add_columns=add_columns)
query = '{regenerate}; BEGIN; {truncate}; {drop_add_columns}; COMMIT;'.format(
regenerate=_regenerate_table_query(table_name, schema) if self._check_regenerate_table_exists() else '',
truncate=_truncate_table_query(table_name),
drop_add_columns=drop_add_columns)
query_length_over_threshold = len(query) > BATCH_API_PAYLOAD_THRESHOLD
if query_length_over_threshold:
qualified_func_name = self._create_function(
schema=schema, statement=drop_add_columns)
drop_add_func_sql = 'SELECT {}'.format(qualified_func_name)
query = '''
{regenerate};
BEGIN;
{truncate};
{drop_add_func_sql};
COMMIT;'''.format(
regenerate=_regenerate_table_query(
table_name, schema) if self._check_regenerate_table_exists() else '',
truncate=_truncate_table_query(table_name),
drop_add_func_sql=drop_add_func_sql)
try:
self.execute_long_running_query(query)
finally:
if query_length_over_threshold:
self._delete_function(qualified_func_name)
def compute_query(self, source, schema=None):
if is_sql_query(source):
return source
schema = schema or self.get_schema()
return self._compute_query_from_table(source, schema)
def _compute_query_from_table(self, table_name, schema):
return 'SELECT * FROM "{schema}"."{table_name}"'.format(
schema=schema or 'public',
table_name=table_name
)
def _check_exists(self, query):
exists_query = 'EXPLAIN {}'.format(query)
try:
self.execute_query(exists_query, do_post=False)
return True
except CartoException:
return False
def _check_regenerate_table_exists(self):
query = '''
SELECT 1
FROM pg_catalog.pg_proc p
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
WHERE p.proname = 'cdb_regeneratetable' AND n.nspname = 'cartodb';
'''
result = self.execute_query(query)
return len(result['rows']) > 0
def _get_query_columns_info(self, query):
query = 'SELECT * FROM ({}) _q LIMIT 0'.format(query)
table_info = self.execute_query(query)
return get_query_columns_info(table_info['fields'])
def _get_copy_query(self, query, columns, limit):
query_columns = [
double_quote(column.name) for column in columns
if (column.name != 'the_geom_webmercator')
]
query = 'SELECT {columns} FROM ({query}) _q'.format(
query=query,
columns=','.join(query_columns))
if limit is not None:
if isinstance(limit, int) and (limit >= 0):
query += ' LIMIT {limit}'.format(limit=limit)
else:
raise ValueError("`limit` parameter must an integer >= 0")
return query
@retry_copy
def _copy_to(self, query, columns, retry_times=DEFAULT_RETRY_TIMES):
log.debug('COPY TO')
copy_query = "COPY ({0}) TO stdout WITH (FORMAT csv, HEADER true, NULL '{1}')".format(query, PG_NULL)
raw_result = self.copy_client.copyto_stream(copy_query)
converters = obtain_converters(columns)
parse_dates = date_columns_names(columns)
df = pd.read_csv(
raw_result,
converters=converters,
parse_dates=parse_dates)
return df
@retry_copy
def _copy_from(self, dataframe, table_name, columns, retry_times=DEFAULT_RETRY_TIMES):
log.debug('COPY FROM')
query = """
COPY {table_name}({columns}) FROM stdin WITH (FORMAT csv, DELIMITER '|', NULL '{null}');
""".format(
table_name=table_name, null=PG_NULL,
columns=','.join(double_quote(column.dbname) for column in columns)).strip()
data = _compute_copy_data(dataframe, columns)
self.copy_client.copyfrom(query, data)
def _rename_table(self, table_name, new_table_name):
query = _rename_table_query(table_name, new_table_name)
self.execute_query(query)
def normalize_table_name(self, table_name):
norm_table_name = normalize_name(table_name)
if norm_table_name != table_name:
log.debug('Table name normalized: "{}"'.format(norm_table_name))
return norm_table_name
def _drop_table_query(table_name, if_exists=True):
return 'DROP TABLE {if_exists} {table_name}'.format(
table_name=table_name,
if_exists='IF EXISTS' if if_exists else '')
def _drop_function_query(function_name, columns_types=None, if_exists=True):
if columns_types and not isinstance(columns_types, dict):
raise ValueError('The columns_types parameter should be a dictionary of column names and types.')
columns_types = columns_types or {}
columns = ['{0} {1}'.format(cname, ctype) for cname, ctype in columns_types.items()]
columns_str = ','.join(columns)
return 'DROP FUNCTION {if_exists} {function_name}{columns_str_call}'.format(
function_name=function_name,
if_exists='IF EXISTS' if if_exists else '',
columns_str_call='({columns_str})'.format(columns_str=columns_str) if columns else '')
def _truncate_table_query(table_name):
return 'TRUNCATE TABLE {table_name}'.format(
table_name=table_name)
def _create_function_query(schema, function_name, statement, columns_types, return_value, language):
if columns_types and not isinstance(columns_types, dict):
raise ValueError('The columns_types parameter should be a dictionary of column names and types.')
columns_types = columns_types or {}
columns = ['{0} {1}'.format(cname, ctype) for cname, ctype in columns_types.items()]
columns_str = ','.join(columns) if columns else ''
function_query = '''
CREATE FUNCTION {schema}.{function_name}({columns_str})
RETURNS {return_value} AS $$
BEGIN
{statement}
END;
$$ LANGUAGE {language}
'''.format(schema=schema,
function_name=function_name,
statement=statement,
columns_str=columns_str,
return_value=return_value,
language=language)
qualified_func_name = '{schema}.{function_name}({columns_str})'.format(
schema=schema, function_name=function_name, columns_str=columns_str)
return function_query, qualified_func_name
def _drop_columns_query(table_name, columns):
columns = ['DROP COLUMN {name}'.format(name=double_quote(c.dbname))
for c in columns if _not_reserved(c.dbname)]
return ','.join(columns)
def _add_columns_query(table_name, columns):
columns = ['ADD COLUMN {name} {type}'.format(name=double_quote(c.dbname), type=c.dbtype)
for c in columns if _not_reserved(c.dbname)]
return ','.join(columns)
def _not_reserved(column):
RESERVED_COLUMNS = ['cartodb_id', 'the_geom', 'the_geom_webmercator']
return column not in RESERVED_COLUMNS
def _create_table_from_columns_query(table_name, columns):
columns = ['{name} {type}'.format(name=double_quote(c.dbname), type=c.dbtype) for c in columns]
return 'CREATE TABLE {table_name} ({columns})'.format(
table_name=table_name,
columns=','.join(columns))
def _create_table_from_query_query(table_name, query):
return 'CREATE TABLE {table_name} AS ({query})'.format(table_name=table_name, query=query)
def _cartodbfy_query(table_name, schema):
return "SELECT CDB_CartodbfyTable('{schema}', '{table_name}')".format(
schema=schema, table_name=table_name)
def _regenerate_table_query(table_name, schema):
return "SELECT CDB_RegenerateTable('{schema}.{table_name}'::regclass)".format(
schema=schema, table_name=table_name)
def _rename_table_query(table_name, new_table_name):
return 'ALTER TABLE {table_name} RENAME TO {new_table_name};'.format(
table_name=table_name, new_table_name=new_table_name)
def _create_auth_client(credentials, public=False):
return APIKeyAuthClient(
base_url=credentials.base_url,
api_key='default_public' if public else credentials.api_key,
session=credentials.session,
client_id='cartoframes_{}'.format(__version__),
user_agent='cartoframes_{}'.format(__version__))
def _compute_copy_data(df, columns):
for index in df.index:
row_data = []
for column in columns:
val = df.at[index, column.name]
if column.is_geom:
val = encode_geometry_ewkb(val)
row_data.append(encode_row(val))
csv_row = b'|'.join(row_data)
csv_row += b'\n'
yield csv_row
| bsd-3-clause | -1,297,629,793,458,669,800 | 39.282648 | 120 | 0.60445 | false |
hwjworld/xiaodun-platform | lms/djangoapps/wechat/views.py | 1 | 47459 | import logging
import urllib
from collections import defaultdict
from lxml import html
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from edxmako.shortcuts import render_to_response, render_to_string
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
import django.utils
from courseware import grades
from courseware.access import has_access
from courseware.courses import (get_courses, get_course_with_access, sort_by_announcement, get_course_info_section,
get_course_by_id, get_course, course_image_url, get_course_about_section, get_courses_by_search)
import courseware.tabs as tabs
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor, mobi_toc_for_course
from courseware.models import StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from student.models import UserTestGroup, CourseEnrollment
from student.views import course_from_id, single_course_reverification_info
from util.cache import cache, cache_if_anonymous
from util.json_request import JsonResponse
from xblock.fragment import Fragment
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore, loc_mapper
from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError, NoPathToItem
from xmodule.modulestore.search import path_to_location
from xmodule.course_module import CourseDescriptor
from xmodule.contentstore.content import StaticContent
import shoppingcart
from microsite_configuration import microsite
log = logging.getLogger("edx.courseware")
template_imports = {'urllib': urllib}
def user_groups(user):
"""
TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
"""
if not user.is_authenticated():
return []
# TODO: Rewrite in Django
key = 'user_group_names_{user.id}'.format(user=user)
cache_expiration = 60 * 60 # one hour
# Kill caching on dev machines -- we switch groups a lot
group_names = cache.get(key)
if settings.DEBUG:
group_names = None
if group_names is None:
group_names = [u.name for u in UserTestGroup.objects.filter(users=user)]
cache.set(key, group_names, cache_expiration)
return group_names
#@ensure_csrf_cookie
#@cache_if_anonymous
def courses(request):
"""
Render "find courses" page. The course selection work is done in courseware.courses.
"""
    q = request.GET.get('query', '')
    all_courses = get_courses_by_search(request.META.get('HTTP_HOST'))
    courses_list = []
    if q != "":
        for course in all_courses:
            if q in course.org or q in course.id or q in course.display_name_with_default:
                courses_list.append(course)
    else:
        courses_list = all_courses
courses = sort_by_announcement(courses_list)
return render_to_response("courseware/courses.html", {'courses': courses})
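# Illustrative request (the actual URL pattern lives in this app's urls.py and may differ):
#   GET /courses?query=physics
# renders courseware/courses.html with only the courses whose org, id or display
# name contain the query string; an empty query lists every searchable course.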
def return_fixed_courses(request, courses, user=AnonymousUser(), action=None):
default_length = 8
course_id = request.GET.get("course_id")
if course_id:
course_id = course_id.replace(".", '/')
try:
index_course = get_course_by_id(course_id)
course_index = (courses.index(index_course) + 1)
except:
course_index = 0
current_list = courses[course_index:]
if len(current_list) > default_length:
        current_list = current_list[:default_length]
course_list = []
for course in current_list:
try:
course_json = mobi_course_info(request, course, action)
course_json["registered"] = registered_for_course(course, user)
course_list.append(course_json)
except:
continue
return JsonResponse({"count": len(courses), "course-list": course_list})
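# return_fixed_courses answers with a JSON payload shaped roughly like (values illustrative):
#   {"count": 42,
#    "course-list": [{"id": "edX.DemoX.2014", "name": "Demo Course", "org": "edX",
#                     "course_number": "DemoX", "start_date": "2014-01-01",
#                     "about": "...", "category": "course", "logo": "...",
#                     "imgurl": "...", "registered": false}, ...]}
# "count" is the total number of matching courses, while "course-list" holds at most
# eight of them, starting just after the optional ?course_id= cursor.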
def courses_list_handler(request, action):
"""
Return courses based on request params
"""
try:
user = request.user
except:
user = AnonymousUser()
if action not in ["homefalls", "all", "hot", "latest", "my", "search", "rolling"]:
        return JsonResponse({"success": False, "errmsg": "unsupported action: must be one of homefalls, all, hot, latest, rolling, search or my"})
    def get_courses_depend_action():
        """
        Return courses depending on the requested action.
        action: [homefalls, all, hot, latest, rolling, search, my]
        homefalls / all: all visible courses
        hot: popular courses (currently returned as the full course list)
        latest: the newest courses (at most 20)
        rolling: a short list of courses (at most 5)
        search: courses whose org, id or display name contain the keyword
        my: courses the current user is registered in
        """
courses = get_courses(user, request.META.get('HTTP_HOST'))
courses = sort_by_announcement(courses)
courses_list = []
if action == "latest":
default_count = 20
if len(courses) < default_count:
default_count = len(courses)
courses_list = courses[0:default_count]
elif action == "my":
# filter my registered courses
for course in courses:
if registered_for_course(course, user):
courses_list.append(course)
elif action == "rolling":
default_count = 5
courses_list = courses[0:default_count]
elif action == 'search':
keyword = request.GET.get("keyword")
if keyword:
for c in courses:
print (keyword in c.org or keyword in c.id or keyword in c.display_name_with_default)
if keyword in c.org or keyword in c.id or keyword in c.display_name_with_default:
courses_list.append(c)
else:
courses_list = courses
return courses_list
courses = get_courses_depend_action()
# get_courses_depend_action()
return return_fixed_courses(request, courses, user, action)
def _course_json(course, course_id):
locator = loc_mapper().translate_location(course_id, course.location, published=False, add_entry_if_missing=True)
is_container = course.has_children
result = {
'display_name': course.display_name,
'id': unicode(locator),
'category': course.category,
'is_draft': getattr(course, 'is_draft', False),
'is_container': is_container
}
if is_container:
result['children'] = [_course_json(child, course_id) for child in course.get_children()]
category = result['category']
if result['category'] == 'video':
result[category + '-url'] = "http://www.diandiyun.com/Clip_480_5sec_6mbps_h264.mp4"
elif result['category'] == 'problem':
result[category + '-url'] = "http://music.163.com/"
return result
def mobi_course_info(request, course, action=None):
course_logo = course_image_url(course)
imgurl = course_logo
if action in ["homefalls", "all", "hot", "latest", "my", "search"]:
try:
course_mini_info = course.id.split('/')
asset_location = StaticContent.compute_location(course_mini_info[0], course_mini_info[1], 'mobi-logo-img.jpg')
imgurl = StaticContent.get_url_path_from_location(asset_location)
except:
print "=========================fail load mobi image==============================="
print "We will load this info to log"
return {
"id": course.id.replace('/', '.'),
"name": course.display_name_with_default,
"logo": request.get_host() + course_image_url(course),
"org": course.display_org_with_default,
"course_number": course.display_number_with_default,
"start_date": course.start.strftime("%Y-%m-%d"),
"about": get_course_about_section(course, 'short_description'),
"category": course.category,
"imgurl": request.get_host() + imgurl
}
def _course_info_content(html_parsed):
"""
Constructs the HTML for the course info update, not including the header.
"""
if len(html_parsed) == 1:
# could enforce that update[0].tag == 'h2'
content = html_parsed[0].tail
else:
content = html_parsed[0].tail if html_parsed[0].tail is not None else ""
content += "\n".join([html.tostring(ele) for ele in html_parsed[1:]])
return content
def parse_updates_html_str(html_str):
try:
course_html_parsed = html.fromstring(html_str)
except:
escaped = django.utils.html.eacape(html_str)
course_html_parsed = html.fromstring(escaped)
course_upd_collection = []
if course_html_parsed.tag == 'section':
for index, update in enumerate(course_html_parsed):
if len(update) > 0:
content = _course_info_content(update)
computer_id = len(course_html_parsed) - index
payload = {
"id": computer_id,
"date": update.findtext("h2"),
"content": content
}
course_upd_collection.append(payload)
return {"updates": course_upd_collection}
def mobi_course_action(request, course_id, action):
try:
course_id_bak = course_id.replace('.', '/')
if action in ["updates", "handouts", "structure"]:
course = get_course_with_access(request.user, course_id_bak, 'see_exists')
user = request.user
if not user:
user = AnonymousUser()
registered = registered_for_course(course, user)
if action == "updates" and registered:
course_updates = get_course_info_section(request, course, action)
return JsonResponse(parse_updates_html_str(course_updates))
elif action == "handouts" and registered:
course_handouts = get_course_info_section(request, course, action)
return JsonResponse({"handouts": course_handouts})
elif action == "structure":
return JsonResponse(_course_json(course, course.location.course_id))
else:
raise Exception
else:
course = get_course_with_access(request.user, course_id_bak, 'see_exists')
return JsonResponse(mobi_course_info(request, course))
except:
return JsonResponse({"success": False, "errmsg": "access denied!"})
def render_accordion(request, course, chapter, section, field_data_cache):
"""
Draws navigation bar. Takes current position in accordion as
parameter.
If chapter and section are '' or None, renders a default accordion.
course, chapter, and section are the url_names.
Returns the html string
"""
# grab the table of contents
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
toc = toc_for_course(user, request, course, chapter, section, field_data_cache)
context = dict([('toc', toc),
('course_id', course.id),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format)] + template_imports.items())
return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule):
"""
Get the xmodule.position's display item of an xmodule that has a position and
children. If xmodule has no position or is out of bounds, return the first child.
Returns None only if there are no children at all.
"""
if not hasattr(xmodule, 'position'):
return None
if xmodule.position is None:
pos = 0
else:
# position is 1-indexed.
pos = xmodule.position - 1
children = xmodule.get_display_items()
if 0 <= pos < len(children):
child = children[pos]
elif len(children) > 0:
# Something is wrong. Default to first child
child = children[0]
else:
child = None
return child
def redirect_to_course_position(course_module):
"""
Return a redirect to the user's current place in the course.
If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
If this isn't the users's first time, redirects to COURSE/CHAPTER,
and the view will find the current section and display a message
about reusing the stored position.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': course_module.id}
chapter = get_current_child(course_module)
if chapter is None:
# oops. Something bad has happened.
raise Http404("No chapter found when loading current position in course")
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return redirect(reverse('courseware_chapter', kwargs=urlargs))
# Relying on default of returning first child
section = get_current_child(chapter)
if section is None:
raise Http404("No section found when loading current position in course")
urlargs['section'] = section.url_name
return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, c in enumerate(seq_module.get_display_items(), start=1):
if c.url_name == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def chat_settings(course, user):
"""
Returns a dict containing the settings required to connect to a
Jabber chat server and room.
"""
domain = getattr(settings, "JABBER_DOMAIN", None)
if domain is None:
log.warning('You must set JABBER_DOMAIN in the settings to '
'enable the chat widget')
return None
return {
'domain': domain,
# Jabber doesn't like slashes, so replace with dashes
'room': "{ID}_class".format(ID=course.id.replace('/', '-')),
'username': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
# TODO: clearly this needs to be something other than the username
# should also be something that's not necessarily tied to a
# particular course
'password': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
}
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def index(request, course_id, chapter=None, section=None,
position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right chapter.
If neither chapter or section are specified, redirects to user's most recent
chapter, or the first chapter if this is the user's first visit.
Arguments:
- request : HTTP request
- course_id : course id (str: ORG/course/URL_NAME)
- chapter : chapter url_name (str)
- section : section url_name (str)
- position : position in module, eg of <sequential> module (str)
Returns:
- HTTPresponse
"""
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
course = get_course_with_access(user, course_id, 'load', depth=2)
staff_access = has_access(user, course, 'staff')
registered = registered_for_course(course, user)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.url())
return redirect(reverse('about_course', args=[course.id]))
masq = setup_masquerade(request, staff_access)
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, user, course, depth=2)
course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
if course_module is None:
log.warning(u'If you see this, something went wrong: if we got this'
u' far, should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course.id]))
if chapter is None:
return redirect_to_course_position(course_module)
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': render_accordion(request, course, chapter, section, field_data_cache),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'masquerade': masq,
'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:[email protected]/xqa'),
'reverifications': fetch_reverify_banner_info(request, course_id),
}
# Only show the chat if it's enabled by the course and in the
# settings.
show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
if show_chat:
context['chat'] = chat_settings(course, user)
# If we couldn't load the chat settings, then don't show
# the widget in the courseware.
if context['chat'] is None:
show_chat = False
context['show_chat'] = show_chat
chapter_descriptor = course.get_child_by(lambda m: m.url_name == chapter)
if chapter_descriptor is not None:
save_child_position(course_module, chapter)
else:
raise Http404('No chapter descriptor found with name {}'.format(chapter))
chapter_module = course_module.get_child_by(lambda m: m.url_name == chapter)
if chapter_module is None:
# User may be trying to access a chapter that isn't live yet
if masq=='student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no chapter %s' % chapter)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
if section is not None:
section_descriptor = chapter_descriptor.get_child_by(lambda m: m.url_name == section)
if section_descriptor is None:
# Specifically asked-for section doesn't exist
if masq=='student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no section %s' % section)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
# cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
# which will prefetch the children more efficiently than doing a recursive load
section_descriptor = modulestore().get_instance(course.id, section_descriptor.location, depth=None)
# Load all descendants of the section, because we're going to display its
# html, which in general will need all of its children
section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_id, user, section_descriptor, depth=None)
section_module = get_module_for_descriptor(request.user,
request,
section_descriptor,
section_field_data_cache,
course_id,
position
)
if section_module is None:
# User may be trying to be clever and access something
# they don't have access to.
raise Http404
# Save where we are in the chapter
save_child_position(chapter_module, section)
context['fragment'] = section_module.render('student_view')
context['section_title'] = section_descriptor.display_name_with_default
else:
# section is none, so display a message
prev_section = get_current_child(chapter_module)
if prev_section is None:
# Something went wrong -- perhaps this chapter has no sections visible to the user
raise Http404
prev_section_url = reverse('courseware_section', kwargs={'course_id': course_id,
'chapter': chapter_descriptor.url_name,
'section': prev_section.url_name})
context['fragment'] = Fragment(content=render_to_string(
'courseware/welcome-back.html',
{
'course': course,
'chapter_module': chapter_module,
'prev_section': prev_section,
'prev_section_url': prev_section_url
}
))
result = render_to_response('courseware/courseware.html', context)
except Exception as e:
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception("Error in index view: user={user}, course={course},"
" chapter={chapter} section={section}"
"position={position}".format(
user=user,
course=course,
chapter=chapter,
section=section,
position=position
))
try:
result = render_to_response('courseware/courseware-error.html',
{'staff_access': staff_access,
'course': course})
except:
# Let the exception propagate, relying on global config to at
# at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def mobi_index(request, course_id, chapter=None, section=None,
position=None):
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
course = get_course_with_access(user, course_id, 'load', depth=2)
staff_access = has_access(user, course, 'staff')
registered = registered_for_course(course, user)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.url())
return redirect(reverse('about_course', args=[course.id]))
masq = setup_masquerade(request, staff_access)
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, user, course, depth=2)
course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
if course_module is None:
log.warning(u'If you see this, something went wrong: if we got this'
u' far, should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course.id]))
if chapter is None:
return redirect_to_course_position(course_module)
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': render_accordion(request, course, chapter, section, field_data_cache),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'masquerade': masq,
'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:[email protected]/xqa'),
'reverifications': fetch_reverify_banner_info(request, course_id),
}
# Only show the chat if it's enabled by the course and in the
# settings.
show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
if show_chat:
context['chat'] = chat_settings(course, user)
# If we couldn't load the chat settings, then don't show
# the widget in the courseware.
if context['chat'] is None:
show_chat = False
context['show_chat'] = show_chat
chapter_descriptor = course.get_child_by(lambda m: m.url_name == chapter)
if chapter_descriptor is not None:
save_child_position(course_module, chapter)
else:
raise Http404('No chapter descriptor found with name {}'.format(chapter))
chapter_module = course_module.get_child_by(lambda m: m.url_name == chapter)
if chapter_module is None:
# User may be trying to access a chapter that isn't live yet
if masq=='student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no chapter %s' % chapter)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
if section is not None:
section_descriptor = chapter_descriptor.get_child_by(lambda m: m.url_name == section)
if section_descriptor is None:
# Specifically asked-for section doesn't exist
if masq=='student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no section %s' % section)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
# cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
# which will prefetch the children more efficiently than doing a recursive load
section_descriptor = modulestore().get_instance(course.id, section_descriptor.location, depth=None)
# Load all descendants of the section, because we're going to display its
# html, which in general will need all of its children
section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_id, user, section_descriptor, depth=None)
section_module = get_module_for_descriptor(request.user,
request,
section_descriptor,
section_field_data_cache,
course_id,
position
)
if section_module is None:
# User may be trying to be clever and access something
# they don't have access to.
raise Http404
# Save where we are in the chapter
save_child_position(chapter_module, section)
context['fragment'] = section_module.render('mobi_student_view')
context['section_title'] = section_descriptor.display_name_with_default
else:
# section is none, so display a message
prev_section = get_current_child(chapter_module)
if prev_section is None:
# Something went wrong -- perhaps this chapter has no sections visible to the user
raise Http404
prev_section_url = reverse('courseware_section', kwargs={'course_id': course_id,
'chapter': chapter_descriptor.url_name,
'section': prev_section.url_name})
context['fragment'] = Fragment(content=render_to_string(
'courseware/welcome-back.html',
{
'course': course,
'chapter_module': chapter_module,
'prev_section': prev_section,
'prev_section_url': prev_section_url
}
))
result = render_to_response('wechat/mobi_courseware.html', context)
except Exception as e:
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception("Error in index view: user={user}, course={course},"
" chapter={chapter} section={section}"
"position={position}".format(
user=user,
course=course,
chapter=chapter,
section=section,
position=position
))
try:
result = render_to_response('courseware/courseware-error.html',
{'staff_access': staff_access,
'course': course})
except:
# Let the exception propagate, relying on global config to at
# at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
def mobi_directory(request, course_id):
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
course = get_course_with_access(user, course_id, 'load', depth=2)
staff_access = has_access(user, course, 'staff')
registered = registered_for_course(course, user)
motoc = mobi_toc_for_course(user, request, course)
show_list = list()
for toc in motoc:
videolist = toc['show_url'][0]
show_list.append(videolist)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.url())
return redirect(reverse('about_course', args=[course.id]))
masq = setup_masquerade(request, staff_access)
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, user, course, depth=2)
course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
if course_module is None:
log.warning(u'If you see this, something went wrong: if we got this'
u' far, should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course.id]))
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': mobi_render_accordion(request, course),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'masquerade': masq,
'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:[email protected]/xqa'),
'reverifications': fetch_reverify_banner_info(request, course_id),
'show_url': show_list[0],
}
result = render_to_response('wechat/mobi_directory.html', context)
except Exception as e:
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception("Error in index view: user={user}, course={course},".format(
user=user,
course=course,))
try:
result = render_to_response('courseware/courseware-error.html',
{'staff_access': staff_access,
'course': course})
except:
# Let the exception propagate, relying on global config to at
# at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
def mobi_render_accordion(request, course):
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
toc = mobi_toc_for_course(user, request, course)
context = dict([('toc', toc),
('course_id', course.id),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format)] + template_imports.items())
return render_to_string('wechat/mobi_accordion.html', context)
@ensure_csrf_cookie
def jump_to_id(request, course_id, module_id):
"""
This entry point allows for a shorter version of a jump to where just the id of the element is
passed in. This assumes that id is unique within the course_id namespace
"""
course_location = CourseDescriptor.id_to_location(course_id)
items = modulestore().get_items(
Location('i4x', course_location.org, course_location.course, None, module_id),
course_id=course_id
)
if len(items) == 0:
raise Http404("Could not find id = {0} in course_id = {1}. Referer = {2}".
format(module_id, course_id, request.META.get("HTTP_REFERER", "")))
if len(items) > 1:
log.warning("Multiple items found with id = {0} in course_id = {1}. Referer = {2}. Using first found {3}...".
format(module_id, course_id, request.META.get("HTTP_REFERER", ""), items[0].location.url()))
return jump_to(request, course_id, items[0].location.url())
@ensure_csrf_cookie
def jump_to(request, course_id, location):
"""
Show the page that contains a specific location.
If the location is invalid or not in any class, return a 404.
Otherwise, delegates to the index view to figure out whether this user
has access, and what they should see.
"""
# Complain if the location isn't valid
try:
location = Location(location)
except InvalidLocationError:
raise Http404("Invalid location")
# Complain if there's not data for this location
try:
(course_id, chapter, section, position) = path_to_location(modulestore(), course_id, location)
except ItemNotFoundError:
raise Http404(u"No data at this location: {0}".format(location))
except NoPathToItem:
raise Http404(u"This location is not in any class: {0}".format(location))
# choose the appropriate view (and provide the necessary args) based on the
# args provided by the redirect.
# Rely on index to do all error handling and access control.
if chapter is None:
return redirect('courseware', course_id=course_id)
elif section is None:
return redirect('courseware_chapter', course_id=course_id, chapter=chapter)
elif position is None:
return redirect('courseware_section', course_id=course_id, chapter=chapter, section=section)
else:
return redirect('courseware_position', course_id=course_id, chapter=chapter, section=section, position=position)
@ensure_csrf_cookie
def course_info(request, course_id):
"""
Display the course's info.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
masq = setup_masquerade(request, staff_access) # allow staff to toggle masquerade on info page
reverifications = fetch_reverify_banner_info(request, course_id)
context = {
'request': request,
'course_id': course_id,
'cache': None,
'course': course,
'staff_access': staff_access,
'masquerade': masq,
'reverifications': reverifications,
}
return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
def static_tab(request, course_id, tab_slug):
"""
Display the courses tab with the given name.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
tab = tabs.get_static_tab_by_slug(course, tab_slug)
if tab is None:
raise Http404
contents = tabs.get_static_tab_contents(
request,
course,
tab
)
if contents is None:
raise Http404
staff_access = has_access(request.user, course, 'staff')
return render_to_response('courseware/static_tab.html',
{'course': course,
'tab': tab,
'tab_contents': contents,
'staff_access': staff_access, })
# TODO arjun: remove when custom tabs in place, see courseware/syllabus.py
@ensure_csrf_cookie
def syllabus(request, course_id):
"""
Display the course's syllabus.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
return render_to_response('courseware/syllabus.html', {'course': course,
'staff_access': staff_access, })
def registered_for_course(course, user):
"""
Return True if user is registered for course, else False
"""
if user is None:
return False
if user.is_authenticated():
return CourseEnrollment.is_enrolled(user, course.id)
else:
return False
@ensure_csrf_cookie
@cache_if_anonymous
def course_about(request, course_id):
if microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
):
raise Http404
course = get_course_with_access(request.user, course_id, 'see_exists')
registered = registered_for_course(course, request.user)
if has_access(request.user, course, 'load'):
course_target = reverse('info', args=[course.id])
else:
course_target = reverse('about_course', args=[course.id])
show_courseware_link = (has_access(request.user, course, 'load') or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
# Note: this is a flow for payment for course registration, not the Verified Certificate flow.
registration_price = 0
in_cart = False
reg_then_add_to_cart_link = ""
if (settings.FEATURES.get('ENABLE_SHOPPING_CART') and
settings.FEATURES.get('ENABLE_PAID_COURSE_REGISTRATION')):
registration_price = CourseMode.min_course_price_for_currency(course_id,
settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
if request.user.is_authenticated():
cart = shoppingcart.models.Order.get_cart_for_user(request.user)
in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_id)
reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
reg_url=reverse('register_user'), course_id=course.id)
# see if we have already filled up all allowed enrollments
is_course_full = CourseEnrollment.is_course_full(course)
return render_to_response('courseware/course_about.html',
{'course': course,
'registered': registered,
'course_target': course_target,
'registration_price': registration_price,
'in_cart': in_cart,
'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
'show_courseware_link': show_courseware_link,
'is_course_full': is_course_full})
@ensure_csrf_cookie
@cache_if_anonymous
def mktg_course_about(request, course_id):
"""
This is the button that gets put into an iframe on the Drupal site
"""
try:
course = get_course_with_access(request.user, course_id, 'see_exists')
except (ValueError, Http404) as e:
# if a course does not exist yet, display a coming
# soon button
return render_to_response(
'courseware/mktg_coming_soon.html', {'course_id': course_id}
)
registered = registered_for_course(course, request.user)
if has_access(request.user, course, 'load'):
course_target = reverse('info', args=[course.id])
else:
course_target = reverse('about_course', args=[course.id])
allow_registration = has_access(request.user, course, 'enroll')
show_courseware_link = (has_access(request.user, course, 'load') or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
course_modes = CourseMode.modes_for_course(course.id)
return render_to_response(
'courseware/mktg_course_about.html',
{
'course': course,
'registered': registered,
'allow_registration': allow_registration,
'course_target': course_target,
'show_courseware_link': show_courseware_link,
'course_modes': course_modes,
}
)
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
def progress(request, course_id, student_id=None):
"""
Wraps "_progress" with the manual_transaction context manager just in case
there are unanticipated errors.
"""
with grades.manual_transaction():
return _progress(request, course_id, student_id)
def _progress(request, course_id, student_id):
"""
Unwrapped version of "progress".
User progress. We show the grade bar and every problem score.
Course staff are allowed to see the progress of students in their class.
"""
course = get_course_with_access(request.user, course_id, 'load', depth=None)
staff_access = has_access(request.user, course, 'staff')
if student_id is None or student_id == request.user.id:
# always allowed to see your own profile
student = request.user
else:
# Requesting access to a different student's profile
if not staff_access:
raise Http404
student = User.objects.get(id=int(student_id))
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
courseware_summary = grades.progress_summary(student, request, course)
grade_summary = grades.grade(student, request, course)
if courseware_summary is None:
#This means the student didn't have access to the course (which the instructor requested)
raise Http404
context = {
'course': course,
'courseware_summary': courseware_summary,
'grade_summary': grade_summary,
'staff_access': staff_access,
'student': student,
'reverifications': fetch_reverify_banner_info(request, course_id)
}
with grades.manual_transaction():
response = render_to_response('courseware/progress.html', context)
return response
def fetch_reverify_banner_info(request, course_id):
"""
Fetches needed context variable to display reverification banner in courseware
"""
reverifications = defaultdict(list)
user = request.user
if not user.id:
return reverifications
enrollment = CourseEnrollment.get_or_create_enrollment(request.user, course_id)
course = course_from_id(course_id)
info = single_course_reverification_info(user, course, enrollment)
if info:
reverifications[info.status].append(info)
return reverifications
@login_required
def submission_history(request, course_id, student_username, location):
"""Render an HTML fragment (meant for inclusion elsewhere) that renders a
history of all state changes made by this user for this problem location.
Right now this only works for problems because that's all
StudentModuleHistory records.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
# Permission Denied if they don't have staff access and are trying to see
# somebody else's submission history.
if (student_username != request.user.username) and (not staff_access):
raise PermissionDenied
try:
student = User.objects.get(username=student_username)
student_module = StudentModule.objects.get(course_id=course_id,
module_state_key=location,
student_id=student.id)
except User.DoesNotExist:
return HttpResponse(escape("User {0} does not exist.".format(student_username)))
except StudentModule.DoesNotExist:
return HttpResponse(escape("{0} has never accessed problem {1}".format(student_username, location)))
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
# If no history records exist, let's force a save to get history started.
if not history_entries:
student_module.save()
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
context = {
'history_entries': history_entries,
'username': student.username,
'location': location,
'course_id': course_id
}
return render_to_response('courseware/submission_history.html', context)
def show_video(request):
showurl = request.GET.get("showurl","")
course_id = request.GET.get("course_id")
return render_to_response('wechat/mobi_video.html',{"showurl":showurl, "course_id": course_id}) | agpl-3.0 | -126,619,783,822,553,040 | 38.616027 | 133 | 0.61276 | false |
google/deepvariant | deepvariant/make_examples_test.py | 1 | 90136 | # Copyright 2020 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepvariant.make_examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
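# If a bare 'google' namespace package was imported before 'google.protobuf',
# it can shadow the protobuf submodule; dropping it here appears to be a
# workaround so the protobuf-based imports below resolve cleanly.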
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
import copy
import enum
import errno
import platform
import sys
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import mock
import six
from tensorflow.python.platform import gfile
from third_party.nucleus.io import fasta
from third_party.nucleus.io import tfrecord
from third_party.nucleus.io import vcf
from third_party.nucleus.protos import reads_pb2
from third_party.nucleus.protos import reference_pb2
from third_party.nucleus.protos import variants_pb2
from third_party.nucleus.testing import test_utils
from third_party.nucleus.util import ranges
from third_party.nucleus.util import variant_utils
from third_party.nucleus.util import variantcall_utils
from third_party.nucleus.util import vcf_constants
from deepvariant import dv_constants
from deepvariant import make_examples
from deepvariant import testdata
from deepvariant import tf_utils
from deepvariant.labeler import variant_labeler
from deepvariant.protos import deepvariant_pb2
from deepvariant.protos import realigner_pb2
FLAGS = flags.FLAGS
# Dictionary mapping keys to decoders for decode_example function.
_EXAMPLE_DECODERS = {
'locus': tf_utils.example_locus,
'alt_allele_indices/encoded': tf_utils.example_alt_alleles_indices,
'image/encoded': tf_utils.example_encoded_image,
'variant/encoded': tf_utils.example_variant,
'variant_type': tf_utils.example_variant_type,
'label': tf_utils.example_label,
'image/format': tf_utils.example_image_format,
'image/shape': tf_utils.example_image_shape,
'sequencing_type': tf_utils.example_sequencing_type,
}
def decode_example(example):
"""Decodes a tf.Example from DeepVariant into a dict of Pythonic structures.
Args:
example: tf.Example proto. The example to make into a dictionary.
Returns:
A python dictionary with key/value pairs for each of the fields of example,
with each value decoded as needed into Python structures like protos, list,
etc.
Raises:
KeyError: If example contains a feature without a known decoder.
"""
as_dict = {}
for key in example.features.feature:
if key not in _EXAMPLE_DECODERS:
raise KeyError('Unexpected example key', key)
as_dict[key] = _EXAMPLE_DECODERS[key](example)
return as_dict
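# Example usage (hypothetical path), mirroring how the tests below read
# records back:
#   examples = list(tfrecord.read_tfrecords('/tmp/examples.tfrecord'))
#   decoded = decode_example(examples[0])
#   print(decoded['image/shape'], decoded['locus'])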
def setUpModule():
testdata.init()
def _make_contigs(specs):
"""Makes ContigInfo protos from specs.
Args:
specs: A list of 2- or 3-tuples. All tuples should be of the same length. If
2-element, these should be the name and length in basepairs of each
contig, and their pos_in_fasta will be set to their index in the list. If
3-element, the tuple should contain name, length, and pos_in_fasta.
Returns:
A list of ContigInfo protos, one for each spec in specs.
"""
if specs and len(specs[0]) == 3:
return [
reference_pb2.ContigInfo(name=name, n_bases=length, pos_in_fasta=i)
for name, length, i in specs
]
else:
return [
reference_pb2.ContigInfo(name=name, n_bases=length, pos_in_fasta=i)
for i, (name, length) in enumerate(specs)
]
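# Example: _make_contigs([('chr1', 100), ('chr2', 200)]) yields two ContigInfo
# protos named 'chr1' and 'chr2' with pos_in_fasta 0 and 1, respectively.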
def _from_literals_list(literals, contig_map=None):
"""Makes a list of Range objects from literals."""
return ranges.parse_literals(literals, contig_map)
def _from_literals(literals, contig_map=None):
"""Makes a RangeSet of intervals from literals."""
return ranges.RangeSet.from_regions(literals, contig_map)
def _sharded(basename, num_shards=None):
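"""Returns basename if num_shards is falsy, else 'basename@num_shards'."""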
if num_shards:
return basename + '@' + str(num_shards)
else:
return basename
class TestConditions(enum.Enum):
"""Enum capturing what the test condition we're using."""
USE_BAM = 1
USE_CRAM = 2
USE_MULTI_BAMS = 3
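# The end-to-end test uses these values to pick its input: a single BAM, a
# CRAM, or the same reads split across multiple BAMs (see FLAGS.reads below).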
class MakeExamplesEnd2EndTest(parameterized.TestCase):
# Golden sets are created with learning/genomics/internal/create_golden.sh
@parameterized.parameters(
# All tests are run with fast_pass_aligner enabled. There are no
# golden sets version for ssw realigner.
dict(mode='calling', num_shards=0),
dict(mode='calling', num_shards=3),
dict(
mode='training', num_shards=0, labeler_algorithm='haplotype_labeler'),
dict(
mode='training', num_shards=3, labeler_algorithm='haplotype_labeler'),
dict(
mode='training', num_shards=0,
labeler_algorithm='positional_labeler'),
dict(
mode='training', num_shards=3,
labeler_algorithm='positional_labeler'),
# The following tests are for CRAM input:
dict(
mode='calling', num_shards=0, test_condition=TestConditions.USE_CRAM),
dict(
mode='training',
num_shards=0,
test_condition=TestConditions.USE_CRAM,
labeler_algorithm='haplotype_labeler'),
# The following tests are for multiple BAM inputs:
dict(
mode='calling',
num_shards=0,
test_condition=TestConditions.USE_MULTI_BAMS),
dict(
mode='training',
num_shards=0,
test_condition=TestConditions.USE_MULTI_BAMS,
labeler_algorithm='haplotype_labeler'),
)
@flagsaver.flagsaver
def test_make_examples_end2end(self,
mode,
num_shards,
test_condition=TestConditions.USE_BAM,
labeler_algorithm=None,
use_fast_pass_aligner=True):
self.assertIn(mode, {'calling', 'training'})
region = ranges.parse_literal('chr20:10,000,000-10,010,000')
FLAGS.write_run_info = True
FLAGS.ref = testdata.CHR20_FASTA
if test_condition == TestConditions.USE_BAM:
FLAGS.reads = testdata.CHR20_BAM
elif test_condition == TestConditions.USE_CRAM:
FLAGS.reads = testdata.CHR20_CRAM
elif test_condition == TestConditions.USE_MULTI_BAMS:
FLAGS.reads = ','.join(
[testdata.CHR20_BAM_FIRST_HALF, testdata.CHR20_BAM_SECOND_HALF])
FLAGS.candidates = test_utils.test_tmpfile(
_sharded('vsc.tfrecord', num_shards))
FLAGS.examples = test_utils.test_tmpfile(
_sharded('examples.tfrecord', num_shards))
FLAGS.regions = [ranges.to_literal(region)]
FLAGS.partition_size = 1000
FLAGS.mode = mode
FLAGS.gvcf_gq_binsize = 5
FLAGS.use_fast_pass_aligner = use_fast_pass_aligner
if labeler_algorithm is not None:
FLAGS.labeler_algorithm = labeler_algorithm
if mode == 'calling':
FLAGS.gvcf = test_utils.test_tmpfile(
_sharded('gvcf.tfrecord', num_shards))
else:
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
for task_id in range(max(num_shards, 1)):
FLAGS.task = task_id
options = make_examples.default_options(add_flags=True)
make_examples.make_examples_runner(options)
# Check that our run_info proto contains the basic fields we'd expect:
# (a) our options are written to the run_info.options field.
run_info = make_examples.read_make_examples_run_info(
options.run_info_filename)
self.assertEqual(run_info.options, options)
# (b) run_info.resource_metrics is present and contains our hostname.
self.assertTrue(run_info.HasField('resource_metrics'))
self.assertEqual(run_info.resource_metrics.host_name, platform.node())
# Test that our candidates are reasonable, calling specific helper functions
# to check lots of properties of the output.
candidates = sorted(
tfrecord.read_tfrecords(
FLAGS.candidates, proto=deepvariant_pb2.DeepVariantCall),
key=lambda c: variant_utils.variant_range_tuple(c.variant))
self.verify_deepvariant_calls(candidates, options)
self.verify_variants([call.variant for call in candidates],
region,
options,
is_gvcf=False)
# Verify that the variants in the examples are all good.
examples = self.verify_examples(
FLAGS.examples, region, options, verify_labels=mode == 'training')
example_variants = [tf_utils.example_variant(ex) for ex in examples]
self.verify_variants(example_variants, region, options, is_gvcf=False)
# Verify the integrity of the examples and then check that they match our
# golden labeled examples. Note we expect both training and calling modes to
# produce a deterministic order because we fix the random seed.
if mode == 'calling':
golden_file = _sharded(testdata.GOLDEN_CALLING_EXAMPLES, num_shards)
else:
golden_file = _sharded(testdata.GOLDEN_TRAINING_EXAMPLES, num_shards)
self.assertDeepVariantExamplesEqual(
examples, list(tfrecord.read_tfrecords(golden_file)))
if mode == 'calling':
nist_reader = vcf.VcfReader(testdata.TRUTH_VARIANTS_VCF)
nist_variants = list(nist_reader.query(region))
self.verify_nist_concordance(example_variants, nist_variants)
# Check the quality of our generated gvcf file.
gvcfs = variant_utils.sorted_variants(
tfrecord.read_tfrecords(FLAGS.gvcf, proto=variants_pb2.Variant))
self.verify_variants(gvcfs, region, options, is_gvcf=True)
self.verify_contiguity(gvcfs, region)
gvcf_golden_file = _sharded(testdata.GOLDEN_POSTPROCESS_GVCF_INPUT,
num_shards)
expected_gvcfs = list(
tfrecord.read_tfrecords(gvcf_golden_file, proto=variants_pb2.Variant))
# Despite the name, assertCountEqual checks that all elements match.
self.assertCountEqual(gvcfs, expected_gvcfs)
if (mode == 'training' and num_shards == 0 and
labeler_algorithm != 'positional_labeler'):
# The positional labeler doesn't track metrics, so don't try to read them
# in when that's the mode.
self.assertEqual(
make_examples.read_make_examples_run_info(
testdata.GOLDEN_MAKE_EXAMPLES_RUN_INFO).labeling_metrics,
run_info.labeling_metrics)
@flagsaver.flagsaver
def test_make_examples_end2end_failed_on_mismatched_multi_bam(self):
region = ranges.parse_literal('chr20:10,000,000-10,010,000')
FLAGS.write_run_info = True
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = ','.join([testdata.CHR20_BAM, testdata.NOCHR20_BAM])
FLAGS.candidates = test_utils.test_tmpfile(
_sharded('mismatched_multi_bam.vsc.tfrecord'))
FLAGS.examples = test_utils.test_tmpfile(
_sharded('mismatched_multi_bam.examples.tfrecord'))
FLAGS.regions = [ranges.to_literal(region)]
FLAGS.partition_size = 1000
FLAGS.mode = 'calling'
FLAGS.gvcf_gq_binsize = 5
options = make_examples.default_options(add_flags=True)
# This shows an example of what the error message looks like:
with six.assertRaisesRegex(
self, ValueError, 'Not found: Unknown reference_name '
'reference_name: "chr20" start: 9999999 end: 10000999\n'
'The region chr20:10000000-10000999 does not exist in '
'.*HG002_NIST_150bp_downsampled_30x.chr20.10_10p1mb.bam.'):
make_examples.make_examples_runner(options)
@flagsaver.flagsaver
def test_make_examples_end2end_failed_on_cram(self):
region = ranges.parse_literal('chr20:10,000,000-10,010,000')
FLAGS.use_ref_for_cram = False
FLAGS.write_run_info = True
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_CRAM
FLAGS.candidates = test_utils.test_tmpfile(_sharded('failed.vsc.tfrecord'))
FLAGS.examples = test_utils.test_tmpfile(
_sharded('failed.examples.tfrecord'))
FLAGS.regions = [ranges.to_literal(region)]
FLAGS.partition_size = 1000
FLAGS.mode = 'calling'
FLAGS.gvcf_gq_binsize = 5
options = make_examples.default_options(add_flags=True)
with six.assertRaisesRegex(self, ValueError,
'Failed to parse BAM/CRAM file.'):
make_examples.make_examples_runner(options)
# Golden sets are created with learning/genomics/internal/create_golden.sh
@flagsaver.flagsaver
def test_make_examples_training_end2end_with_customized_classes_labeler(self):
FLAGS.labeler_algorithm = 'customized_classes_labeler'
FLAGS.customized_classes_labeler_classes_list = 'ref,class1,class2'
FLAGS.customized_classes_labeler_info_field_name = 'type'
region = ranges.parse_literal('chr20:10,000,000-10,004,000')
FLAGS.regions = [ranges.to_literal(region)]
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.candidates = test_utils.test_tmpfile(_sharded('vsc.tfrecord'))
FLAGS.examples = test_utils.test_tmpfile(_sharded('examples.tfrecord'))
FLAGS.partition_size = 1000
FLAGS.mode = 'training'
FLAGS.gvcf_gq_binsize = 5
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF_WITH_TYPES
FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
options = make_examples.default_options(add_flags=True)
make_examples.make_examples_runner(options)
golden_file = _sharded(testdata.CUSTOMIZED_CLASSES_GOLDEN_TRAINING_EXAMPLES)
# Verify that the variants in the examples are all good.
examples = self.verify_examples(
FLAGS.examples, region, options, verify_labels=True)
self.assertDeepVariantExamplesEqual(
examples, list(tfrecord.read_tfrecords(golden_file)))
# Golden sets are created with learning/genomics/internal/create_golden.sh
@parameterized.parameters(
dict(mode='calling'),
dict(mode='training'),
)
@flagsaver.flagsaver
def test_make_examples_end2end_vcf_candidate_importer(self, mode):
FLAGS.variant_caller = 'vcf_candidate_importer'
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.candidates = test_utils.test_tmpfile(
_sharded('vcf_candidate_importer.{}.tfrecord'.format(mode)))
FLAGS.examples = test_utils.test_tmpfile(
_sharded('vcf_candidate_importer.examples.{}.tfrecord'.format(mode)))
FLAGS.mode = mode
if mode == 'calling':
golden_file = _sharded(
testdata.GOLDEN_VCF_CANDIDATE_IMPORTER_CALLING_EXAMPLES)
FLAGS.proposed_variants = testdata.VCF_CANDIDATE_IMPORTER_VARIANTS
# Adding the following flags to match how the testdata was created.
FLAGS.regions = 'chr20:59,777,000-60,000,000'
FLAGS.realign_reads = False
else:
golden_file = _sharded(
testdata.GOLDEN_VCF_CANDIDATE_IMPORTER_TRAINING_EXAMPLES)
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
options = make_examples.default_options(add_flags=True)
make_examples.make_examples_runner(options)
# Verify that the variants in the examples are all good.
examples = self.verify_examples(
FLAGS.examples, None, options, verify_labels=mode == 'training')
self.assertDeepVariantExamplesEqual(
examples, list(tfrecord.read_tfrecords(golden_file)))
self.assertEqual(decode_example(examples[0])['image/shape'], [100, 221, 6])
@flagsaver.flagsaver
def test_make_examples_training_vcf_candidate_importer_regions(self):
"""Confirms confident_regions is used in vcf_candidate_importer training."""
def _get_examples(use_confident_regions=False):
# Either the 'confident_regions' flag or the 'regions' flag can be used to
# constrain the set of candidates generated, and both should result in the
# same examples.
bed_path = test_utils.test_tmpfile('vcf_candidate_importer.bed')
with gfile.Open(bed_path, 'w') as fout:
fout.write('\t'.join(['chr20', '10000000', '10001000']) + '\n')
if use_confident_regions:
FLAGS.confident_regions = bed_path
FLAGS.regions = ''
else:
FLAGS.confident_regions = ''
FLAGS.regions = bed_path
FLAGS.examples = test_utils.test_tmpfile(
_sharded('vcf_candidate_importer.tfrecord'))
FLAGS.mode = 'training'
FLAGS.reads = testdata.CHR20_BAM
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
FLAGS.variant_caller = 'vcf_candidate_importer'
options = make_examples.default_options(add_flags=True)
make_examples.make_examples_runner(options)
# Verify that the variants in the examples are all good.
examples = self.verify_examples(
FLAGS.examples, None, options, verify_labels=False)
return examples
examples_with_regions = _get_examples(use_confident_regions=False)
examples_with_confident_regions = _get_examples(use_confident_regions=True)
self.assertNotEmpty(examples_with_regions)
self.assertDeepVariantExamplesEqual(examples_with_regions,
examples_with_confident_regions)
# Golden sets are created with learning/genomics/internal/create_golden.sh
@parameterized.parameters(
dict(alt_align='rows', expected_shape=[300, 221, 6]),
dict(alt_align='diff_channels', expected_shape=[100, 221, 8]),
)
@flagsaver.flagsaver
def test_make_examples_training_end2end_with_alt_aligned_pileup(
self, alt_align, expected_shape):
region = ranges.parse_literal('chr20:10,000,000-10,010,000')
FLAGS.regions = [ranges.to_literal(region)]
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.candidates = test_utils.test_tmpfile(_sharded('vsc.tfrecord'))
FLAGS.examples = test_utils.test_tmpfile(_sharded('examples.tfrecord'))
FLAGS.partition_size = 1000
FLAGS.mode = 'training'
FLAGS.gvcf_gq_binsize = 5
FLAGS.alt_aligned_pileup = alt_align # This is the only input change.
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
options = make_examples.default_options(add_flags=True)
# Run make_examples with the flags above.
make_examples.make_examples_runner(options)
# Check the output for shape and against the golden file.
if alt_align == 'rows':
golden_file = _sharded(testdata.ALT_ALIGNED_ROWS_EXAMPLES)
elif alt_align == 'diff_channels':
golden_file = _sharded(testdata.ALT_ALIGNED_DIFF_CHANNELS_EXAMPLES)
else:
raise ValueError("Golden data doesn't exist for this alt_align option: "
'{}'.format(alt_align))
# Verify that the variants in the examples are all good.
examples = self.verify_examples(
FLAGS.examples, region, options, verify_labels=True)
self.assertDeepVariantExamplesEqual(
examples, list(tfrecord.read_tfrecords(golden_file)))
# Pileup image should have 3 rows of height 100, so resulting height is 300.
self.assertEqual(decode_example(examples[0])['image/shape'], expected_shape)
@flagsaver.flagsaver
def test_make_examples_runtime_runtime_by_region(self):
region = ranges.parse_literal('chr20:10,000,000-10,010,000')
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.regions = [ranges.to_literal(region)]
FLAGS.mode = 'calling'
num_shards = 4
FLAGS.examples = test_utils.test_tmpfile(
_sharded('examples.tfrecord', num_shards))
# Use same number of shards for profiling files as examples.
output_prefix = test_utils.test_tmpfile('runtime_profile')
FLAGS.runtime_by_region = output_prefix + '@{}'.format(num_shards)
FLAGS.task = 2
# Run make_examples with those FLAGS.
options = make_examples.default_options(add_flags=True)
make_examples.make_examples_runner(options)
# Sharded output ending in @4 becomes -00002-of-00004 for task 2.
expected_output_path = output_prefix + '-0000{}-of-00004'.format(FLAGS.task)
expected_columns = [
'region', 'get reads', 'find candidates', 'make pileup images',
'write outputs', 'num reads', 'num candidates', 'num examples'
]
with gfile.Open(expected_output_path, 'r') as fin:
header = fin.readline()
column_names = header.strip().split('\t')
self.assertEqual(expected_columns, column_names)
non_header_lines = fin.readlines()
self.assertLen(non_header_lines, 3)
one_row = non_header_lines[0].strip().split('\t')
self.assertEqual(len(one_row), len(column_names))
self.assertGreater(int(one_row[5]), 0, msg='num reads > 0')
self.assertGreater(int(one_row[6]), 0, msg='num candidates > 0')
self.assertGreater(int(one_row[7]), 0, msg='num examples > 0')
@parameterized.parameters(
dict(select_types=None, expected_count=78),
dict(select_types='all', expected_count=78),
dict(select_types='snps', expected_count=62),
dict(select_types='indels', expected_count=12),
dict(select_types='snps indels', expected_count=74),
dict(select_types='multi-allelics', expected_count=4),
)
@flagsaver.flagsaver
def test_make_examples_with_variant_selection(self, select_types,
expected_count):
if select_types is not None:
FLAGS.select_variant_types = select_types
region = ranges.parse_literal('chr20:10,000,000-10,010,000')
FLAGS.regions = [ranges.to_literal(region)]
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.candidates = test_utils.test_tmpfile(_sharded('vsc.tfrecord'))
FLAGS.examples = test_utils.test_tmpfile(_sharded('examples.tfrecord'))
FLAGS.partition_size = 1000
FLAGS.mode = 'calling'
options = make_examples.default_options(add_flags=True)
make_examples.make_examples_runner(options)
candidates = list(tfrecord.read_tfrecords(FLAGS.candidates))
self.assertLen(candidates, expected_count)
def verify_nist_concordance(self, candidates, nist_variants):
# Tests that we call almost all of the real variants (according to NIST's
# Genome in a Bottle callset for NA12878) in our candidate callset.
# Tests that we don't have an enormous number of FP calls. We should have
# no more than 5x (arbitrary) more candidate calls than real calls. If we
# have more it's likely due to some major pipeline problem.
self.assertLess(len(candidates), 5 * len(nist_variants))
tp_count = 0
for nist_variant in nist_variants:
if self.assertVariantIsPresent(nist_variant, candidates):
tp_count = tp_count + 1
self.assertGreater(
tp_count / len(nist_variants), 0.983,
'Recall must be greater than 0.983. TP={}, Truth variants={}'.format(
tp_count, len(nist_variants)))
def assertDeepVariantExamplesEqual(self, actual, expected):
"""Asserts that actual and expected tf.Examples from DeepVariant are equal.
Args:
actual: iterable of tf.Examples from DeepVariant. DeepVariant examples
that we want to check.
expected: iterable of tf.Examples. Expected results for actual.
"""
self.assertEqual(len(actual), len(expected))
for i in range(len(actual)):
actual_example = decode_example(actual[i])
expected_example = decode_example(expected[i])
self.assertEqual(actual_example.keys(), expected_example.keys())
for key in actual_example:
self.assertEqual(actual_example[key], expected_example[key],
'Failed on %s' % key)
def assertVariantIsPresent(self, to_find, variants):
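"""Returns True if to_find (by position and alt alleles) is in variants."""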
def variant_key(v):
return (v.reference_bases, v.start, v.end)
# Find the variants in our call set whose (reference_bases, start, end) match
# to_find; if there are none, to_find is not present.
matches = [
variant for variant in variants
if variant_key(to_find) == variant_key(variant)
]
if not matches:
return False
# Verify that every alt allele of to_find appears in the matched call (but
# the call might have more than just those alleles).
for alt in to_find.alternate_bases:
if alt not in matches[0].alternate_bases:
return False
return True
def verify_variants(self, variants, region, options, is_gvcf):
# Verifies simple properties of the Variant protos in variants. For example,
# checks that the reference_name() is our expected chromosome. The flag
# is_gvcf determines how we check the VariantCall field of each variant,
# enforcing expectations for gVCF records if true or variant calls if false.
for variant in variants:
if region:
self.assertEqual(variant.reference_name, region.reference_name)
self.assertGreaterEqual(variant.start, region.start)
self.assertLessEqual(variant.start, region.end)
self.assertNotEqual(variant.reference_bases, '')
self.assertNotEmpty(variant.alternate_bases)
self.assertLen(variant.calls, 1)
call = variant_utils.only_call(variant)
self.assertEqual(call.call_set_name,
options.variant_caller_options.sample_name)
if is_gvcf:
# GVCF records should have 0/0 or ./. (un-called) genotypes as they are
# reference sites, have genotype likelihoods and a GQ value.
self.assertIn(list(call.genotype), [[0, 0], [-1, -1]])
self.assertLen(call.genotype_likelihood, 3)
self.assertGreaterEqual(variantcall_utils.get_gq(call), 0)
def verify_contiguity(self, contiguous_variants, region):
"""Verifies region is fully covered by gvcf records."""
# We expect that the intervals cover every base, so the first variant should
# be at our interval start and the last one should end at our interval end.
self.assertNotEmpty(contiguous_variants)
self.assertEqual(region.start, contiguous_variants[0].start)
self.assertEqual(region.end, contiguous_variants[-1].end)
# After this loop completes successfully we know that together the GVCF and
# Variants form a fully contiguous cover of our calling interval, as
# expected.
for v1, v2 in zip(contiguous_variants, contiguous_variants[1:]):
# Sequential variants should be contiguous, meaning that v2.start should
# be v1's end, as the end is exclusive and the start is inclusive.
if v1.start == v2.start and v1.end == v2.end:
# Skip duplicates here as we may have multi-allelic variants turning
# into multiple bi-allelic variants at the same site.
continue
      # We expect v2 to start exactly at the end of a gvcf record, but at the
      # base immediately after a variant's start, since the variant's end can
      # span a larger interval when it's a deletion and we still produce gvcf
      # records under the deletion.
expected_start = v1.end if v1.alternate_bases == ['<*>'] else v1.start + 1
self.assertEqual(v2.start, expected_start)
def verify_deepvariant_calls(self, dv_calls, options):
# Verifies simple structural properties of the DeepVariantCall objects
# emitted by the VerySensitiveCaller, such as that the AlleleCount and
# Variant both have the same position.
for call in dv_calls:
for alt_allele in call.variant.alternate_bases:
# Skip ref calls.
if alt_allele == vcf_constants.NO_ALT_ALLELE:
continue
# Make sure allele appears in our allele_support field and that at
# least our min number of reads to call an alt allele are present in
# the supporting reads list for that allele.
self.assertIn(alt_allele, list(call.allele_support))
self.assertGreaterEqual(
len(call.allele_support[alt_allele].read_names),
options.variant_caller_options.min_count_snps)
def verify_examples(self, examples_filename, region, options, verify_labels):
# Do some simple structural checks on the tf.Examples in the file.
expected_features = [
'variant/encoded', 'locus', 'image/format', 'image/encoded',
'alt_allele_indices/encoded'
]
if verify_labels:
expected_features += ['label']
examples = list(tfrecord.read_tfrecords(examples_filename))
for example in examples:
for label_feature in expected_features:
self.assertIn(label_feature, example.features.feature)
# pylint: disable=g-explicit-length-test
self.assertNotEmpty(tf_utils.example_alt_alleles_indices(example))
# Check that the variants in the examples are good.
variants = [tf_utils.example_variant(x) for x in examples]
self.verify_variants(variants, region, options, is_gvcf=False)
return examples
class MakeExamplesUnitTest(parameterized.TestCase):
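  """Unit tests for make_examples flag handling, contigs, and region logic."""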
def test_read_write_run_info(self):
def _read_lines(path):
with open(path) as fin:
return list(fin.readlines())
golden_actual = make_examples.read_make_examples_run_info(
testdata.GOLDEN_MAKE_EXAMPLES_RUN_INFO)
# We don't really want to inject too much knowledge about the golden right
# here, so we only use a minimal test that (a) the run_info_filename is
    # a non-empty string and (b) the number of candidate sites in the labeling
# metrics field is greater than 0. Any reasonable golden output will have at
# least one candidate variant, and the reader should have filled in the
# value.
self.assertNotEmpty(golden_actual.options.run_info_filename)
self.assertEqual(golden_actual.labeling_metrics.n_candidate_variant_sites,
testdata.N_GOLDEN_TRAINING_EXAMPLES)
# Check that reading + writing the data produces the same lines:
tmp_output = test_utils.test_tmpfile('written_run_info.pbtxt')
make_examples.write_make_examples_run_info(golden_actual, tmp_output)
self.assertEqual(
_read_lines(testdata.GOLDEN_MAKE_EXAMPLES_RUN_INFO),
_read_lines(tmp_output))
@parameterized.parameters(
dict(
flag_value='CALLING',
expected=deepvariant_pb2.DeepVariantOptions.CALLING,
),
dict(
flag_value='TRAINING',
expected=deepvariant_pb2.DeepVariantOptions.TRAINING,
),
)
def test_parse_proto_enum_flag(self, flag_value, expected):
enum_pb2 = deepvariant_pb2.DeepVariantOptions.Mode
self.assertEqual(
make_examples.parse_proto_enum_flag(enum_pb2, flag_value), expected)
def test_parse_proto_enum_flag_error_handling(self):
with six.assertRaisesRegex(
self, ValueError,
'Unknown enum option "foo". Allowed options are CALLING,TRAINING'):
make_examples.parse_proto_enum_flag(
deepvariant_pb2.DeepVariantOptions.Mode, 'foo')
@flagsaver.flagsaver
def test_keep_duplicates(self):
FLAGS.keep_duplicates = True
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
FLAGS.mode = 'training'
FLAGS.examples = ''
options = make_examples.default_options(add_flags=True)
self.assertEqual(options.pic_options.read_requirements.keep_duplicates,
True)
@flagsaver.flagsaver
def test_keep_supplementary_alignments(self):
FLAGS.keep_supplementary_alignments = True
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
FLAGS.mode = 'training'
FLAGS.examples = ''
options = make_examples.default_options(add_flags=True)
self.assertEqual(
options.pic_options.read_requirements.keep_supplementary_alignments,
True)
@flagsaver.flagsaver
def test_keep_secondary_alignments(self):
FLAGS.keep_secondary_alignments = True
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
FLAGS.mode = 'training'
FLAGS.examples = ''
options = make_examples.default_options(add_flags=True)
self.assertEqual(
options.pic_options.read_requirements.keep_secondary_alignments, True)
@flagsaver.flagsaver
def test_min_base_quality(self):
FLAGS.min_base_quality = 5
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
FLAGS.mode = 'training'
FLAGS.examples = ''
options = make_examples.default_options(add_flags=True)
self.assertEqual(options.pic_options.read_requirements.min_base_quality, 5)
@flagsaver.flagsaver
def test_min_mapping_quality(self):
FLAGS.min_mapping_quality = 15
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
FLAGS.mode = 'training'
FLAGS.examples = ''
options = make_examples.default_options(add_flags=True)
self.assertEqual(options.pic_options.read_requirements.min_mapping_quality,
15)
@flagsaver.flagsaver
def test_default_options_with_training_random_emit_ref_sites(self):
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
FLAGS.mode = 'training'
FLAGS.examples = ''
FLAGS.training_random_emit_ref_sites = 0.3
options = make_examples.default_options(add_flags=True)
self.assertAlmostEqual(
options.variant_caller_options.fraction_reference_sites_to_emit, 0.3)
@flagsaver.flagsaver
def test_default_options_without_training_random_emit_ref_sites(self):
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
FLAGS.mode = 'training'
FLAGS.examples = ''
options = make_examples.default_options(add_flags=True)
# In proto3, there is no way to check presence of scalar field:
# redacted
    # As an approximation, we directly check that the value is exactly 0.
self.assertEqual(
options.variant_caller_options.fraction_reference_sites_to_emit, 0.0)
@flagsaver.flagsaver
def test_invalid_sequencing_type(self):
FLAGS.mode = 'training'
FLAGS.sequencing_type = 'wGs'
with self.assertRaises(ValueError):
make_examples.default_options(add_flags=True)
def test_extract_sample_name_from_reads_single_sample(self):
mock_sample_reader = mock.Mock()
mock_sample_reader.header = reads_pb2.SamHeader(
read_groups=[reads_pb2.ReadGroup(sample_id='sample_name')])
self.assertEqual(
make_examples.extract_sample_name_from_sam_reader(mock_sample_reader),
'sample_name')
@parameterized.parameters(
# No samples could be found in the reads.
dict(samples=[], expected_sample_name=dv_constants.DEFAULT_SAMPLE_NAME),
# Check that we detect an empty sample name and use default instead.
dict(samples=[''], expected_sample_name=dv_constants.DEFAULT_SAMPLE_NAME),
# We have more than one sample in the reads.
dict(samples=['sample1', 'sample2'], expected_sample_name='sample1'),
)
def test_extract_sample_name_from_reads_uses_default_when_necessary(
self, samples, expected_sample_name):
mock_sample_reader = mock.Mock()
mock_sample_reader.header = reads_pb2.SamHeader(read_groups=[
reads_pb2.ReadGroup(sample_id=sample) for sample in samples
])
self.assertEqual(
expected_sample_name,
make_examples.extract_sample_name_from_sam_reader(mock_sample_reader))
@flagsaver.flagsaver
def test_confident_regions(self):
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
FLAGS.mode = 'training'
FLAGS.examples = ''
options = make_examples.default_options(add_flags=True)
confident_regions = make_examples.read_confident_regions(options)
# Our expected intervals, inlined from CONFIDENT_REGIONS_BED.
expected = _from_literals_list([
'chr20:10000847-10002407', 'chr20:10002521-10004171',
'chr20:10004274-10004964', 'chr20:10004995-10006386',
'chr20:10006410-10007800', 'chr20:10007825-10008018',
'chr20:10008044-10008079', 'chr20:10008101-10008707',
'chr20:10008809-10008897', 'chr20:10009003-10009791',
'chr20:10009934-10010531'
])
# Our confident regions should be exactly those found in the BED file.
six.assertCountEqual(self, expected, list(confident_regions))
@parameterized.parameters(
({
'examples': ('foo', 'foo')
},),
({
'examples': ('foo', 'foo'),
'gvcf': ('bar', 'bar')
},),
({
'examples': ('foo@10', 'foo-00000-of-00010')
},),
({
'task': (0, 0),
'examples': ('foo@10', 'foo-00000-of-00010')
},),
({
'task': (1, 1),
'examples': ('foo@10', 'foo-00001-of-00010')
},),
({
'task': (1, 1),
'examples': ('foo@10', 'foo-00001-of-00010'),
'gvcf': ('bar@10', 'bar-00001-of-00010')
},),
({
'task': (1, 1),
'examples': ('foo@10', 'foo-00001-of-00010'),
'gvcf': ('bar@10', 'bar-00001-of-00010'),
'candidates': ('baz@10', 'baz-00001-of-00010')
},),
)
@flagsaver.flagsaver
def test_sharded_outputs1(self, settings):
# Set all of the requested flag values.
for name, (flag_val, _) in settings.items():
setattr(FLAGS, name, flag_val)
FLAGS.mode = 'training'
FLAGS.reads = ''
FLAGS.ref = ''
options = make_examples.default_options(add_flags=True)
# Check all of the flags.
for name, option_val in [('examples', options.examples_filename),
('candidates', options.candidates_filename),
('gvcf', options.gvcf_filename)]:
expected = settings[name][1] if name in settings else ''
self.assertEqual(expected, option_val)
@flagsaver.flagsaver
def test_gvcf_output_enabled_is_false_without_gvcf_flag(self):
FLAGS.mode = 'training'
FLAGS.gvcf = ''
FLAGS.reads = ''
FLAGS.ref = ''
FLAGS.examples = ''
options = make_examples.default_options(add_flags=True)
self.assertFalse(make_examples.gvcf_output_enabled(options))
@flagsaver.flagsaver
def test_gvcf_output_enabled_is_true_with_gvcf_flag(self):
FLAGS.mode = 'training'
FLAGS.gvcf = '/tmp/foo.vcf'
FLAGS.reads = ''
FLAGS.ref = ''
FLAGS.examples = ''
options = make_examples.default_options(add_flags=True)
self.assertTrue(make_examples.gvcf_output_enabled(options))
@flagsaver.flagsaver
def test_add_supporting_other_alt_color(self):
FLAGS.mode = 'training'
FLAGS.gvcf = ''
FLAGS.reads = ''
FLAGS.ref = ''
FLAGS.examples = ''
FLAGS.add_supporting_other_alt_color = True
options = make_examples.default_options(add_flags=True)
self.assertAlmostEqual(
options.pic_options.other_allele_supporting_read_alpha, 0.3)
self.assertAlmostEqual(options.pic_options.allele_unsupporting_read_alpha,
0.6)
def test_validate_ref_contig_coverage(self):
ref_contigs = _make_contigs([('1', 100), ('2', 100)])
# Fully covered reference contigs don't trigger an error.
for threshold in [0.5, 0.9, 1.0]:
self.assertIsNone(
make_examples.validate_reference_contig_coverage(
ref_contigs, ref_contigs, threshold))
# No common contigs always blows up.
for threshold in [0.0, 0.1, 0.5, 0.9, 1.0]:
with six.assertRaisesRegex(self, ValueError, 'span 200'):
make_examples.validate_reference_contig_coverage(
ref_contigs, [], threshold)
    # Dropping either contig brings us below our 0.9 threshold.
with six.assertRaisesRegex(self, ValueError, 'span 200'):
make_examples.validate_reference_contig_coverage(
ref_contigs, _make_contigs([('1', 100)]), 0.9)
with six.assertRaisesRegex(self, ValueError, 'span 200'):
make_examples.validate_reference_contig_coverage(
ref_contigs, _make_contigs([('2', 100)]), 0.9)
# Our actual overlap is 50%, so check that we raise when appropriate.
with six.assertRaisesRegex(self, ValueError, 'span 200'):
make_examples.validate_reference_contig_coverage(
ref_contigs, _make_contigs([('2', 100)]), 0.6)
self.assertIsNone(
make_examples.validate_reference_contig_coverage(
ref_contigs, _make_contigs([('2', 100)]), 0.4))
@parameterized.parameters(
# all intervals are shared.
([[('chrM', 10)], [('chrM', 10)]], [('chrM', 10)]),
# No common intervals.
([[('chrM', 10)], [('chr1', 10)]], []),
# The names are the same but sizes are different, so not common.
([[('chrM', 10)], [('chrM', 20)]], []),
# One common interval and one not.
([[('chrM', 10), ('chr1', 20)], [('chrM', 10),
('chr2', 30)]], [('chrM', 10)]),
# Check that the order doesn't matter.
([[('chr1', 20), ('chrM', 10)], [('chrM', 10),
('chr2', 30)]], [('chrM', 10, 1)]),
# Three-way merges.
([
[('chr1', 20), ('chrM', 10)],
[('chrM', 10), ('chr2', 30)],
[('chr2', 30), ('chr3', 30)],
], []),
)
def test_common_contigs(self, contigs_list, expected):
self.assertEqual(
_make_contigs(expected),
make_examples.common_contigs(
[_make_contigs(contigs) for contigs in contigs_list]))
@parameterized.parameters(
# Note that these tests aren't so comprehensive as we are trusting that
# the intersection code logic itself is good and well-tested elsewhere.
# Here we are focusing on some basic tests and handling of missing
# calling_region and confident_region data.
(['1:1-10'], ['1:1-10']),
(['1:1-100'], ['1:1-100']),
(['1:50-150'], ['1:50-100']),
(None, ['1:1-100', '2:1-200']),
(['1:20-50'], ['1:20-50']),
# Chr3 isn't part of our contigs; make sure we tolerate it.
(['1:20-30', '1:40-60', '3:10-50'], ['1:20-30', '1:40-60']),
# Check that we handle overlapping calling or confident regions.
(['1:25-30', '1:20-40'], ['1:20-40']),
)
def test_regions_to_process(self, calling_regions, expected):
contigs = _make_contigs([('1', 100), ('2', 200)])
six.assertCountEqual(
self, _from_literals_list(expected),
make_examples.regions_to_process(
contigs, 1000, calling_regions=_from_literals(calling_regions)))
@parameterized.parameters(
(50, None, [
'1:1-50', '1:51-100', '2:1-50', '2:51-76', '3:1-50', '3:51-100',
'3:101-121'
]),
(120, None, ['1:1-100', '2:1-76', '3:1-120', '3:121']),
(500, None, ['1:1-100', '2:1-76', '3:1-121']),
(10, ['1:1-20', '1:30-35'], ['1:1-10', '1:11-20', '1:30-35']),
(8, ['1:1-20', '1:30-35'], ['1:1-8', '1:9-16', '1:17-20', '1:30-35']),
)
def test_regions_to_process_partition(self, max_size, calling_regions,
expected):
contigs = _make_contigs([('1', 100), ('2', 76), ('3', 121)])
six.assertCountEqual(
self, _from_literals_list(expected),
make_examples.regions_to_process(
contigs, max_size, calling_regions=_from_literals(calling_regions)))
@parameterized.parameters(
dict(includes=[], excludes=[], expected=['1:1-100', '2:1-200']),
dict(includes=['1'], excludes=[], expected=['1:1-100']),
# Check that excludes work as expected.
dict(includes=[], excludes=['1'], expected=['2:1-200']),
dict(includes=[], excludes=['2'], expected=['1:1-100']),
dict(includes=[], excludes=['1', '2'], expected=[]),
# Check that excluding pieces works. The main checks on taking the
# difference between two RangeSets live in ranges.py so here we are just
# making sure some basic logic works.
dict(includes=['1'], excludes=['1:1-10'], expected=['1:11-100']),
# Check that includes and excludes work together.
dict(
includes=['1', '2'],
excludes=['1:5-10', '1:20-50', '2:10-20'],
expected=['1:1-4', '1:11-19', '1:51-100', '2:1-9', '2:21-200']),
dict(
includes=['1'],
excludes=['1:5-10', '1:20-50', '2:10-20'],
expected=['1:1-4', '1:11-19', '1:51-100']),
dict(
includes=['2'],
excludes=['1:5-10', '1:20-50', '2:10-20'],
expected=['2:1-9', '2:21-200']),
# A complex example of including and excluding.
dict(
includes=['1:10-20', '2:50-60', '2:70-80'],
excludes=['1:1-13', '1:19-50', '2:10-65'],
expected=['1:14-18', '2:70-80']),
)
def test_build_calling_regions(self, includes, excludes, expected):
contigs = _make_contigs([('1', 100), ('2', 200)])
actual = make_examples.build_calling_regions(contigs, includes, excludes)
six.assertCountEqual(self, actual, _from_literals_list(expected))
def test_regions_to_process_sorted_within_contig(self):
# These regions are out of order but within a single contig.
contigs = _make_contigs([('z', 100)])
in_regions = _from_literals(['z:15', 'z:20', 'z:6', 'z:25-30', 'z:3-4'])
sorted_regions = _from_literals_list(
['z:3-4', 'z:6', 'z:15', 'z:20', 'z:25-30'])
actual_regions = list(
make_examples.regions_to_process(
contigs, 100, calling_regions=in_regions))
# The assertEqual here is checking the order is exactly what we expect.
self.assertEqual(sorted_regions, actual_regions)
def test_regions_to_process_sorted_contigs(self):
# These contig names are out of order lexicographically.
contigs = _make_contigs([('z', 100), ('a', 100), ('n', 100)])
in_regions = _from_literals(['a:10', 'n:1', 'z:20', 'z:5'])
sorted_regions = _from_literals_list(['z:5', 'z:20', 'a:10', 'n:1'])
actual_regions = list(
make_examples.regions_to_process(
contigs, 100, calling_regions=in_regions))
# The assertEqual here is checking the order is exactly what we expect.
self.assertEqual(sorted_regions, actual_regions)
@parameterized.parameters([2, 3, 4, 5, 50])
def test_regions_to_process_sharding(self, num_shards):
"""Makes sure we deterministically split up regions."""
def get_regions(task_id, num_shards):
return make_examples.regions_to_process(
contigs=_make_contigs([('z', 100), ('a', 100), ('n', 100)]),
partition_size=5,
task_id=task_id,
num_shards=num_shards)
# Check that the regions are the same unsharded vs. sharded.
unsharded_regions = get_regions(0, 0)
sharded_regions = []
for task_id in range(num_shards):
task_regions = get_regions(task_id, num_shards)
sharded_regions.extend(task_regions)
six.assertCountEqual(self, unsharded_regions, sharded_regions)
@parameterized.parameters(
# Providing one of task id and num_shards but not the other is bad.
(None, 0),
(None, 2),
(2, None),
(0, None),
# Negative values are illegal.
(-1, 2),
(0, -2),
# task_id >= num_shards is bad.
(2, 2),
(3, 2),
)
def test_regions_to_process_fails_with_bad_shard_args(self, task, num_shards):
with self.assertRaises(ValueError):
make_examples.regions_to_process(
contigs=_make_contigs([('z', 100), ('a', 100), ('n', 100)]),
partition_size=10,
task_id=task,
num_shards=num_shards)
@parameterized.parameters(
# One variant in region.
(['x:100-200'], ['x:150-151'], [0]),
# Different chromosomes.
(['x:100-200'], ['y:150-151'], []),
# A variant at the beginning of a region.
(['x:100-200', 'x:201-300'], ['x:100-101'], [0]),
(['x:1-10', 'x:11-20', 'x:21-30'], ['x:11-12'], [1]),
# A variant before all the regions.
(['x:11-20', 'x:20-30'], ['x:1-2'], []),
# A variant after all the regions.
(['x:1-10', 'x:11-20', 'x:21-30'], ['x:40-50'], []),
# Multiple variants in the same region.
(['x:11-20', 'x:21-30'
], ['x:1-2', 'x:25-26', 'x:25-26', 'x:26-27', 'x:40-50'], [1]),
# A variant spanning multiple regions belongs where it starts.
(['x:1-10', 'x:11-20', 'x:21-30', 'x:31-40', 'x:41-50', 'x:51-60'
], ['x:15-66'], [1]),
)
def test_filter_regions_by_vcf(self, region_literals, variant_literals,
regions_to_keep):
regions = [ranges.parse_literal(l) for l in region_literals]
variant_positions = [ranges.parse_literal(l) for l in variant_literals]
output = make_examples.filter_regions_by_vcf(regions, variant_positions)
list_output = list(output)
list_expected = [regions[i] for i in regions_to_keep]
self.assertEqual(list_output, list_expected)
def test_catches_bad_argv(self):
with mock.patch.object(logging, 'error') as mock_logging,\
mock.patch.object(sys, 'exit') as mock_exit:
make_examples.main(['make_examples.py', 'extra_arg'])
mock_logging.assert_called_once_with(
'Command line parsing failure: make_examples does not accept '
'positional arguments but some are present on the command line: '
'"[\'make_examples.py\', \'extra_arg\']".')
mock_exit.assert_called_once_with(errno.ENOENT)
@flagsaver.flagsaver
def test_catches_bad_flags(self):
# Set all of the requested flag values.
region = ranges.parse_literal('chr20:10,000,000-10,010,000')
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.candidates = test_utils.test_tmpfile('vsc.tfrecord')
FLAGS.examples = test_utils.test_tmpfile('examples.tfrecord')
FLAGS.regions = [ranges.to_literal(region)]
FLAGS.partition_size = 1000
FLAGS.mode = 'training'
FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
# This is the bad flag.
FLAGS.confident_regions = ''
with mock.patch.object(logging, 'error') as mock_logging,\
mock.patch.object(sys, 'exit') as mock_exit:
make_examples.main(['make_examples.py'])
mock_logging.assert_called_once_with(
'confident_regions is required when in training mode.')
mock_exit.assert_called_once_with(errno.ENOENT)
@parameterized.parameters(
dict(
ref_names=['1', '2', '3'],
sam_names=['1', '2', '3'],
vcf_names=None,
names_to_exclude=[],
min_coverage_fraction=1.0,
expected_names=['1', '2', '3']),
dict(
ref_names=['1', '2', '3'],
sam_names=['1', '2'],
vcf_names=None,
names_to_exclude=[],
min_coverage_fraction=0.66,
expected_names=['1', '2']),
dict(
ref_names=['1', '2', '3'],
sam_names=['1', '2'],
vcf_names=['1', '3'],
names_to_exclude=[],
min_coverage_fraction=0.33,
expected_names=['1']),
dict(
ref_names=['1', '2', '3', '4', '5'],
sam_names=['1', '2', '3'],
vcf_names=None,
names_to_exclude=['4', '5'],
min_coverage_fraction=1.0,
expected_names=['1', '2', '3']),
)
def test_ensure_consistent_contigs(self, ref_names, sam_names, vcf_names,
names_to_exclude, min_coverage_fraction,
expected_names):
ref_contigs = _make_contigs([(name, 100) for name in ref_names])
sam_contigs = _make_contigs([(name, 100) for name in sam_names])
if vcf_names is not None:
vcf_contigs = _make_contigs([(name, 100) for name in vcf_names])
else:
vcf_contigs = None
actual = make_examples._ensure_consistent_contigs(ref_contigs, sam_contigs,
vcf_contigs,
names_to_exclude,
min_coverage_fraction)
self.assertEqual([a.name for a in actual], expected_names)
@parameterized.parameters(
dict(
ref_names=['1', '2', '3'],
sam_names=['1', '2'],
vcf_names=None,
names_to_exclude=[],
min_coverage_fraction=0.67),
dict(
ref_names=['1', '2', '3'],
sam_names=['1', '2'],
vcf_names=['1', '3'],
names_to_exclude=[],
min_coverage_fraction=0.34),
)
def test_ensure_inconsistent_contigs(self, ref_names, sam_names, vcf_names,
names_to_exclude, min_coverage_fraction):
ref_contigs = _make_contigs([(name, 100) for name in ref_names])
sam_contigs = _make_contigs([(name, 100) for name in sam_names])
if vcf_names is not None:
vcf_contigs = _make_contigs([(name, 100) for name in vcf_names])
else:
vcf_contigs = None
with six.assertRaisesRegex(self, ValueError, 'Reference contigs span'):
make_examples._ensure_consistent_contigs(ref_contigs, sam_contigs,
vcf_contigs, names_to_exclude,
min_coverage_fraction)
@flagsaver.flagsaver
def test_regions_and_exclude_regions_flags(self):
FLAGS.mode = 'calling'
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.regions = 'chr20:10,000,000-11,000,000'
FLAGS.examples = 'examples.tfrecord'
FLAGS.exclude_regions = 'chr20:10,010,000-10,100,000'
options = make_examples.default_options(add_flags=True)
six.assertCountEqual(
self,
list(
ranges.RangeSet(
make_examples.processing_regions_from_options(options))),
_from_literals_list(
['chr20:10,000,000-10,009,999', 'chr20:10,100,001-11,000,000']))
@flagsaver.flagsaver
def test_incorrect_empty_regions(self):
FLAGS.mode = 'calling'
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
# Deliberately incorrect contig name.
FLAGS.regions = '20:10,000,000-11,000,000'
FLAGS.examples = 'examples.tfrecord'
options = make_examples.default_options(add_flags=True)
with six.assertRaisesRegex(self, ValueError,
'The regions to call is empty.'):
make_examples.processing_regions_from_options(options)
@parameterized.parameters(
# A SNP.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60168,
end=60169,
reference_bases='C',
alternate_bases=['T']),
reference_haplotype='GCACCT',
reference_offset=60165,
expected_return=[{
'haplotype':
'GCATCT',
'alt':
'T',
'variant':
variants_pb2.Variant(
reference_name='chr20',
start=60168,
end=60169,
reference_bases='C',
alternate_bases=['T'])
}]),
# A deletion.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60284,
end=60291,
reference_bases='ATTCCAG',
alternate_bases=['AT']),
reference_haplotype='TTTCCATTCCAGTCCAT',
reference_offset=60279,
expected_return=[{
'haplotype':
'TTTCCATTCCAT',
'alt':
'AT',
'variant':
variants_pb2.Variant(
reference_name='chr20',
start=60284,
end=60291,
reference_bases='ATTCCAG',
alternate_bases=['AT'])
}]),
# An insertion.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60279,
end=60285,
reference_bases='TTTCCA',
alternate_bases=['TTTCCATTCCA']),
reference_haplotype='TTTCCATTCCAGTCCAT',
reference_offset=60279,
expected_return=[{
'haplotype':
'TTTCCATTCCATTCCAGTCCAT',
'alt':
'TTTCCATTCCA',
'variant':
variants_pb2.Variant(
reference_name='chr20',
start=60279,
end=60285,
reference_bases='TTTCCA',
alternate_bases=['TTTCCATTCCA'])
}]),
# A deletion.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60284,
end=60291,
reference_bases='ATTCCAG',
alternate_bases=['AT']),
reference_haplotype='TTTCCATTCCAG',
reference_offset=60279,
expected_return=[{
'haplotype':
'TTTCCAT',
'alt':
'AT',
'variant':
variants_pb2.Variant(
reference_name='chr20',
start=60284,
end=60291,
reference_bases='ATTCCAG',
alternate_bases=['AT'])
}]),
# An insertion.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60279,
end=60285,
reference_bases='TTTCCA',
alternate_bases=['TTTCCATTCCA']),
reference_haplotype='TTTCCATTCCAG',
reference_offset=60279,
expected_return=[{
'haplotype':
'TTTCCATTCCATTCCAG',
'alt':
'TTTCCATTCCA',
'variant':
variants_pb2.Variant(
reference_name='chr20',
start=60279,
end=60285,
reference_bases='TTTCCA',
alternate_bases=['TTTCCATTCCA'])
}]))
def test_update_haplotype(self, variant, reference_haplotype,
reference_offset, expected_return):
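    """Checks update_haplotype applies the alt to the reference haplotype."""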
list_hap_obj = make_examples.update_haplotype(variant, reference_haplotype,
reference_offset)
self.assertListEqual(list_hap_obj, expected_return)
@parameterized.parameters(
dict(
dv_variant=variants_pb2.Variant(
reference_name='chr20',
start=60284,
end=60291,
reference_bases='ATTCCAG',
alternate_bases=['AT']),
cohort_variants=[
variants_pb2.Variant(
reference_name='chr20',
start=60279,
end=60285,
reference_bases='TTTCCA',
alternate_bases=['T', 'TTTCCATTCCA']),
variants_pb2.Variant(
reference_name='chr20',
start=60285,
end=60291,
reference_bases='TTTCCA',
alternate_bases=['T']),
],
expected_ref_haplotype='TTTCCATTCCAG',
expected_ref_offset=60279))
def test_get_ref_haplotype_and_offset(self, dv_variant, cohort_variants,
expected_ref_haplotype,
expected_ref_offset):
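    """Checks the ref haplotype and offset spanning the given variants."""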
ref_reader = fasta.IndexedFastaReader(testdata.CHR20_GRCH38_FASTA)
ref_haplotype, ref_offset = make_examples.get_ref_haplotype_and_offset(
dv_variant, cohort_variants, ref_reader)
self.assertEqual(ref_haplotype, expected_ref_haplotype)
self.assertEqual(ref_offset, expected_ref_offset)
# pylint: disable=unused-argument
@parameterized.parameters(
# A matched SNP.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60168,
end=60169,
reference_bases='C',
alternate_bases=['T']),
expected_return=dict(C=0.9998, T=0.0002),
label='matched_snp_1'),
# A matched deletion.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60285,
end=60291,
reference_bases='TTCCAG',
alternate_bases=['T']),
expected_return=dict(T=0.001198, TTCCAG=0.998802),
label='matched_del_1'),
      # An unmatched deletion.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60284,
end=60291,
reference_bases='ATTCCAG',
alternate_bases=['A']),
expected_return=dict(A=0, ATTCCAG=1),
label='unmatched_del_1'),
# A matched deletion, where the candidate is formatted differently.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60284,
end=60291,
reference_bases='ATTCCAG',
alternate_bases=['AT']),
expected_return=dict(AT=0.001198, ATTCCAG=0.998802),
label='matched_del_2: diff representation'),
# An unmatched SNP.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60150,
end=60151,
reference_bases='C',
alternate_bases=['T']),
expected_return=dict(C=1, T=0),
label='unmatched_snp_1'),
# A matched SNP and an unmatched SNP.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60168,
end=60169,
reference_bases='C',
alternate_bases=['T', 'A']),
expected_return=dict(C=0.9998, T=0.0002, A=0),
label='mixed_snp_1'),
# An unmatched SNP, where the REF allele frequency is not 1.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60168,
end=60169,
reference_bases='C',
alternate_bases=['A']),
expected_return=dict(C=0.9998, A=0),
label='unmatched_snp_2: non-1 ref allele'),
# A multi-allelic candidate at a multi-allelic locus.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60279,
end=60285,
reference_bases='TTTCCA',
alternate_bases=['T', 'TTTCCATTCCA']),
expected_return=dict(TTTCCA=0.999401, T=0.000399, TTTCCATTCCA=0.0002),
label='matched_mult_1'),
# A multi-allelic candidate at a multi-allelic locus.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60279,
end=60285,
reference_bases='TTTCCA',
alternate_bases=['T', 'TATCCATTCCA']),
expected_return=dict(TTTCCA=0.999401, T=0.000399, TATCCATTCCA=0),
label='unmatched_mult_1'),
# [Different representation]
# A deletion where the cohort variant is represented differently.
# In this case, REF frequency is calculated by going over all cohort ALTs.
# Thus, the sum of all dict values is not equal to 1.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60295,
end=60301,
reference_bases='TTCCAT',
alternate_bases=['T']),
expected_return=dict(T=0.000399, TTCCAT=0.923922),
label='matched_del_3: diff representation'),
# [Non-candidate allele]
# One allele of a multi-allelic cohort variant is not in candidate.
# The non-candidate allele should be ignored.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60279,
end=60285,
reference_bases='TTTCCA',
alternate_bases=['T']),
expected_return=dict(TTTCCA=0.999401, T=0.000399),
label='matched_del_4: multi-allelic cohort'),
# A left-align example.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=9074790,
end=9074794,
reference_bases='CT',
alternate_bases=['C', 'CTTT']),
expected_return=dict(C=0.167732, CTTT=0.215256, CT=0.442092),
label='matched_mult_2: left align'),
# A left-align example.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=9074790,
end=9074794,
reference_bases='C',
alternate_bases=['CTTT']),
expected_return=dict(CTTT=0.145367, C=0.442092),
label='matched_ins_1: left align'),
# A left-align example.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=9074790,
end=9074793,
reference_bases='CTT',
alternate_bases=['CTTA']),
expected_return=dict(CTTA=0, CTT=0.442092),
label='unmatched_ins_1: left align'),
      # A matched MNP.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=61065,
end=61066,
reference_bases='T',
alternate_bases=['C']),
expected_return=dict(C=0.079872, T=0.919729),
label='matched_mnps_1'),
# A matched SNP.
dict(
variant=variants_pb2.Variant(
reference_name='chr20',
start=62022,
end=62023,
reference_bases='G',
alternate_bases=['C', 'T']),
expected_return=dict(G=0.996206, C=0.003594, T=0),
label='matched_snp_2'))
def test_find_matching_allele_frequency(self, variant, expected_return,
label):
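    """Checks allele frequencies matched from the population VCF."""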
ref_reader = fasta.IndexedFastaReader(testdata.CHR20_GRCH38_FASTA)
vcf_reader = vcf.VcfReader(testdata.VCF_WITH_ALLELE_FREQUENCIES)
allele_frequencies = make_examples.find_matching_allele_frequency(
variant, vcf_reader, ref_reader)
# Compare keys.
self.assertSetEqual(
set(allele_frequencies.keys()), set(expected_return.keys()))
# Compare values (almost equal).
for key in allele_frequencies.keys():
self.assertAlmostEqual(allele_frequencies[key], expected_return[key])
# pylint: enable=unused-argument
class RegionProcessorTest(parameterized.TestCase):
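  """Tests for RegionProcessor, with collaborators mocked where needed."""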
def setUp(self):
super(RegionProcessorTest, self).setUp()
self.region = ranges.parse_literal('chr20:10,000,000-10,000,100')
FLAGS.reads = ''
self.options = make_examples.default_options(add_flags=False)
self.options.reference_filename = testdata.CHR20_FASTA
if not self.options.reads_filenames:
      self.options.reads_filenames.extend([testdata.CHR20_BAM])
self.options.truth_variants_filename = testdata.TRUTH_VARIANTS_VCF
self.options.mode = deepvariant_pb2.DeepVariantOptions.TRAINING
self.options.variant_caller_options.sample_name = 'sample_id'
self.processor = make_examples.RegionProcessor(self.options)
self.ref_reader = fasta.IndexedFastaReader(self.options.reference_filename)
self.mock_init = self.add_mock('initialize')
self.default_shape = [5, 5, 7]
self.default_format = 'raw'
def add_mock(self, name, retval='dontadd', side_effect='dontadd'):
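    """Patches `name` on self.processor and returns the created mock."""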
patcher = mock.patch.object(self.processor, name, autospec=True)
self.addCleanup(patcher.stop)
mocked = patcher.start()
if retval != 'dontadd':
mocked.return_value = retval
if side_effect != 'dontadd':
mocked.side_effect = side_effect
return mocked
def test_on_demand_initialization_called_if_not_initialized(self):
candidates = ['Candidates']
self.assertFalse(self.processor.initialized)
self.processor.in_memory_sam_reader = mock.Mock()
mock_rr = self.add_mock('region_reads', retval=[])
mock_cir = self.add_mock('candidates_in_region', retval=(candidates, []))
mock_lc = self.add_mock('label_candidates', retval=[])
self.processor.process(self.region)
test_utils.assert_called_once_workaround(self.mock_init)
mock_rr.assert_called_once_with(self.region)
self.processor.in_memory_sam_reader.replace_reads.assert_called_once_with(
[])
mock_cir.assert_called_once_with(self.region)
mock_lc.assert_called_once_with(candidates, self.region)
def test_on_demand_initialization_not_called_if_initialized(self):
self.processor.initialized = True
self.assertTrue(self.processor.initialized)
self.processor.in_memory_sam_reader = mock.Mock()
mock_rr = self.add_mock('region_reads', retval=[])
mock_cir = self.add_mock('candidates_in_region', retval=([], []))
mock_lc = self.add_mock('label_candidates', retval=[])
self.processor.process(self.region)
test_utils.assert_not_called_workaround(self.mock_init)
mock_rr.assert_called_once_with(self.region)
self.processor.in_memory_sam_reader.replace_reads.assert_called_once_with(
[])
mock_cir.assert_called_once_with(self.region)
test_utils.assert_called_once_workaround(mock_lc)
def test_process_calls_no_candidates(self):
self.processor.in_memory_sam_reader = mock.Mock()
mock_rr = self.add_mock('region_reads', retval=[])
mock_cir = self.add_mock('candidates_in_region', retval=([], []))
mock_cpe = self.add_mock('create_pileup_examples', retval=[])
mock_lc = self.add_mock('label_candidates')
candidates, examples, gvcfs, runtimes = self.processor.process(self.region)
self.assertEmpty(candidates)
self.assertEmpty(examples)
self.assertEmpty(gvcfs)
self.assertIsInstance(runtimes, dict)
mock_rr.assert_called_once_with(self.region)
self.processor.in_memory_sam_reader.replace_reads.assert_called_once_with(
[])
mock_cir.assert_called_once_with(self.region)
test_utils.assert_not_called_workaround(mock_cpe)
mock_lc.assert_called_once_with([], self.region)
@parameterized.parameters([
deepvariant_pb2.DeepVariantOptions.TRAINING,
deepvariant_pb2.DeepVariantOptions.CALLING
])
def test_process_calls_with_candidates(self, mode):
self.processor.options.mode = mode
self.processor.in_memory_sam_reader = mock.Mock()
mock_read = mock.MagicMock()
mock_candidate = mock.MagicMock()
mock_example = mock.MagicMock()
mock_label = mock.MagicMock()
mock_rr = self.add_mock('region_reads', retval=[mock_read])
mock_cir = self.add_mock(
'candidates_in_region', retval=([mock_candidate], []))
mock_cpe = self.add_mock('create_pileup_examples', retval=[mock_example])
mock_lc = self.add_mock(
'label_candidates', retval=[(mock_candidate, mock_label)])
mock_alte = self.add_mock('add_label_to_example', retval=mock_example)
candidates, examples, gvcfs, runtimes = self.processor.process(self.region)
self.assertEqual(candidates, [mock_candidate])
self.assertEqual(examples, [mock_example])
self.assertEmpty(gvcfs)
self.assertIsInstance(runtimes, dict)
mock_rr.assert_called_once_with(self.region)
self.processor.in_memory_sam_reader.replace_reads.assert_called_once_with(
[mock_read])
mock_cir.assert_called_once_with(self.region)
mock_cpe.assert_called_once_with(mock_candidate)
if mode == deepvariant_pb2.DeepVariantOptions.TRAINING:
mock_lc.assert_called_once_with([mock_candidate], self.region)
mock_alte.assert_called_once_with(mock_example, mock_label)
else:
      # In calling mode we don't label our candidates.
test_utils.assert_not_called_workaround(mock_lc)
test_utils.assert_not_called_workaround(mock_alte)
@parameterized.parameters([
deepvariant_pb2.DeepVariantOptions.TRAINING,
deepvariant_pb2.DeepVariantOptions.CALLING
])
def test_process_keeps_ordering_of_candidates_and_examples(self, mode):
self.processor.options.mode = mode
r1, r2 = mock.Mock(), mock.Mock()
c1, c2 = mock.Mock(), mock.Mock()
l1, l2 = mock.Mock(), mock.Mock()
e1, e2, e3 = mock.Mock(), mock.Mock(), mock.Mock()
self.processor.in_memory_sam_reader = mock.Mock()
self.add_mock('region_reads', retval=[r1, r2])
self.add_mock('candidates_in_region', retval=([c1, c2], []))
mock_cpe = self.add_mock(
'create_pileup_examples', side_effect=[[e1], [e2, e3]])
mock_lc = self.add_mock('label_candidates', retval=[(c1, l1), (c2, l2)])
mock_alte = self.add_mock('add_label_to_example', side_effect=[e1, e2, e3])
candidates, examples, gvcfs, runtimes = self.processor.process(self.region)
self.assertEqual(candidates, [c1, c2])
self.assertEqual(examples, [e1, e2, e3])
self.assertEmpty(gvcfs)
self.assertIsInstance(runtimes, dict)
self.processor.in_memory_sam_reader.replace_reads.assert_called_once_with(
[r1, r2])
    # Pileup examples are created for each candidate regardless of mode.
self.assertEqual([mock.call(c1), mock.call(c2)], mock_cpe.call_args_list)
if mode == deepvariant_pb2.DeepVariantOptions.CALLING:
# In calling mode, we never try to label.
test_utils.assert_not_called_workaround(mock_lc)
test_utils.assert_not_called_workaround(mock_alte)
else:
mock_lc.assert_called_once_with([c1, c2], self.region)
self.assertEqual([
mock.call(e1, l1),
mock.call(e2, l2),
mock.call(e3, l2),
], mock_alte.call_args_list)
def test_process_with_realigner(self):
self.processor.options.mode = deepvariant_pb2.DeepVariantOptions.CALLING
self.processor.options.realigner_enabled = True
self.processor.options.realigner_options.CopyFrom(
realigner_pb2.RealignerOptions())
self.processor.realigner = mock.Mock()
self.processor.realigner.realign_reads.return_value = [], []
self.processor.sam_readers = [mock.Mock()]
self.processor.sam_readers[0].query.return_value = []
self.processor.in_memory_sam_reader = mock.Mock()
c1, c2 = mock.Mock(), mock.Mock()
e1, e2, e3 = mock.Mock(), mock.Mock(), mock.Mock()
self.add_mock('candidates_in_region', retval=([c1, c2], []))
mock_cpe = self.add_mock(
'create_pileup_examples', side_effect=[[e1], [e2, e3]])
mock_lc = self.add_mock('label_candidates')
candidates, examples, gvcfs, runtimes = self.processor.process(self.region)
self.assertEqual(candidates, [c1, c2])
self.assertEqual(examples, [e1, e2, e3])
self.assertEmpty(gvcfs)
self.assertIsInstance(runtimes, dict)
self.processor.sam_readers[0].query.assert_called_once_with(self.region)
self.processor.realigner.realign_reads.assert_called_once_with([],
self.region)
self.processor.in_memory_sam_reader.replace_reads.assert_called_once_with(
[])
self.assertEqual([mock.call(c1), mock.call(c2)], mock_cpe.call_args_list)
test_utils.assert_not_called_workaround(mock_lc)
def test_candidates_in_region_no_reads(self):
self.processor.in_memory_sam_reader = mock.Mock()
self.processor.in_memory_sam_reader.query.return_value = []
mock_ac = self.add_mock('_make_allele_counter_for_region')
self.assertEqual(([], []), self.processor.candidates_in_region(self.region))
self.processor.in_memory_sam_reader.query.assert_called_once_with(
self.region)
    # A region with no reads should return early without making an AlleleCounter.
test_utils.assert_not_called_workaround(mock_ac)
@parameterized.parameters(True, False)
def test_candidates_in_region(self, include_gvcfs):
self.options.gvcf_filename = 'foo.vcf' if include_gvcfs else ''
self.processor.in_memory_sam_reader = mock.Mock()
reads = ['read1', 'read2']
self.processor.in_memory_sam_reader.query.return_value = reads
# Setup our make_allele_counter and other mocks.
mock_ac = mock.Mock()
mock_make_ac = self.add_mock(
'_make_allele_counter_for_region', retval=mock_ac)
# Setup our make_variant_caller and downstream mocks.
mock_vc = mock.Mock()
expected_calls = (['variant'], ['gvcf'] if include_gvcfs else [])
mock_vc.calls_and_gvcfs.return_value = expected_calls
self.processor.variant_caller = mock_vc
actual = self.processor.candidates_in_region(self.region)
# Make sure we're getting our reads for the region.
self.processor.in_memory_sam_reader.query.assert_called_once_with(
self.region)
# Make sure we're creating an AlleleCounter once and adding each of our
# reads to it.
mock_make_ac.assert_called_once_with(self.region)
self.assertEqual([mock.call(r, 'sample_id') for r in reads],
mock_ac.add.call_args_list)
# Make sure we call CallVariant for each of the counts returned by the
# allele counter.
mock_vc.calls_and_gvcfs.assert_called_once_with(mock_ac, include_gvcfs)
# Finally, our actual result should be the single 'variant' and potentially
# the gvcf records.
self.assertEqual(expected_calls, actual)
def test_create_pileup_examples_handles_none(self):
self.processor.pic = mock.Mock()
dv_call = mock.Mock()
self.processor.pic.create_pileup_images.return_value = None
self.assertEqual([], self.processor.create_pileup_examples(dv_call))
self.processor.pic.create_pileup_images.assert_called_once_with(
dv_call=dv_call,
reads_for_samples=[],
haplotype_alignments_for_samples=None,
haplotype_sequences=None)
def test_create_pileup_examples(self):
self.processor.pic = mock.Mock()
self.add_mock(
'_encode_tensor',
side_effect=[
(six.b('tensor1'), self.default_shape, self.default_format),
(six.b('tensor2'), self.default_shape, self.default_format)
])
dv_call = mock.Mock()
dv_call.variant = test_utils.make_variant(start=10, alleles=['A', 'C', 'G'])
ex = mock.Mock()
alt1, alt2 = ['C'], ['G']
self.processor.pic.create_pileup_images.return_value = [
(alt1, six.b('tensor1')), (alt2, six.b('tensor2'))
]
actual = self.processor.create_pileup_examples(dv_call)
self.processor.pic.create_pileup_images.assert_called_once_with(
dv_call=dv_call,
reads_for_samples=[],
haplotype_alignments_for_samples=None,
haplotype_sequences=None)
self.assertLen(actual, 2)
for ex, (alt, img) in zip(actual, [(alt1, six.b('tensor1')),
(alt2, six.b('tensor2'))]):
self.assertEqual(tf_utils.example_alt_alleles(ex), alt)
self.assertEqual(tf_utils.example_variant(ex), dv_call.variant)
self.assertEqual(tf_utils.example_encoded_image(ex), img)
self.assertEqual(tf_utils.example_image_shape(ex), self.default_shape)
self.assertEqual(
tf_utils.example_image_format(ex), six.b(self.default_format))
@parameterized.parameters(
# Test that a het variant gets a label value of 1 assigned to the example.
dict(
label=variant_labeler.VariantLabel(
is_confident=True,
variant=test_utils.make_variant(start=10, alleles=['A', 'C']),
genotype=(0, 1)),
expected_label_value=1,
),
# Test that a reference variant gets a label value of 0 in the example.
dict(
label=variant_labeler.VariantLabel(
is_confident=True,
variant=test_utils.make_variant(start=10, alleles=['A', '.']),
genotype=(0, 0)),
expected_label_value=0,
),
)
def test_add_label_to_example(self, label, expected_label_value):
example = self._example_for_variant(label.variant)
labeled = copy.deepcopy(example)
actual = self.processor.add_label_to_example(labeled, label)
# The add_label_to_example command modifies labeled and returns it.
self.assertIs(actual, labeled)
# Check that all keys from example are present in labeled.
for key, value in example.features.feature.items():
if key != 'variant/encoded': # Special case tested below.
self.assertEqual(value, labeled.features.feature[key])
# The genotype of our example_variant should be set to the true genotype
# according to our label.
self.assertEqual(expected_label_value, tf_utils.example_label(labeled))
labeled_variant = tf_utils.example_variant(labeled)
call = variant_utils.only_call(labeled_variant)
self.assertEqual(tuple(call.genotype), label.genotype)
    # The original variant and labeled_variant from our tf.Example should be
# equal except for the genotype field, since this is set by
# add_label_to_example.
label.variant.calls[0].genotype[:] = []
call.genotype[:] = []
self.assertEqual(label.variant, labeled_variant)
def test_label_variant_raises_for_non_confident_variant(self):
label = variant_labeler.VariantLabel(
is_confident=False,
variant=test_utils.make_variant(start=10, alleles=['A', 'C']),
genotype=(0, 1))
example = self._example_for_variant(label.variant)
with six.assertRaisesRegex(
self, ValueError, 'Cannot add a non-confident label to an example'):
self.processor.add_label_to_example(example, label)
def _example_for_variant(self, variant):
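    """Returns a minimal tf.Example for variant with dummy image data."""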
return tf_utils.make_example(variant, list(variant.alternate_bases),
six.b('foo'), self.default_shape,
self.default_format)
@parameterized.parameters('sort_by_haplotypes', 'use_original_quality_scores')
def test_flags_strictly_needs_sam_aux_fields(
self, flags_strictly_needs_sam_aux_fields):
FLAGS.mode = 'calling'
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.examples = 'examples.tfrecord'
FLAGS[flags_strictly_needs_sam_aux_fields].value = True
FLAGS.parse_sam_aux_fields = False
with six.assertRaisesRegex(
self, Exception,
'If --{} is set then parse_sam_aux_fields must be set too.'.format(
flags_strictly_needs_sam_aux_fields)):
make_examples.default_options(add_flags=True)
@parameterized.parameters(
('add_hp_channel', True, None),
('add_hp_channel', False,
'WARGNING! --{} is set but --parse_sam_aux_fields is not set.'),
('add_hp_channel', None,
'Because --{}=true, --parse_sam_aux_fields is set to true to enable '
'reading auxiliary fields from reads.'),
)
def test_flag_optionally_needs_sam_aux_fields_with_different_parse_sam_aux_fields(
self, flag_optionally_needs_sam_aux_fields, parse_sam_aux_fields,
expected_message):
FLAGS.mode = 'calling'
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.reads = testdata.CHR20_BAM
FLAGS.examples = 'examples.tfrecord'
FLAGS[flag_optionally_needs_sam_aux_fields].value = True
FLAGS.parse_sam_aux_fields = parse_sam_aux_fields
with self.assertLogs() as logs:
make_examples.default_options(add_flags=True)
warning_messages = [x for x in logs.output if x.startswith('WARNING')]
if expected_message:
self.assertLen(warning_messages, 1)
self.assertRegex(
warning_messages[0],
expected_message.format(flag_optionally_needs_sam_aux_fields))
else:
self.assertEmpty(warning_messages)
@parameterized.parameters(
[
dict(window_width=221),
dict(window_width=1001),
],)
def test_align_to_all_haplotypes(self, window_width):
# align_to_all_haplotypes() will pull from the reference, so choose a
# real variant.
region = ranges.parse_literal('chr20:10,046,000-10,046,400')
nist_reader = vcf.VcfReader(testdata.TRUTH_VARIANTS_VCF)
nist_variants = list(nist_reader.query(region))
# We picked this region to have exactly one known variant:
# reference_bases: "AAGAAAGAAAG"
# alternate_bases: "A", a deletion of 10 bp
# start: 10046177
# end: 10046188
# reference_name: "chr20"
variant = nist_variants[0]
self.processor.pic = mock.Mock()
self.processor.pic.width = window_width
self.processor.pic.half_width = int((self.processor.pic.width - 1) / 2)
self.processor.realigner = mock.Mock()
# Using a real ref_reader to test that the reference allele matches
# between the variant and the reference at the variant's coordinates.
self.processor.realigner.ref_reader = self.ref_reader
read = test_utils.make_read(
'A' * 101, start=10046100, cigar='101M', quals=[30] * 101)
self.processor.realigner.align_to_haplotype = mock.Mock()
alt_info = self.processor.align_to_all_haplotypes(variant, [read])
hap_alignments = alt_info['alt_alignments']
hap_sequences = alt_info['alt_sequences']
# Both outputs are keyed by alt allele.
self.assertCountEqual(hap_alignments.keys(), ['A'])
self.assertCountEqual(hap_sequences.keys(), ['A'])
# Sequence must be the length of the window.
self.assertLen(hap_sequences['A'], self.processor.pic.width)
# align_to_haplotype should be called once for each alt (1 alt here).
self.processor.realigner.align_to_haplotype.assert_called_once()
# If variant reference_bases are wrong, it should raise a ValueError.
variant.reference_bases = 'G'
with six.assertRaisesRegex(self, ValueError,
'does not match the bases in the reference'):
self.processor.align_to_all_haplotypes(variant, [read])
@parameterized.parameters(
dict(
dv_calls=iter([
deepvariant_pb2.DeepVariantCall(
variant=variants_pb2.Variant(
reference_name='chr20',
start=60168,
end=60169,
reference_bases='C',
alternate_bases=['T']),
allele_support=None)
]),
expected_return=dict(C=0.9998, T=0.0002)))
def test_add_allele_frequencies_to_candidates(self, dv_calls,
expected_return):
pop_vcf_reader = vcf.VcfReader(testdata.VCF_WITH_ALLELE_FREQUENCIES)
self.processor.ref_reader = fasta.IndexedFastaReader(
testdata.CHR20_GRCH38_FASTA)
updated_dv_call = list(
self.processor.add_allele_frequencies_to_candidates(
dv_calls, pop_vcf_reader))
actual_frequency = updated_dv_call[0].allele_frequency
# Compare keys.
self.assertSetEqual(
set(actual_frequency.keys()), set(expected_return.keys()))
# Compare values (almost equal).
for key in actual_frequency.keys():
self.assertAlmostEqual(actual_frequency[key], expected_return[key])
@parameterized.parameters(
dict(
dv_calls=iter([
deepvariant_pb2.DeepVariantCall(
variant=variants_pb2.Variant(
reference_name='chrM',
start=10000,
end=10001,
reference_bases='T',
alternate_bases=['G']),
allele_support=None)
]),
expected_return=dict(T=1, G=0)))
def test_add_allele_frequencies_to_candidates_invalid_vcf(
self, dv_calls, expected_return):
pop_vcf_reader = None
self.processor.ref_reader = None
updated_dv_call = list(
self.processor.add_allele_frequencies_to_candidates(
dv_calls, pop_vcf_reader))
actual_frequency = updated_dv_call[0].allele_frequency
# Compare keys.
self.assertSetEqual(
set(actual_frequency.keys()), set(expected_return.keys()))
# Compare values (almost equal).
for key in actual_frequency.keys():
self.assertAlmostEqual(actual_frequency[key], expected_return[key])
if __name__ == '__main__':
absltest.main()
| bsd-3-clause | 5,773,336,491,472,342,000 | 40.289968 | 84 | 0.63676 | false |
mancoast/CPythonPyc_test | cpython/223_test_descr.py | 1 | 90901 | # Test enhancements related to descriptors and new-style classes
from test_support import verify, vereq, verbose, TestFailed, TESTFN, get_original_stdout
from copy import deepcopy
import warnings
warnings.filterwarnings("ignore",
r'complex divmod\(\), // and % are deprecated$',
DeprecationWarning, r'(<string>|test_descr)$')
def veris(a, b):
if a is not b:
raise TestFailed, "%r is %r" % (a, b)
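# The test*op helpers below check an operation three ways: by evaluating (or
# executing) the given expression or statement, by calling the special method
# looked up on the type, and by calling the bound method on the instance,
# verifying that all three produce the same result.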
def testunop(a, res, expr="len(a)", meth="__len__"):
if verbose: print "checking", expr
dict = {'a': a}
vereq(eval(expr, dict), res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
vereq(m, t.__dict__[meth])
vereq(m(a), res)
bm = getattr(a, meth)
vereq(bm(), res)
def testbinop(a, b, res, expr="a+b", meth="__add__"):
if verbose: print "checking", expr
dict = {'a': a, 'b': b}
# XXX Hack so this passes before 2.3 when -Qnew is specified.
if meth == "__div__" and 1/2 == 0.5:
meth = "__truediv__"
vereq(eval(expr, dict), res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
vereq(m, t.__dict__[meth])
vereq(m(a, b), res)
bm = getattr(a, meth)
vereq(bm(b), res)
def testternop(a, b, c, res, expr="a[b:c]", meth="__getslice__"):
if verbose: print "checking", expr
dict = {'a': a, 'b': b, 'c': c}
vereq(eval(expr, dict), res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
vereq(m, t.__dict__[meth])
vereq(m(a, b, c), res)
bm = getattr(a, meth)
vereq(bm(b, c), res)
def testsetop(a, b, res, stmt="a+=b", meth="__iadd__"):
if verbose: print "checking", stmt
dict = {'a': deepcopy(a), 'b': b}
exec stmt in dict
vereq(dict['a'], res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
vereq(m, t.__dict__[meth])
dict['a'] = deepcopy(a)
m(dict['a'], b)
vereq(dict['a'], res)
dict['a'] = deepcopy(a)
bm = getattr(dict['a'], meth)
bm(b)
vereq(dict['a'], res)
def testset2op(a, b, c, res, stmt="a[b]=c", meth="__setitem__"):
if verbose: print "checking", stmt
dict = {'a': deepcopy(a), 'b': b, 'c': c}
exec stmt in dict
vereq(dict['a'], res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
vereq(m, t.__dict__[meth])
dict['a'] = deepcopy(a)
m(dict['a'], b, c)
vereq(dict['a'], res)
dict['a'] = deepcopy(a)
bm = getattr(dict['a'], meth)
bm(b, c)
vereq(dict['a'], res)
def testset3op(a, b, c, d, res, stmt="a[b:c]=d", meth="__setslice__"):
if verbose: print "checking", stmt
dict = {'a': deepcopy(a), 'b': b, 'c': c, 'd': d}
exec stmt in dict
vereq(dict['a'], res)
t = type(a)
while meth not in t.__dict__:
t = t.__bases__[0]
m = getattr(t, meth)
vereq(m, t.__dict__[meth])
dict['a'] = deepcopy(a)
m(dict['a'], b, c, d)
vereq(dict['a'], res)
dict['a'] = deepcopy(a)
bm = getattr(dict['a'], meth)
bm(b, c, d)
vereq(dict['a'], res)
def class_docstrings():
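    # Check that __doc__ is set correctly (or is None when absent) for both
    # classic and new-style classes.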
class Classic:
"A classic docstring."
vereq(Classic.__doc__, "A classic docstring.")
vereq(Classic.__dict__['__doc__'], "A classic docstring.")
class Classic2:
pass
verify(Classic2.__doc__ is None)
class NewStatic(object):
"Another docstring."
vereq(NewStatic.__doc__, "Another docstring.")
vereq(NewStatic.__dict__['__doc__'], "Another docstring.")
class NewStatic2(object):
pass
verify(NewStatic2.__doc__ is None)
class NewDynamic(object):
"Another docstring."
vereq(NewDynamic.__doc__, "Another docstring.")
vereq(NewDynamic.__dict__['__doc__'], "Another docstring.")
class NewDynamic2(object):
pass
verify(NewDynamic2.__doc__ is None)
def lists():
if verbose: print "Testing list operations..."
testbinop([1], [2], [1,2], "a+b", "__add__")
testbinop([1,2,3], 2, 1, "b in a", "__contains__")
testbinop([1,2,3], 4, 0, "b in a", "__contains__")
testbinop([1,2,3], 1, 2, "a[b]", "__getitem__")
testternop([1,2,3], 0, 2, [1,2], "a[b:c]", "__getslice__")
testsetop([1], [2], [1,2], "a+=b", "__iadd__")
testsetop([1,2], 3, [1,2,1,2,1,2], "a*=b", "__imul__")
testunop([1,2,3], 3, "len(a)", "__len__")
testbinop([1,2], 3, [1,2,1,2,1,2], "a*b", "__mul__")
testbinop([1,2], 3, [1,2,1,2,1,2], "b*a", "__rmul__")
testset2op([1,2], 1, 3, [1,3], "a[b]=c", "__setitem__")
testset3op([1,2,3,4], 1, 3, [5,6], [1,5,6,4], "a[b:c]=d", "__setslice__")
def dicts():
if verbose: print "Testing dict operations..."
testbinop({1:2}, {2:1}, -1, "cmp(a,b)", "__cmp__")
testbinop({1:2,3:4}, 1, 1, "b in a", "__contains__")
testbinop({1:2,3:4}, 2, 0, "b in a", "__contains__")
testbinop({1:2,3:4}, 1, 2, "a[b]", "__getitem__")
d = {1:2,3:4}
l1 = []
for i in d.keys(): l1.append(i)
l = []
for i in iter(d): l.append(i)
vereq(l, l1)
l = []
for i in d.__iter__(): l.append(i)
vereq(l, l1)
l = []
for i in dict.__iter__(d): l.append(i)
vereq(l, l1)
d = {1:2, 3:4}
testunop(d, 2, "len(a)", "__len__")
vereq(eval(repr(d), {}), d)
vereq(eval(d.__repr__(), {}), d)
testset2op({1:2,3:4}, 2, 3, {1:2,2:3,3:4}, "a[b]=c", "__setitem__")
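# The dict() constructor should accept no arguments, a mapping, the 'items'
# keyword, or a sequence of 2-sequences, and reject anything else with
# TypeError (or ValueError for sequences of the wrong shape).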
def dict_constructor():
if verbose:
print "Testing dict constructor ..."
d = dict()
vereq(d, {})
d = dict({})
vereq(d, {})
d = dict(items={})
vereq(d, {})
d = dict({1: 2, 'a': 'b'})
vereq(d, {1: 2, 'a': 'b'})
vereq(d, dict(d.items()))
vereq(d, dict(items=d.iteritems()))
for badarg in 0, 0L, 0j, "0", [0], (0,):
try:
dict(badarg)
except TypeError:
pass
except ValueError:
if badarg == "0":
# It's a sequence, and its elements are also sequences (gotta
# love strings <wink>), but they aren't of length 2, so this
# one seemed better as a ValueError than a TypeError.
pass
else:
raise TestFailed("no TypeError from dict(%r)" % badarg)
else:
raise TestFailed("no TypeError from dict(%r)" % badarg)
try:
dict(senseless={})
except TypeError:
pass
else:
raise TestFailed("no TypeError from dict(senseless={})")
try:
dict({}, {})
except TypeError:
pass
else:
raise TestFailed("no TypeError from dict({}, {})")
class Mapping:
# Lacks a .keys() method; will be added later.
dict = {1:2, 3:4, 'a':1j}
try:
dict(Mapping())
except TypeError:
pass
else:
raise TestFailed("no TypeError from dict(incomplete mapping)")
Mapping.keys = lambda self: self.dict.keys()
Mapping.__getitem__ = lambda self, i: self.dict[i]
d = dict(items=Mapping())
vereq(d, Mapping.dict)
# Init from sequence of iterable objects, each producing a 2-sequence.
class AddressBookEntry:
def __init__(self, first, last):
self.first = first
self.last = last
def __iter__(self):
return iter([self.first, self.last])
d = dict([AddressBookEntry('Tim', 'Warsaw'),
AddressBookEntry('Barry', 'Peters'),
AddressBookEntry('Tim', 'Peters'),
AddressBookEntry('Barry', 'Warsaw')])
vereq(d, {'Barry': 'Warsaw', 'Tim': 'Peters'})
d = dict(zip(range(4), range(1, 5)))
vereq(d, dict([(i, i+1) for i in range(4)]))
# Bad sequence lengths.
for bad in [('tooshort',)], [('too', 'long', 'by 1')]:
try:
dict(bad)
except ValueError:
pass
else:
raise TestFailed("no ValueError from dict(%r)" % bad)
def test_dir():
if verbose:
print "Testing dir() ..."
junk = 12
vereq(dir(), ['junk'])
del junk
# Just make sure these don't blow up!
for arg in 2, 2L, 2j, 2e0, [2], "2", u"2", (2,), {2:2}, type, test_dir:
dir(arg)
# Try classic classes.
class C:
Cdata = 1
def Cmethod(self): pass
cstuff = ['Cdata', 'Cmethod', '__doc__', '__module__']
vereq(dir(C), cstuff)
verify('im_self' in dir(C.Cmethod))
c = C() # c.__doc__ is an odd thing to see here; ditto c.__module__.
vereq(dir(c), cstuff)
c.cdata = 2
c.cmethod = lambda self: 0
vereq(dir(c), cstuff + ['cdata', 'cmethod'])
verify('im_self' in dir(c.Cmethod))
class A(C):
Adata = 1
def Amethod(self): pass
astuff = ['Adata', 'Amethod'] + cstuff
vereq(dir(A), astuff)
verify('im_self' in dir(A.Amethod))
a = A()
vereq(dir(a), astuff)
verify('im_self' in dir(a.Amethod))
a.adata = 42
a.amethod = lambda self: 3
vereq(dir(a), astuff + ['adata', 'amethod'])
# The same, but with new-style classes. Since these have object as a
# base class, a lot more gets sucked in.
def interesting(strings):
return [s for s in strings if not s.startswith('_')]
class C(object):
Cdata = 1
def Cmethod(self): pass
cstuff = ['Cdata', 'Cmethod']
vereq(interesting(dir(C)), cstuff)
c = C()
vereq(interesting(dir(c)), cstuff)
verify('im_self' in dir(C.Cmethod))
c.cdata = 2
c.cmethod = lambda self: 0
vereq(interesting(dir(c)), cstuff + ['cdata', 'cmethod'])
verify('im_self' in dir(c.Cmethod))
class A(C):
Adata = 1
def Amethod(self): pass
astuff = ['Adata', 'Amethod'] + cstuff
vereq(interesting(dir(A)), astuff)
verify('im_self' in dir(A.Amethod))
a = A()
vereq(interesting(dir(a)), astuff)
a.adata = 42
a.amethod = lambda self: 3
vereq(interesting(dir(a)), astuff + ['adata', 'amethod'])
verify('im_self' in dir(a.Amethod))
# Try a module subclass.
import sys
class M(type(sys)):
pass
minstance = M()
minstance.b = 2
minstance.a = 1
vereq(dir(minstance), ['a', 'b'])
class M2(M):
def getdict(self):
return "Not a dict!"
__dict__ = property(getdict)
m2instance = M2()
m2instance.b = 2
m2instance.a = 1
vereq(m2instance.__dict__, "Not a dict!")
try:
dir(m2instance)
except TypeError:
pass
# Two essentially featureless objects, just inheriting stuff from
# object.
vereq(dir(None), dir(Ellipsis))
# Nasty test case for proxied objects
class Wrapper(object):
def __init__(self, obj):
self.__obj = obj
def __repr__(self):
return "Wrapper(%s)" % repr(self.__obj)
def __getitem__(self, key):
return Wrapper(self.__obj[key])
def __len__(self):
return len(self.__obj)
def __getattr__(self, name):
return Wrapper(getattr(self.__obj, name))
class C(object):
def __getclass(self):
return Wrapper(type(self))
__class__ = property(__getclass)
dir(C()) # This used to segfault
binops = {
'add': '+',
'sub': '-',
'mul': '*',
'div': '/',
'mod': '%',
'divmod': 'divmod',
'pow': '**',
'lshift': '<<',
'rshift': '>>',
'and': '&',
'xor': '^',
'or': '|',
'cmp': 'cmp',
'lt': '<',
'le': '<=',
'eq': '==',
'ne': '!=',
'gt': '>',
'ge': '>=',
}
for name, expr in binops.items():
if expr.islower():
expr = expr + "(a, b)"
else:
expr = 'a %s b' % expr
binops[name] = expr
unops = {
'pos': '+',
'neg': '-',
'abs': 'abs',
'invert': '~',
'int': 'int',
'long': 'long',
'float': 'float',
'oct': 'oct',
'hex': 'hex',
}
for name, expr in unops.items():
if expr.islower():
expr = expr + "(a)"
else:
expr = '%s a' % expr
unops[name] = expr
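# Driver: for a pair of numbers, exercise every binary and unary slot method
# that the type actually defines (unless listed in skip), comparing each
# against the eval()'ed expression.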
def numops(a, b, skip=[]):
dict = {'a': a, 'b': b}
for name, expr in binops.items():
if name not in skip:
name = "__%s__" % name
if hasattr(a, name):
res = eval(expr, dict)
testbinop(a, b, res, expr, name)
for name, expr in unops.items():
if name not in skip:
name = "__%s__" % name
if hasattr(a, name):
res = eval(expr, dict)
testunop(a, res, expr, name)
def ints():
if verbose: print "Testing int operations..."
numops(100, 3)
# The following crashes in Python 2.2
vereq((1).__nonzero__(), 1)
vereq((0).__nonzero__(), 0)
# This returns 'NotImplemented' in Python 2.2
class C(int):
def __add__(self, other):
return NotImplemented
try:
C() + ""
except TypeError:
pass
else:
raise TestFailed, "NotImplemented should have caused TypeError"
def longs():
if verbose: print "Testing long operations..."
numops(100L, 3L)
def floats():
if verbose: print "Testing float operations..."
numops(100.0, 3.0)
def complexes():
if verbose: print "Testing complex operations..."
numops(100.0j, 3.0j, skip=['lt', 'le', 'gt', 'ge', 'int', 'long', 'float'])
class Number(complex):
__slots__ = ['prec']
def __new__(cls, *args, **kwds):
result = complex.__new__(cls, *args)
result.prec = kwds.get('prec', 12)
return result
def __repr__(self):
prec = self.prec
if self.imag == 0.0:
return "%.*g" % (prec, self.real)
if self.real == 0.0:
return "%.*gj" % (prec, self.imag)
return "(%.*g+%.*gj)" % (prec, self.real, prec, self.imag)
__str__ = __repr__
a = Number(3.14, prec=6)
vereq(`a`, "3.14")
vereq(a.prec, 6)
a = Number(a, prec=2)
vereq(`a`, "3.1")
vereq(a.prec, 2)
a = Number(234.5)
vereq(`a`, "234.5")
vereq(a.prec, 12)
def spamlists():
if verbose: print "Testing spamlist operations..."
import copy, xxsubtype as spam
def spamlist(l, memo=None):
import xxsubtype as spam
return spam.spamlist(l)
# This is an ugly hack:
copy._deepcopy_dispatch[spam.spamlist] = spamlist
testbinop(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+b", "__add__")
testbinop(spamlist([1,2,3]), 2, 1, "b in a", "__contains__")
testbinop(spamlist([1,2,3]), 4, 0, "b in a", "__contains__")
testbinop(spamlist([1,2,3]), 1, 2, "a[b]", "__getitem__")
testternop(spamlist([1,2,3]), 0, 2, spamlist([1,2]),
"a[b:c]", "__getslice__")
testsetop(spamlist([1]), spamlist([2]), spamlist([1,2]),
"a+=b", "__iadd__")
testsetop(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*=b", "__imul__")
testunop(spamlist([1,2,3]), 3, "len(a)", "__len__")
testbinop(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*b", "__mul__")
testbinop(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "b*a", "__rmul__")
testset2op(spamlist([1,2]), 1, 3, spamlist([1,3]), "a[b]=c", "__setitem__")
testset3op(spamlist([1,2,3,4]), 1, 3, spamlist([5,6]),
spamlist([1,5,6,4]), "a[b:c]=d", "__setslice__")
# Test subclassing
class C(spam.spamlist):
def foo(self): return 1
a = C()
vereq(a, [])
vereq(a.foo(), 1)
a.append(100)
vereq(a, [100])
vereq(a.getstate(), 0)
a.setstate(42)
vereq(a.getstate(), 42)
def spamdicts():
if verbose: print "Testing spamdict operations..."
import copy, xxsubtype as spam
def spamdict(d, memo=None):
import xxsubtype as spam
sd = spam.spamdict()
for k, v in d.items(): sd[k] = v
return sd
# This is an ugly hack:
copy._deepcopy_dispatch[spam.spamdict] = spamdict
testbinop(spamdict({1:2}), spamdict({2:1}), -1, "cmp(a,b)", "__cmp__")
testbinop(spamdict({1:2,3:4}), 1, 1, "b in a", "__contains__")
testbinop(spamdict({1:2,3:4}), 2, 0, "b in a", "__contains__")
testbinop(spamdict({1:2,3:4}), 1, 2, "a[b]", "__getitem__")
d = spamdict({1:2,3:4})
l1 = []
for i in d.keys(): l1.append(i)
l = []
for i in iter(d): l.append(i)
vereq(l, l1)
l = []
for i in d.__iter__(): l.append(i)
vereq(l, l1)
l = []
for i in type(spamdict({})).__iter__(d): l.append(i)
vereq(l, l1)
straightd = {1:2, 3:4}
spamd = spamdict(straightd)
testunop(spamd, 2, "len(a)", "__len__")
testunop(spamd, repr(straightd), "repr(a)", "__repr__")
testset2op(spamdict({1:2,3:4}), 2, 3, spamdict({1:2,2:3,3:4}),
"a[b]=c", "__setitem__")
# Test subclassing
class C(spam.spamdict):
def foo(self): return 1
a = C()
vereq(a.items(), [])
vereq(a.foo(), 1)
a['foo'] = 'bar'
vereq(a.items(), [('foo', 'bar')])
vereq(a.getstate(), 0)
a.setstate(100)
vereq(a.getstate(), 100)
def pydicts():
if verbose: print "Testing Python subclass of dict..."
verify(issubclass(dict, dict))
verify(isinstance({}, dict))
d = dict()
vereq(d, {})
verify(d.__class__ is dict)
verify(isinstance(d, dict))
class C(dict):
state = -1
def __init__(self, *a, **kw):
if a:
vereq(len(a), 1)
self.state = a[0]
if kw:
for k, v in kw.items(): self[v] = k
def __getitem__(self, key):
return self.get(key, 0)
def __setitem__(self, key, value):
verify(isinstance(key, type(0)))
dict.__setitem__(self, key, value)
def setstate(self, state):
self.state = state
def getstate(self):
return self.state
verify(issubclass(C, dict))
a1 = C(12)
vereq(a1.state, 12)
a2 = C(foo=1, bar=2)
vereq(a2[1], 'foo')
vereq(a2[2], 'bar')
a = C()
vereq(a.state, -1)
vereq(a.getstate(), -1)
a.setstate(0)
vereq(a.state, 0)
vereq(a.getstate(), 0)
a.setstate(10)
vereq(a.state, 10)
vereq(a.getstate(), 10)
vereq(a[42], 0)
a[42] = 24
vereq(a[42], 24)
if verbose: print "pydict stress test ..."
N = 50
for i in range(N):
a[i] = C()
for j in range(N):
a[i][j] = i*j
for i in range(N):
for j in range(N):
vereq(a[i][j], i*j)
def pylists():
if verbose: print "Testing Python subclass of list..."
class C(list):
def __getitem__(self, i):
return list.__getitem__(self, i) + 100
def __getslice__(self, i, j):
return (i, j)
a = C()
a.extend([0,1,2])
vereq(a[0], 100)
vereq(a[1], 101)
vereq(a[2], 102)
vereq(a[100:200], (100,200))
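# __metaclass__ may be any callable (a type subclass or otherwise); the tests
# below cover explicit metaclasses, cooperative super() via a metaclass, and
# properties generated automatically in a metaclass __new__.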
def metaclass():
if verbose: print "Testing __metaclass__..."
class C:
__metaclass__ = type
def __init__(self):
self.__state = 0
def getstate(self):
return self.__state
def setstate(self, state):
self.__state = state
a = C()
vereq(a.getstate(), 0)
a.setstate(10)
vereq(a.getstate(), 10)
class D:
class __metaclass__(type):
def myself(cls): return cls
vereq(D.myself(), D)
d = D()
verify(d.__class__ is D)
class M1(type):
def __new__(cls, name, bases, dict):
dict['__spam__'] = 1
return type.__new__(cls, name, bases, dict)
class C:
__metaclass__ = M1
vereq(C.__spam__, 1)
c = C()
vereq(c.__spam__, 1)
class _instance(object):
pass
class M2(object):
def __new__(cls, name, bases, dict):
self = object.__new__(cls)
self.name = name
self.bases = bases
self.dict = dict
return self
__new__ = staticmethod(__new__)
def __call__(self):
it = _instance()
# Early binding of methods
for key in self.dict:
if key.startswith("__"):
continue
setattr(it, key, self.dict[key].__get__(it, self))
return it
class C:
__metaclass__ = M2
def spam(self):
return 42
vereq(C.name, 'C')
vereq(C.bases, ())
verify('spam' in C.dict)
c = C()
vereq(c.spam(), 42)
# More metaclass examples
class autosuper(type):
# Automatically add __super to the class
# This trick only works for dynamic classes
def __new__(metaclass, name, bases, dict):
cls = super(autosuper, metaclass).__new__(metaclass,
name, bases, dict)
# Name mangling for __super removes leading underscores
while name[:1] == "_":
name = name[1:]
if name:
name = "_%s__super" % name
else:
name = "__super"
setattr(cls, name, super(cls))
return cls
class A:
__metaclass__ = autosuper
def meth(self):
return "A"
class B(A):
def meth(self):
return "B" + self.__super.meth()
class C(A):
def meth(self):
return "C" + self.__super.meth()
class D(C, B):
def meth(self):
return "D" + self.__super.meth()
vereq(D().meth(), "DCBA")
class E(B, C):
def meth(self):
return "E" + self.__super.meth()
vereq(E().meth(), "EBCA")
class autoproperty(type):
# Automatically create property attributes when methods
# named _get_x and/or _set_x are found
def __new__(metaclass, name, bases, dict):
hits = {}
for key, val in dict.iteritems():
if key.startswith("_get_"):
key = key[5:]
get, set = hits.get(key, (None, None))
get = val
hits[key] = get, set
elif key.startswith("_set_"):
key = key[5:]
get, set = hits.get(key, (None, None))
set = val
hits[key] = get, set
for key, (get, set) in hits.iteritems():
dict[key] = property(get, set)
return super(autoproperty, metaclass).__new__(metaclass,
name, bases, dict)
class A:
__metaclass__ = autoproperty
def _get_x(self):
return -self.__x
def _set_x(self, x):
self.__x = -x
a = A()
verify(not hasattr(a, "x"))
a.x = 12
vereq(a.x, 12)
vereq(a._A__x, -12)
class multimetaclass(autoproperty, autosuper):
# Merge of multiple cooperating metaclasses
pass
class A:
__metaclass__ = multimetaclass
def _get_x(self):
return "A"
class B(A):
def _get_x(self):
return "B" + self.__super._get_x()
class C(A):
def _get_x(self):
return "C" + self.__super._get_x()
class D(C, B):
def _get_x(self):
return "D" + self.__super._get_x()
vereq(D().x, "DCBA")
# Make sure type(x) doesn't call x.__class__.__init__
class T(type):
counter = 0
def __init__(self, *args):
T.counter += 1
class C:
__metaclass__ = T
vereq(T.counter, 1)
a = C()
vereq(type(a), C)
vereq(T.counter, 1)
class C(object): pass
c = C()
try: c()
except TypeError: pass
else: raise TestFailed, "calling object w/o call method should raise TypeError"
def pymods():
if verbose: print "Testing Python subclass of module..."
log = []
import sys
MT = type(sys)
class MM(MT):
def __init__(self):
MT.__init__(self)
def __getattribute__(self, name):
log.append(("getattr", name))
return MT.__getattribute__(self, name)
def __setattr__(self, name, value):
log.append(("setattr", name, value))
MT.__setattr__(self, name, value)
def __delattr__(self, name):
log.append(("delattr", name))
MT.__delattr__(self, name)
a = MM()
a.foo = 12
x = a.foo
del a.foo
vereq(log, [("setattr", "foo", 12),
("getattr", "foo"),
("delattr", "foo")])
def multi():
if verbose: print "Testing multiple inheritance..."
class C(object):
def __init__(self):
self.__state = 0
def getstate(self):
return self.__state
def setstate(self, state):
self.__state = state
a = C()
vereq(a.getstate(), 0)
a.setstate(10)
vereq(a.getstate(), 10)
class D(dict, C):
def __init__(self):
type({}).__init__(self)
C.__init__(self)
d = D()
vereq(d.keys(), [])
d["hello"] = "world"
vereq(d.items(), [("hello", "world")])
vereq(d["hello"], "world")
vereq(d.getstate(), 0)
d.setstate(10)
vereq(d.getstate(), 10)
vereq(D.__mro__, (D, dict, C, object))
# SF bug #442833
class Node(object):
def __int__(self):
return int(self.foo())
def foo(self):
return "23"
class Frag(Node, list):
def foo(self):
return "42"
vereq(Node().__int__(), 23)
vereq(int(Node()), 23)
vereq(Frag().__int__(), 42)
vereq(int(Frag()), 42)
# MI mixing classic and new-style classes.
class A:
x = 1
class B(A):
pass
class C(A):
x = 2
class D(B, C):
pass
vereq(D.x, 1)
# Classic MRO is preserved for a classic base class.
class E(D, object):
pass
vereq(E.__mro__, (E, D, B, A, C, object))
vereq(E.x, 1)
# But with a mix of classic bases, their MROs are combined using
# new-style MRO.
class F(B, C, object):
pass
vereq(F.__mro__, (F, B, C, A, object))
vereq(F.x, 2)
# Try something else.
class C:
def cmethod(self):
return "C a"
def all_method(self):
return "C b"
class M1(C, object):
def m1method(self):
return "M1 a"
def all_method(self):
return "M1 b"
vereq(M1.__mro__, (M1, C, object))
m = M1()
vereq(m.cmethod(), "C a")
vereq(m.m1method(), "M1 a")
vereq(m.all_method(), "M1 b")
class D(C):
def dmethod(self):
return "D a"
def all_method(self):
return "D b"
class M2(object, D):
def m2method(self):
return "M2 a"
def all_method(self):
return "M2 b"
vereq(M2.__mro__, (M2, object, D, C))
m = M2()
vereq(m.cmethod(), "C a")
vereq(m.dmethod(), "D a")
vereq(m.m2method(), "M2 a")
vereq(m.all_method(), "M2 b")
class M3(M1, object, M2):
def m3method(self):
return "M3 a"
def all_method(self):
return "M3 b"
# XXX Expected this (the commented-out result):
# vereq(M3.__mro__, (M3, M1, M2, object, D, C))
vereq(M3.__mro__, (M3, M1, M2, D, C, object)) # XXX ?
m = M3()
vereq(m.cmethod(), "C a")
vereq(m.dmethod(), "D a")
vereq(m.m1method(), "M1 a")
vereq(m.m2method(), "M2 a")
vereq(m.m3method(), "M3 a")
vereq(m.all_method(), "M3 b")
class Classic:
pass
try:
class New(Classic):
__metaclass__ = type
except TypeError:
pass
else:
raise TestFailed, "new class with only classic bases - shouldn't be"
def diamond():
if verbose: print "Testing multiple inheritance special cases..."
class A(object):
def spam(self): return "A"
vereq(A().spam(), "A")
class B(A):
def boo(self): return "B"
def spam(self): return "B"
vereq(B().spam(), "B")
vereq(B().boo(), "B")
class C(A):
def boo(self): return "C"
vereq(C().spam(), "A")
vereq(C().boo(), "C")
class D(B, C): pass
vereq(D().spam(), "B")
vereq(D().boo(), "B")
vereq(D.__mro__, (D, B, C, A, object))
class E(C, B): pass
vereq(E().spam(), "B")
vereq(E().boo(), "C")
vereq(E.__mro__, (E, C, B, A, object))
class F(D, E): pass
vereq(F().spam(), "B")
vereq(F().boo(), "B")
vereq(F.__mro__, (F, D, E, B, C, A, object))
class G(E, D): pass
vereq(G().spam(), "B")
vereq(G().boo(), "C")
vereq(G.__mro__, (G, E, D, C, B, A, object))
def objects():
if verbose: print "Testing object class..."
a = object()
vereq(a.__class__, object)
vereq(type(a), object)
b = object()
verify(a is not b)
verify(not hasattr(a, "foo"))
try:
a.foo = 12
except (AttributeError, TypeError):
pass
else:
verify(0, "object() should not allow setting a foo attribute")
verify(not hasattr(object(), "__dict__"))
class Cdict(object):
pass
x = Cdict()
vereq(x.__dict__, {})
x.foo = 1
vereq(x.foo, 1)
vereq(x.__dict__, {'foo': 1})
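# __slots__: instances get no __dict__, only the named slots, and slot
# storage must not leak references (including in reference cycles).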
def slots():
if verbose: print "Testing __slots__..."
class C0(object):
__slots__ = []
x = C0()
verify(not hasattr(x, "__dict__"))
verify(not hasattr(x, "foo"))
class C1(object):
__slots__ = ['a']
x = C1()
verify(not hasattr(x, "__dict__"))
verify(not hasattr(x, "a"))
x.a = 1
vereq(x.a, 1)
x.a = None
veris(x.a, None)
del x.a
verify(not hasattr(x, "a"))
class C3(object):
__slots__ = ['a', 'b', 'c']
x = C3()
verify(not hasattr(x, "__dict__"))
verify(not hasattr(x, 'a'))
verify(not hasattr(x, 'b'))
verify(not hasattr(x, 'c'))
x.a = 1
x.b = 2
x.c = 3
vereq(x.a, 1)
vereq(x.b, 2)
vereq(x.c, 3)
# Test leaks
class Counted(object):
counter = 0 # counts the number of instances alive
def __init__(self):
Counted.counter += 1
def __del__(self):
Counted.counter -= 1
class C(object):
__slots__ = ['a', 'b', 'c']
x = C()
x.a = Counted()
x.b = Counted()
x.c = Counted()
vereq(Counted.counter, 3)
del x
vereq(Counted.counter, 0)
class D(C):
pass
x = D()
x.a = Counted()
x.z = Counted()
vereq(Counted.counter, 2)
del x
vereq(Counted.counter, 0)
class E(D):
__slots__ = ['e']
x = E()
x.a = Counted()
x.z = Counted()
x.e = Counted()
vereq(Counted.counter, 3)
del x
vereq(Counted.counter, 0)
# Test cyclical leaks [SF bug 519621]
class F(object):
__slots__ = ['a', 'b']
log = []
s = F()
s.a = [Counted(), s]
vereq(Counted.counter, 1)
s = None
import gc
gc.collect()
vereq(Counted.counter, 0)
# Test lookup leaks [SF bug 572567]
import sys,gc
class G(object):
def __cmp__(self, other):
return 0
g = G()
orig_objects = len(gc.get_objects())
for i in xrange(10):
g==g
new_objects = len(gc.get_objects())
vereq(orig_objects, new_objects)
def dynamics():
if verbose: print "Testing class attribute propagation..."
class D(object):
pass
class E(D):
pass
class F(D):
pass
D.foo = 1
vereq(D.foo, 1)
# Test that dynamic attributes are inherited
vereq(E.foo, 1)
vereq(F.foo, 1)
# Test dynamic instances
class C(object):
pass
a = C()
verify(not hasattr(a, "foobar"))
C.foobar = 2
vereq(a.foobar, 2)
C.method = lambda self: 42
vereq(a.method(), 42)
C.__repr__ = lambda self: "C()"
vereq(repr(a), "C()")
C.__int__ = lambda self: 100
vereq(int(a), 100)
vereq(a.foobar, 2)
verify(not hasattr(a, "spam"))
def mygetattr(self, name):
if name == "spam":
return "spam"
raise AttributeError
C.__getattr__ = mygetattr
vereq(a.spam, "spam")
a.new = 12
vereq(a.new, 12)
def mysetattr(self, name, value):
if name == "spam":
raise AttributeError
return object.__setattr__(self, name, value)
C.__setattr__ = mysetattr
try:
a.spam = "not spam"
except AttributeError:
pass
else:
verify(0, "expected AttributeError")
vereq(a.spam, "spam")
class D(C):
pass
d = D()
d.foo = 1
vereq(d.foo, 1)
# Test handling of int*seq and seq*int
class I(int):
pass
vereq("a"*I(2), "aa")
vereq(I(2)*"a", "aa")
vereq(2*I(3), 6)
vereq(I(3)*2, 6)
vereq(I(3)*I(2), 6)
# Test handling of long*seq and seq*long
class L(long):
pass
vereq("a"*L(2L), "aa")
vereq(L(2L)*"a", "aa")
vereq(2*L(3), 6)
vereq(L(3)*2, 6)
vereq(L(3)*L(2), 6)
# Test comparison of classes with dynamic metaclasses
class dynamicmetaclass(type):
pass
class someclass:
__metaclass__ = dynamicmetaclass
verify(someclass != object)
def errors():
if verbose: print "Testing errors..."
try:
class C(list, dict):
pass
except TypeError:
pass
else:
verify(0, "inheritance from both list and dict should be illegal")
try:
class C(object, None):
pass
except TypeError:
pass
else:
verify(0, "inheritance from non-type should be illegal")
class Classic:
pass
try:
class C(type(len)):
pass
except TypeError:
pass
else:
verify(0, "inheritance from CFunction should be illegal")
try:
class C(object):
__slots__ = 1
except TypeError:
pass
else:
verify(0, "__slots__ = 1 should be illegal")
try:
class C(object):
__slots__ = [1]
except TypeError:
pass
else:
verify(0, "__slots__ = [1] should be illegal")
def classmethods():
if verbose: print "Testing class methods..."
class C(object):
def foo(*a): return a
goo = classmethod(foo)
c = C()
vereq(C.goo(1), (C, 1))
vereq(c.goo(1), (C, 1))
vereq(c.foo(1), (c, 1))
class D(C):
pass
d = D()
vereq(D.goo(1), (D, 1))
vereq(d.goo(1), (D, 1))
vereq(d.foo(1), (d, 1))
vereq(D.foo(d, 1), (d, 1))
# Test for a specific crash (SF bug 528132)
def f(cls, arg): return (cls, arg)
ff = classmethod(f)
vereq(ff.__get__(0, int)(42), (int, 42))
vereq(ff.__get__(0)(42), (int, 42))
# Test super() with classmethods (SF bug 535444)
veris(C.goo.im_self, C)
veris(D.goo.im_self, D)
veris(super(D,D).goo.im_self, D)
veris(super(D,d).goo.im_self, D)
vereq(super(D,D).goo(), (D,))
vereq(super(D,d).goo(), (D,))
def staticmethods():
if verbose: print "Testing static methods..."
class C(object):
def foo(*a): return a
goo = staticmethod(foo)
c = C()
vereq(C.goo(1), (1,))
vereq(c.goo(1), (1,))
vereq(c.foo(1), (c, 1,))
class D(C):
pass
d = D()
vereq(D.goo(1), (1,))
vereq(d.goo(1), (1,))
vereq(d.foo(1), (d, 1))
vereq(D.foo(d, 1), (d, 1))
def classic():
if verbose: print "Testing classic classes..."
class C:
def foo(*a): return a
goo = classmethod(foo)
c = C()
vereq(C.goo(1), (C, 1))
vereq(c.goo(1), (C, 1))
vereq(c.foo(1), (c, 1))
class D(C):
pass
d = D()
vereq(D.goo(1), (D, 1))
vereq(d.goo(1), (D, 1))
vereq(d.foo(1), (d, 1))
vereq(D.foo(d, 1), (d, 1))
class E: # *not* subclassing from C
foo = C.foo
vereq(E().foo, C.foo) # i.e., unbound
verify(repr(C.foo.__get__(C())).startswith("<bound method "))
def compattr():
if verbose: print "Testing computed attributes..."
class C(object):
class computed_attribute(object):
def __init__(self, get, set=None, delete=None):
self.__get = get
self.__set = set
self.__delete = delete
def __get__(self, obj, type=None):
return self.__get(obj)
def __set__(self, obj, value):
return self.__set(obj, value)
def __delete__(self, obj):
return self.__delete(obj)
def __init__(self):
self.__x = 0
def __get_x(self):
x = self.__x
self.__x = x+1
return x
def __set_x(self, x):
self.__x = x
def __delete_x(self):
del self.__x
x = computed_attribute(__get_x, __set_x, __delete_x)
a = C()
vereq(a.x, 0)
vereq(a.x, 1)
a.x = 10
vereq(a.x, 10)
vereq(a.x, 11)
del a.x
vereq(hasattr(a, 'x'), 0)
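# Overriding __new__ in a subclass of a built-in type; __init__ still runs
# on the object returned by __new__.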
def newslot():
if verbose: print "Testing __new__ slot override..."
class C(list):
def __new__(cls):
self = list.__new__(cls)
self.foo = 1
return self
def __init__(self):
self.foo = self.foo + 2
a = C()
vereq(a.foo, 3)
verify(a.__class__ is C)
class D(C):
pass
b = D()
vereq(b.foo, 3)
verify(b.__class__ is D)
def altmro():
if verbose: print "Testing mro() and overriding it..."
class A(object):
def f(self): return "A"
class B(A):
pass
class C(A):
def f(self): return "C"
class D(B, C):
pass
vereq(D.mro(), [D, B, C, A, object])
vereq(D.__mro__, (D, B, C, A, object))
vereq(D().f(), "C")
class PerverseMetaType(type):
def mro(cls):
L = type.mro(cls)
L.reverse()
return L
class X(A,B,C,D):
__metaclass__ = PerverseMetaType
vereq(X.__mro__, (object, A, C, B, D, X))
vereq(X().f(), "A")
def overloading():
if verbose: print "Testing operator overloading..."
class B(object):
"Intermediate class because object doesn't have a __setattr__"
class C(B):
def __getattr__(self, name):
if name == "foo":
return ("getattr", name)
else:
raise AttributeError
def __setattr__(self, name, value):
if name == "foo":
self.setattr = (name, value)
else:
return B.__setattr__(self, name, value)
def __delattr__(self, name):
if name == "foo":
self.delattr = name
else:
return B.__delattr__(self, name)
def __getitem__(self, key):
return ("getitem", key)
def __setitem__(self, key, value):
self.setitem = (key, value)
def __delitem__(self, key):
self.delitem = key
def __getslice__(self, i, j):
return ("getslice", i, j)
def __setslice__(self, i, j, value):
self.setslice = (i, j, value)
def __delslice__(self, i, j):
self.delslice = (i, j)
a = C()
vereq(a.foo, ("getattr", "foo"))
a.foo = 12
vereq(a.setattr, ("foo", 12))
del a.foo
vereq(a.delattr, "foo")
vereq(a[12], ("getitem", 12))
a[12] = 21
vereq(a.setitem, (12, 21))
del a[12]
vereq(a.delitem, 12)
vereq(a[0:10], ("getslice", 0, 10))
a[0:10] = "foo"
vereq(a.setslice, (0, 10, "foo"))
del a[0:10]
vereq(a.delslice, (0, 10))
def methods():
if verbose: print "Testing methods..."
class C(object):
def __init__(self, x):
self.x = x
def foo(self):
return self.x
c1 = C(1)
vereq(c1.foo(), 1)
class D(C):
boo = C.foo
goo = c1.foo
d2 = D(2)
vereq(d2.foo(), 2)
vereq(d2.boo(), 2)
vereq(d2.goo(), 1)
class E(object):
foo = C.foo
vereq(E().foo, C.foo) # i.e., unbound
verify(repr(C.foo.__get__(C(1))).startswith("<bound method "))
def specials():
# Test operators like __hash__ for which a built-in default exists
if verbose: print "Testing special operators..."
# Test the default behavior for static classes
class C(object):
def __getitem__(self, i):
if 0 <= i < 10: return i
raise IndexError
c1 = C()
c2 = C()
verify(not not c1)
vereq(hash(c1), id(c1))
vereq(cmp(c1, c2), cmp(id(c1), id(c2)))
vereq(c1, c1)
verify(c1 != c2)
verify(not c1 != c1)
verify(not c1 == c2)
# Note that the module name appears in str/repr, and that varies
# depending on whether this test is run standalone or from a framework.
verify(str(c1).find('C object at ') >= 0)
vereq(str(c1), repr(c1))
verify(-1 not in c1)
for i in range(10):
verify(i in c1)
verify(10 not in c1)
# Test the default behavior for dynamic classes
class D(object):
def __getitem__(self, i):
if 0 <= i < 10: return i
raise IndexError
d1 = D()
d2 = D()
verify(not not d1)
vereq(hash(d1), id(d1))
vereq(cmp(d1, d2), cmp(id(d1), id(d2)))
vereq(d1, d1)
verify(d1 != d2)
verify(not d1 != d1)
verify(not d1 == d2)
# Note that the module name appears in str/repr, and that varies
# depending on whether this test is run standalone or from a framework.
verify(str(d1).find('D object at ') >= 0)
vereq(str(d1), repr(d1))
verify(-1 not in d1)
for i in range(10):
verify(i in d1)
verify(10 not in d1)
# Test overridden behavior for static classes
class Proxy(object):
def __init__(self, x):
self.x = x
def __nonzero__(self):
return not not self.x
def __hash__(self):
return hash(self.x)
def __eq__(self, other):
return self.x == other
def __ne__(self, other):
return self.x != other
def __cmp__(self, other):
return cmp(self.x, other.x)
def __str__(self):
return "Proxy:%s" % self.x
def __repr__(self):
return "Proxy(%r)" % self.x
def __contains__(self, value):
return value in self.x
p0 = Proxy(0)
p1 = Proxy(1)
p_1 = Proxy(-1)
verify(not p0)
verify(not not p1)
vereq(hash(p0), hash(0))
vereq(p0, p0)
verify(p0 != p1)
verify(not p0 != p0)
vereq(not p0, p1)
vereq(cmp(p0, p1), -1)
vereq(cmp(p0, p0), 0)
vereq(cmp(p0, p_1), 1)
vereq(str(p0), "Proxy:0")
vereq(repr(p0), "Proxy(0)")
p10 = Proxy(range(10))
verify(-1 not in p10)
for i in range(10):
verify(i in p10)
verify(10 not in p10)
# Test overridden behavior for dynamic classes
class DProxy(object):
def __init__(self, x):
self.x = x
def __nonzero__(self):
return not not self.x
def __hash__(self):
return hash(self.x)
def __eq__(self, other):
return self.x == other
def __ne__(self, other):
return self.x != other
def __cmp__(self, other):
return cmp(self.x, other.x)
def __str__(self):
return "DProxy:%s" % self.x
def __repr__(self):
return "DProxy(%r)" % self.x
def __contains__(self, value):
return value in self.x
p0 = DProxy(0)
p1 = DProxy(1)
p_1 = DProxy(-1)
verify(not p0)
verify(not not p1)
vereq(hash(p0), hash(0))
vereq(p0, p0)
verify(p0 != p1)
verify(not p0 != p0)
vereq(not p0, p1)
vereq(cmp(p0, p1), -1)
vereq(cmp(p0, p0), 0)
vereq(cmp(p0, p_1), 1)
vereq(str(p0), "DProxy:0")
vereq(repr(p0), "DProxy(0)")
p10 = DProxy(range(10))
verify(-1 not in p10)
for i in range(10):
verify(i in p10)
verify(10 not in p10)
# Safety test for __cmp__
def unsafecmp(a, b):
try:
a.__class__.__cmp__(a, b)
except TypeError:
pass
else:
raise TestFailed, "shouldn't allow %s.__cmp__(%r, %r)" % (
a.__class__, a, b)
unsafecmp(u"123", "123")
unsafecmp("123", u"123")
unsafecmp(1, 1.0)
unsafecmp(1.0, 1)
unsafecmp(1, 1L)
unsafecmp(1L, 1)
class Letter(str):
def __new__(cls, letter):
if letter == 'EPS':
return str.__new__(cls)
return str.__new__(cls, letter)
def __str__(self):
if not self:
return 'EPS'
return self
# sys.stdout needs to be the original to trigger the recursion bug
import sys
test_stdout = sys.stdout
sys.stdout = get_original_stdout()
try:
# nothing should actually be printed, this should raise an exception
print Letter('w')
except RuntimeError:
pass
else:
raise TestFailed, "expected a RuntimeError for print recursion"
sys.stdout = test_stdout
def weakrefs():
if verbose: print "Testing weak references..."
import weakref
class C(object):
pass
c = C()
r = weakref.ref(c)
verify(r() is c)
del c
verify(r() is None)
del r
class NoWeak(object):
__slots__ = ['foo']
no = NoWeak()
try:
weakref.ref(no)
except TypeError, msg:
verify(str(msg).find("weak reference") >= 0)
else:
verify(0, "weakref.ref(no) should be illegal")
class Weak(object):
__slots__ = ['foo', '__weakref__']
yes = Weak()
r = weakref.ref(yes)
verify(r() is yes)
del yes
verify(r() is None)
del r
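# property(): getter/setter/deleter plus doc string, and the read-only
# fget/fset/fdel/__doc__ attributes on the raw descriptor.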
def properties():
if verbose: print "Testing property..."
class C(object):
def getx(self):
return self.__x
def setx(self, value):
self.__x = value
def delx(self):
del self.__x
x = property(getx, setx, delx, doc="I'm the x property.")
a = C()
verify(not hasattr(a, "x"))
a.x = 42
vereq(a._C__x, 42)
vereq(a.x, 42)
del a.x
verify(not hasattr(a, "x"))
verify(not hasattr(a, "_C__x"))
C.x.__set__(a, 100)
vereq(C.x.__get__(a), 100)
C.x.__delete__(a)
verify(not hasattr(a, "x"))
raw = C.__dict__['x']
verify(isinstance(raw, property))
attrs = dir(raw)
verify("__doc__" in attrs)
verify("fget" in attrs)
verify("fset" in attrs)
verify("fdel" in attrs)
vereq(raw.__doc__, "I'm the x property.")
verify(raw.fget is C.__dict__['getx'])
verify(raw.fset is C.__dict__['setx'])
verify(raw.fdel is C.__dict__['delx'])
for attr in "__doc__", "fget", "fset", "fdel":
try:
setattr(raw, attr, 42)
except TypeError, msg:
if str(msg).find('readonly') < 0:
raise TestFailed("when setting readonly attr %r on a "
"property, got unexpected TypeError "
"msg %r" % (attr, str(msg)))
else:
raise TestFailed("expected TypeError from trying to set "
"readonly %r attr on a property" % attr)
class D(object):
__getitem__ = property(lambda s: 1/0)
d = D()
try:
for i in d:
str(i)
except ZeroDivisionError:
pass
else:
raise TestFailed, "expected ZeroDivisionError from bad property"
def supers():
if verbose: print "Testing super..."
class A(object):
def meth(self, a):
return "A(%r)" % a
vereq(A().meth(1), "A(1)")
class B(A):
def __init__(self):
self.__super = super(B, self)
def meth(self, a):
return "B(%r)" % a + self.__super.meth(a)
vereq(B().meth(2), "B(2)A(2)")
class C(A):
def meth(self, a):
return "C(%r)" % a + self.__super.meth(a)
C._C__super = super(C)
vereq(C().meth(3), "C(3)A(3)")
class D(C, B):
def meth(self, a):
return "D(%r)" % a + super(D, self).meth(a)
vereq(D().meth(4), "D(4)C(4)B(4)A(4)")
# Test for subclassing super
class mysuper(super):
def __init__(self, *args):
return super(mysuper, self).__init__(*args)
class E(D):
def meth(self, a):
return "E(%r)" % a + mysuper(E, self).meth(a)
vereq(E().meth(5), "E(5)D(5)C(5)B(5)A(5)")
class F(E):
def meth(self, a):
s = self.__super
return "F(%r)[%s]" % (a, s.__class__.__name__) + s.meth(a)
F._F__super = mysuper(F)
vereq(F().meth(6), "F(6)[mysuper]E(6)D(6)C(6)B(6)A(6)")
# Make sure certain errors are raised
try:
super(D, 42)
except TypeError:
pass
else:
raise TestFailed, "shouldn't allow super(D, 42)"
try:
super(D, C())
except TypeError:
pass
else:
raise TestFailed, "shouldn't allow super(D, C())"
try:
super(D).__get__(12)
except TypeError:
pass
else:
raise TestFailed, "shouldn't allow super(D).__get__(12)"
try:
super(D).__get__(C())
except TypeError:
pass
else:
raise TestFailed, "shouldn't allow super(D).__get__(C())"
def inherits():
if verbose: print "Testing inheritance from basic types..."
class hexint(int):
def __repr__(self):
return hex(self)
def __add__(self, other):
return hexint(int.__add__(self, other))
# (Note that overriding __radd__ doesn't work,
# because the int type gets first dibs.)
vereq(repr(hexint(7) + 9), "0x10")
vereq(repr(hexint(1000) + 7), "0x3ef")
a = hexint(12345)
vereq(a, 12345)
vereq(int(a), 12345)
verify(int(a).__class__ is int)
vereq(hash(a), hash(12345))
verify((+a).__class__ is int)
verify((a >> 0).__class__ is int)
verify((a << 0).__class__ is int)
verify((hexint(0) << 12).__class__ is int)
verify((hexint(0) >> 12).__class__ is int)
class octlong(long):
__slots__ = []
def __str__(self):
s = oct(self)
if s[-1] == 'L':
s = s[:-1]
return s
def __add__(self, other):
return self.__class__(super(octlong, self).__add__(other))
__radd__ = __add__
vereq(str(octlong(3) + 5), "010")
# (Note that overriding __radd__ here only seems to work
# because the example uses a short int left argument.)
vereq(str(5 + octlong(3000)), "05675")
a = octlong(12345)
vereq(a, 12345L)
vereq(long(a), 12345L)
vereq(hash(a), hash(12345L))
verify(long(a).__class__ is long)
verify((+a).__class__ is long)
verify((-a).__class__ is long)
verify((-octlong(0)).__class__ is long)
verify((a >> 0).__class__ is long)
verify((a << 0).__class__ is long)
verify((a - 0).__class__ is long)
verify((a * 1).__class__ is long)
verify((a ** 1).__class__ is long)
verify((a // 1).__class__ is long)
verify((1 * a).__class__ is long)
verify((a | 0).__class__ is long)
verify((a ^ 0).__class__ is long)
verify((a & -1L).__class__ is long)
verify((octlong(0) << 12).__class__ is long)
verify((octlong(0) >> 12).__class__ is long)
verify(abs(octlong(0)).__class__ is long)
# Because octlong overrides __add__, we can't check the absence of +0
# optimizations using octlong.
class longclone(long):
pass
a = longclone(1)
verify((a + 0).__class__ is long)
verify((0 + a).__class__ is long)
# Check that negative clones don't segfault
a = longclone(-1)
vereq(a.__dict__, {})
vereq(long(a), -1) # verify PyNumber_Long() copies the sign bit
class precfloat(float):
__slots__ = ['prec']
def __init__(self, value=0.0, prec=12):
self.prec = int(prec)
float.__init__(self, value)
def __repr__(self):
return "%.*g" % (self.prec, self)
vereq(repr(precfloat(1.1)), "1.1")
a = precfloat(12345)
vereq(a, 12345.0)
vereq(float(a), 12345.0)
verify(float(a).__class__ is float)
vereq(hash(a), hash(12345.0))
verify((+a).__class__ is float)
class madcomplex(complex):
def __repr__(self):
return "%.17gj%+.17g" % (self.imag, self.real)
a = madcomplex(-3, 4)
vereq(repr(a), "4j-3")
base = complex(-3, 4)
veris(base.__class__, complex)
vereq(a, base)
vereq(complex(a), base)
veris(complex(a).__class__, complex)
a = madcomplex(a) # just trying another form of the constructor
vereq(repr(a), "4j-3")
vereq(a, base)
vereq(complex(a), base)
veris(complex(a).__class__, complex)
vereq(hash(a), hash(base))
veris((+a).__class__, complex)
veris((a + 0).__class__, complex)
vereq(a + 0, base)
veris((a - 0).__class__, complex)
vereq(a - 0, base)
veris((a * 1).__class__, complex)
vereq(a * 1, base)
veris((a / 1).__class__, complex)
vereq(a / 1, base)
class madtuple(tuple):
_rev = None
def rev(self):
if self._rev is not None:
return self._rev
L = list(self)
L.reverse()
self._rev = self.__class__(L)
return self._rev
a = madtuple((1,2,3,4,5,6,7,8,9,0))
vereq(a, (1,2,3,4,5,6,7,8,9,0))
vereq(a.rev(), madtuple((0,9,8,7,6,5,4,3,2,1)))
vereq(a.rev().rev(), madtuple((1,2,3,4,5,6,7,8,9,0)))
for i in range(512):
t = madtuple(range(i))
u = t.rev()
v = u.rev()
vereq(v, t)
a = madtuple((1,2,3,4,5))
vereq(tuple(a), (1,2,3,4,5))
verify(tuple(a).__class__ is tuple)
vereq(hash(a), hash((1,2,3,4,5)))
verify(a[:].__class__ is tuple)
verify((a * 1).__class__ is tuple)
verify((a * 0).__class__ is tuple)
verify((a + ()).__class__ is tuple)
a = madtuple(())
vereq(tuple(a), ())
verify(tuple(a).__class__ is tuple)
verify((a + a).__class__ is tuple)
verify((a * 0).__class__ is tuple)
verify((a * 1).__class__ is tuple)
verify((a * 2).__class__ is tuple)
verify(a[:].__class__ is tuple)
class madstring(str):
_rev = None
def rev(self):
if self._rev is not None:
return self._rev
L = list(self)
L.reverse()
self._rev = self.__class__("".join(L))
return self._rev
s = madstring("abcdefghijklmnopqrstuvwxyz")
vereq(s, "abcdefghijklmnopqrstuvwxyz")
vereq(s.rev(), madstring("zyxwvutsrqponmlkjihgfedcba"))
vereq(s.rev().rev(), madstring("abcdefghijklmnopqrstuvwxyz"))
for i in range(256):
s = madstring("".join(map(chr, range(i))))
t = s.rev()
u = t.rev()
vereq(u, s)
s = madstring("12345")
vereq(str(s), "12345")
verify(str(s).__class__ is str)
base = "\x00" * 5
s = madstring(base)
vereq(s, base)
vereq(str(s), base)
verify(str(s).__class__ is str)
vereq(hash(s), hash(base))
vereq({s: 1}[base], 1)
vereq({base: 1}[s], 1)
verify((s + "").__class__ is str)
vereq(s + "", base)
verify(("" + s).__class__ is str)
vereq("" + s, base)
verify((s * 0).__class__ is str)
vereq(s * 0, "")
verify((s * 1).__class__ is str)
vereq(s * 1, base)
verify((s * 2).__class__ is str)
vereq(s * 2, base + base)
verify(s[:].__class__ is str)
vereq(s[:], base)
verify(s[0:0].__class__ is str)
vereq(s[0:0], "")
verify(s.strip().__class__ is str)
vereq(s.strip(), base)
verify(s.lstrip().__class__ is str)
vereq(s.lstrip(), base)
verify(s.rstrip().__class__ is str)
vereq(s.rstrip(), base)
identitytab = ''.join([chr(i) for i in range(256)])
verify(s.translate(identitytab).__class__ is str)
vereq(s.translate(identitytab), base)
verify(s.translate(identitytab, "x").__class__ is str)
vereq(s.translate(identitytab, "x"), base)
vereq(s.translate(identitytab, "\x00"), "")
verify(s.replace("x", "x").__class__ is str)
vereq(s.replace("x", "x"), base)
verify(s.ljust(len(s)).__class__ is str)
vereq(s.ljust(len(s)), base)
verify(s.rjust(len(s)).__class__ is str)
vereq(s.rjust(len(s)), base)
verify(s.center(len(s)).__class__ is str)
vereq(s.center(len(s)), base)
verify(s.lower().__class__ is str)
vereq(s.lower(), base)
s = madstring("x y")
vereq(s, "x y")
verify(intern(s).__class__ is str)
verify(intern(s) is intern("x y"))
vereq(intern(s), "x y")
i = intern("y x")
s = madstring("y x")
vereq(s, i)
verify(intern(s).__class__ is str)
verify(intern(s) is i)
s = madstring(i)
verify(intern(s).__class__ is str)
verify(intern(s) is i)
class madunicode(unicode):
_rev = None
def rev(self):
if self._rev is not None:
return self._rev
L = list(self)
L.reverse()
self._rev = self.__class__(u"".join(L))
return self._rev
u = madunicode("ABCDEF")
vereq(u, u"ABCDEF")
vereq(u.rev(), madunicode(u"FEDCBA"))
vereq(u.rev().rev(), madunicode(u"ABCDEF"))
base = u"12345"
u = madunicode(base)
vereq(unicode(u), base)
verify(unicode(u).__class__ is unicode)
vereq(hash(u), hash(base))
vereq({u: 1}[base], 1)
vereq({base: 1}[u], 1)
verify(u.strip().__class__ is unicode)
vereq(u.strip(), base)
verify(u.lstrip().__class__ is unicode)
vereq(u.lstrip(), base)
verify(u.rstrip().__class__ is unicode)
vereq(u.rstrip(), base)
verify(u.replace(u"x", u"x").__class__ is unicode)
vereq(u.replace(u"x", u"x"), base)
verify(u.replace(u"xy", u"xy").__class__ is unicode)
vereq(u.replace(u"xy", u"xy"), base)
verify(u.center(len(u)).__class__ is unicode)
vereq(u.center(len(u)), base)
verify(u.ljust(len(u)).__class__ is unicode)
vereq(u.ljust(len(u)), base)
verify(u.rjust(len(u)).__class__ is unicode)
vereq(u.rjust(len(u)), base)
verify(u.lower().__class__ is unicode)
vereq(u.lower(), base)
verify(u.upper().__class__ is unicode)
vereq(u.upper(), base)
verify(u.capitalize().__class__ is unicode)
vereq(u.capitalize(), base)
verify(u.title().__class__ is unicode)
vereq(u.title(), base)
verify((u + u"").__class__ is unicode)
vereq(u + u"", base)
verify((u"" + u).__class__ is unicode)
vereq(u"" + u, base)
verify((u * 0).__class__ is unicode)
vereq(u * 0, u"")
verify((u * 1).__class__ is unicode)
vereq(u * 1, base)
verify((u * 2).__class__ is unicode)
vereq(u * 2, base + base)
verify(u[:].__class__ is unicode)
vereq(u[:], base)
verify(u[0:0].__class__ is unicode)
vereq(u[0:0], u"")
class sublist(list):
pass
a = sublist(range(5))
vereq(a, range(5))
a.append("hello")
vereq(a, range(5) + ["hello"])
a[5] = 5
vereq(a, range(6))
a.extend(range(6, 20))
vereq(a, range(20))
a[-5:] = []
vereq(a, range(15))
del a[10:15]
vereq(len(a), 10)
vereq(a, range(10))
vereq(list(a), range(10))
vereq(a[0], 0)
vereq(a[9], 9)
vereq(a[-10], 0)
vereq(a[-1], 9)
vereq(a[:5], range(5))
class CountedInput(file):
"""Counts lines read by self.readline().
self.lineno is the 0-based ordinal of the last line read, up to
a maximum of one greater than the number of lines in the file.
self.ateof is true if and only if the final "" line has been read,
at which point self.lineno stops incrementing, and further calls
to readline() continue to return "".
"""
lineno = 0
ateof = 0
def readline(self):
if self.ateof:
return ""
s = file.readline(self)
# Next line works too.
# s = super(CountedInput, self).readline()
self.lineno += 1
if s == "":
self.ateof = 1
return s
f = file(name=TESTFN, mode='w')
lines = ['a\n', 'b\n', 'c\n']
try:
f.writelines(lines)
f.close()
f = CountedInput(TESTFN)
for (i, expected) in zip(range(1, 5) + [4], lines + 2 * [""]):
got = f.readline()
vereq(expected, got)
vereq(f.lineno, i)
vereq(f.ateof, (i > len(lines)))
f.close()
finally:
try:
f.close()
except:
pass
try:
import os
os.unlink(TESTFN)
except:
pass
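# Basic type constructors should accept their documented keyword arguments
# and reject bogus ones with TypeError.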
def keywords():
if verbose:
print "Testing keyword args to basic type constructors ..."
vereq(int(x=1), 1)
vereq(float(x=2), 2.0)
vereq(long(x=3), 3L)
vereq(complex(imag=42, real=666), complex(666, 42))
vereq(str(object=500), '500')
vereq(unicode(string='abc', errors='strict'), u'abc')
vereq(tuple(sequence=range(3)), (0, 1, 2))
vereq(list(sequence=(0, 1, 2)), range(3))
vereq(dict(items={1: 2}), {1: 2})
for constructor in (int, float, long, complex, str, unicode,
tuple, list, dict, file):
try:
constructor(bogus_keyword_arg=1)
except TypeError:
pass
else:
raise TestFailed("expected TypeError from bogus keyword "
"argument to %r" % constructor)
def restricted():
# XXX This test is disabled because rexec is not deemed safe
return
import rexec
if verbose:
print "Testing interaction with restricted execution ..."
sandbox = rexec.RExec()
code1 = """f = open(%r, 'w')""" % TESTFN
code2 = """f = file(%r, 'w')""" % TESTFN
code3 = """\
f = open(%r)
t = type(f) # a sneaky way to get the file() constructor
f.close()
f = t(%r, 'w') # rexec can't catch this by itself
""" % (TESTFN, TESTFN)
f = open(TESTFN, 'w') # Create the file so code3 can find it.
f.close()
try:
for code in code1, code2, code3:
try:
sandbox.r_exec(code)
except IOError, msg:
if str(msg).find("restricted") >= 0:
outcome = "OK"
else:
outcome = "got an exception, but not an expected one"
else:
outcome = "expected a restricted-execution exception"
if outcome != "OK":
raise TestFailed("%s, in %r" % (outcome, code))
finally:
try:
import os
os.unlink(TESTFN)
except:
pass
def str_subclass_as_dict_key():
if verbose:
print "Testing a str subclass used as dict key .."
class cistr(str):
"""Sublcass of str that computes __eq__ case-insensitively.
Also computes a hash code of the string in canonical form.
"""
def __init__(self, value):
self.canonical = value.lower()
self.hashcode = hash(self.canonical)
def __eq__(self, other):
if not isinstance(other, cistr):
other = cistr(other)
return self.canonical == other.canonical
def __hash__(self):
return self.hashcode
vereq(cistr('ABC'), 'abc')
vereq('aBc', cistr('ABC'))
vereq(str(cistr('ABC')), 'ABC')
d = {cistr('one'): 1, cistr('two'): 2, cistr('tHree'): 3}
vereq(d[cistr('one')], 1)
vereq(d[cistr('tWo')], 2)
vereq(d[cistr('THrEE')], 3)
verify(cistr('ONe') in d)
vereq(d.get(cistr('thrEE')), 3)
def classic_comparisons():
if verbose: print "Testing classic comparisons..."
class classic:
pass
for base in (classic, int, object):
if verbose: print " (base = %s)" % base
class C(base):
def __init__(self, value):
self.value = int(value)
def __cmp__(self, other):
if isinstance(other, C):
return cmp(self.value, other.value)
if isinstance(other, int) or isinstance(other, long):
return cmp(self.value, other)
return NotImplemented
c1 = C(1)
c2 = C(2)
c3 = C(3)
vereq(c1, 1)
c = {1: c1, 2: c2, 3: c3}
for x in 1, 2, 3:
for y in 1, 2, 3:
verify(cmp(c[x], c[y]) == cmp(x, y), "x=%d, y=%d" % (x, y))
for op in "<", "<=", "==", "!=", ">", ">=":
verify(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
verify(cmp(c[x], y) == cmp(x, y), "x=%d, y=%d" % (x, y))
verify(cmp(x, c[y]) == cmp(x, y), "x=%d, y=%d" % (x, y))
def rich_comparisons():
if verbose:
print "Testing rich comparisons..."
class Z(complex):
pass
z = Z(1)
vereq(z, 1+0j)
vereq(1+0j, z)
class ZZ(complex):
def __eq__(self, other):
try:
return abs(self - other) <= 1e-6
except:
return NotImplemented
zz = ZZ(1.0000003)
vereq(zz, 1+0j)
vereq(1+0j, zz)
class classic:
pass
for base in (classic, int, object, list):
if verbose: print " (base = %s)" % base
class C(base):
def __init__(self, value):
self.value = int(value)
def __cmp__(self, other):
raise TestFailed, "shouldn't call __cmp__"
def __eq__(self, other):
if isinstance(other, C):
return self.value == other.value
if isinstance(other, int) or isinstance(other, long):
return self.value == other
return NotImplemented
def __ne__(self, other):
if isinstance(other, C):
return self.value != other.value
if isinstance(other, int) or isinstance(other, long):
return self.value != other
return NotImplemented
def __lt__(self, other):
if isinstance(other, C):
return self.value < other.value
if isinstance(other, int) or isinstance(other, long):
return self.value < other
return NotImplemented
def __le__(self, other):
if isinstance(other, C):
return self.value <= other.value
if isinstance(other, int) or isinstance(other, long):
return self.value <= other
return NotImplemented
def __gt__(self, other):
if isinstance(other, C):
return self.value > other.value
if isinstance(other, int) or isinstance(other, long):
return self.value > other
return NotImplemented
def __ge__(self, other):
if isinstance(other, C):
return self.value >= other.value
if isinstance(other, int) or isinstance(other, long):
return self.value >= other
return NotImplemented
c1 = C(1)
c2 = C(2)
c3 = C(3)
vereq(c1, 1)
c = {1: c1, 2: c2, 3: c3}
for x in 1, 2, 3:
for y in 1, 2, 3:
for op in "<", "<=", "==", "!=", ">", ">=":
verify(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
verify(eval("c[x] %s y" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
verify(eval("x %s c[y]" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
def coercions():
if verbose: print "Testing coercions..."
class I(int): pass
coerce(I(0), 0)
coerce(0, I(0))
class L(long): pass
coerce(L(0), 0)
coerce(L(0), 0L)
coerce(0, L(0))
coerce(0L, L(0))
class F(float): pass
coerce(F(0), 0)
coerce(F(0), 0L)
coerce(F(0), 0.)
coerce(0, F(0))
coerce(0L, F(0))
coerce(0., F(0))
class C(complex): pass
coerce(C(0), 0)
coerce(C(0), 0L)
coerce(C(0), 0.)
coerce(C(0), 0j)
coerce(0, C(0))
coerce(0L, C(0))
coerce(0., C(0))
coerce(0j, C(0))
def descrdoc():
if verbose: print "Testing descriptor doc strings..."
def check(descr, what):
vereq(descr.__doc__, what)
check(file.closed, "flag set if the file is closed") # getset descriptor
check(file.name, "file name") # member descriptor
def setclass():
if verbose: print "Testing __class__ assignment..."
class C(object): pass
class D(object): pass
class E(object): pass
class F(D, E): pass
for cls in C, D, E, F:
for cls2 in C, D, E, F:
x = cls()
x.__class__ = cls2
verify(x.__class__ is cls2)
x.__class__ = cls
verify(x.__class__ is cls)
def cant(x, C):
try:
x.__class__ = C
except TypeError:
pass
else:
raise TestFailed, "shouldn't allow %r.__class__ = %r" % (x, C)
try:
delattr(x, "__class__")
except TypeError:
pass
else:
raise TestFailed, "shouldn't allow del %r.__class__" % x
cant(C(), list)
cant(list(), C)
cant(C(), 1)
cant(C(), object)
cant(object(), list)
cant(list(), object)
o = object()
cant(o, type(1))
cant(o, type(None))
del o
def setdict():
if verbose: print "Testing __dict__ assignment..."
class C(object): pass
a = C()
a.__dict__ = {'b': 1}
vereq(a.b, 1)
def cant(x, dict):
try:
x.__dict__ = dict
except TypeError:
pass
else:
raise TestFailed, "shouldn't allow %r.__dict__ = %r" % (x, dict)
cant(a, None)
cant(a, [])
cant(a, 1)
del a.__dict__ # Deleting __dict__ is allowed
# Classes don't allow __dict__ assignment
cant(C, {})
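# Pickling and deepcopy of new-style classes and instances, including
# __getstate__/__setstate__ and classic/new-style hybrid classes.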
def pickles():
if verbose:
print "Testing pickling and copying new-style classes and objects..."
import pickle, cPickle
def sorteditems(d):
L = d.items()
L.sort()
return L
global C
class C(object):
def __init__(self, a, b):
super(C, self).__init__()
self.a = a
self.b = b
def __repr__(self):
return "C(%r, %r)" % (self.a, self.b)
global C1
class C1(list):
def __new__(cls, a, b):
return super(C1, cls).__new__(cls)
def __init__(self, a, b):
self.a = a
self.b = b
def __repr__(self):
return "C1(%r, %r)<%r>" % (self.a, self.b, list(self))
global C2
class C2(int):
def __new__(cls, a, b, val=0):
return super(C2, cls).__new__(cls, val)
def __init__(self, a, b, val=0):
self.a = a
self.b = b
def __repr__(self):
return "C2(%r, %r)<%r>" % (self.a, self.b, int(self))
global C3
class C3(object):
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, foo):
self.foo = foo
global C4classic, C4
class C4classic: # classic
pass
class C4(C4classic, object): # mixed inheritance
pass
for p in pickle, cPickle:
for bin in 0, 1:
if verbose:
print p.__name__, ["text", "binary"][bin]
for cls in C, C1, C2:
s = p.dumps(cls, bin)
cls2 = p.loads(s)
verify(cls2 is cls)
a = C1(1, 2); a.append(42); a.append(24)
b = C2("hello", "world", 42)
s = p.dumps((a, b), bin)
x, y = p.loads(s)
vereq(x.__class__, a.__class__)
vereq(sorteditems(x.__dict__), sorteditems(a.__dict__))
vereq(y.__class__, b.__class__)
vereq(sorteditems(y.__dict__), sorteditems(b.__dict__))
vereq(`x`, `a`)
vereq(`y`, `b`)
if verbose:
print "a = x =", a
print "b = y =", b
# Test for __getstate__ and __setstate__ on new style class
u = C3(42)
s = p.dumps(u, bin)
v = p.loads(s)
veris(u.__class__, v.__class__)
vereq(u.foo, v.foo)
# Test for picklability of hybrid class
u = C4()
u.foo = 42
s = p.dumps(u, bin)
v = p.loads(s)
veris(u.__class__, v.__class__)
vereq(u.foo, v.foo)
# Testing copy.deepcopy()
if verbose:
print "deepcopy"
import copy
for cls in C, C1, C2:
cls2 = copy.deepcopy(cls)
verify(cls2 is cls)
a = C1(1, 2); a.append(42); a.append(24)
b = C2("hello", "world", 42)
x, y = copy.deepcopy((a, b))
vereq(x.__class__, a.__class__)
vereq(sorteditems(x.__dict__), sorteditems(a.__dict__))
vereq(y.__class__, b.__class__)
vereq(sorteditems(y.__dict__), sorteditems(b.__dict__))
vereq(`x`, `a`)
vereq(`y`, `b`)
if verbose:
print "a = x =", a
print "b = y =", b
def pickleslots():
if verbose: print "Testing pickling of classes with __slots__ ..."
import pickle, cPickle
# Pickling of classes with __slots__ but without __getstate__ should fail
global B, C, D, E
class B(object):
pass
for base in [object, B]:
class C(base):
__slots__ = ['a']
class D(C):
pass
try:
pickle.dumps(C())
except TypeError:
pass
else:
raise TestFailed, "should fail: pickle C instance - %s" % base
try:
cPickle.dumps(C())
except TypeError:
pass
else:
raise TestFailed, "should fail: cPickle C instance - %s" % base
try:
pickle.dumps(D())
except TypeError:
pass
else:
raise TestFailed, "should fail: pickle D instance - %s" % base
try:
cPickle.dumps(D())
except TypeError:
pass
else:
raise TestFailed, "should fail: cPickle D instance - %s" % base
# Give C a __getstate__ and __setstate__
class C(base):
__slots__ = ['a']
def __getstate__(self):
try:
d = self.__dict__.copy()
except AttributeError:
d = {}
try:
d['a'] = self.a
except AttributeError:
pass
return d
def __setstate__(self, d):
for k, v in d.items():
setattr(self, k, v)
class D(C):
pass
# Now it should work
x = C()
y = pickle.loads(pickle.dumps(x))
vereq(hasattr(y, 'a'), 0)
y = cPickle.loads(cPickle.dumps(x))
vereq(hasattr(y, 'a'), 0)
x.a = 42
y = pickle.loads(pickle.dumps(x))
vereq(y.a, 42)
y = cPickle.loads(cPickle.dumps(x))
vereq(y.a, 42)
x = D()
x.a = 42
x.b = 100
y = pickle.loads(pickle.dumps(x))
vereq(y.a + y.b, 142)
y = cPickle.loads(cPickle.dumps(x))
vereq(y.a + y.b, 142)
# But a subclass that adds a slot should not work
class E(C):
__slots__ = ['b']
try:
pickle.dumps(E())
except TypeError:
pass
else:
raise TestFailed, "should fail: pickle E instance - %s" % base
try:
cPickle.dumps(E())
except TypeError:
pass
else:
raise TestFailed, "should fail: cPickle E instance - %s" % base
def copies():
if verbose: print "Testing copy.copy() and copy.deepcopy()..."
import copy
class C(object):
pass
a = C()
a.foo = 12
b = copy.copy(a)
vereq(b.__dict__, a.__dict__)
a.bar = [1,2,3]
c = copy.copy(a)
vereq(c.bar, a.bar)
verify(c.bar is a.bar)
d = copy.deepcopy(a)
vereq(d.__dict__, a.__dict__)
a.bar.append(4)
vereq(d.bar, [1,2,3])
def binopoverride():
if verbose: print "Testing overrides of binary operations..."
class I(int):
def __repr__(self):
return "I(%r)" % int(self)
def __add__(self, other):
return I(int(self) + int(other))
__radd__ = __add__
def __pow__(self, other, mod=None):
if mod is None:
return I(pow(int(self), int(other)))
else:
return I(pow(int(self), int(other), int(mod)))
def __rpow__(self, other, mod=None):
if mod is None:
return I(pow(int(other), int(self), mod))
else:
return I(pow(int(other), int(self), int(mod)))
vereq(`I(1) + I(2)`, "I(3)")
vereq(`I(1) + 2`, "I(3)")
vereq(`1 + I(2)`, "I(3)")
vereq(`I(2) ** I(3)`, "I(8)")
vereq(`2 ** I(3)`, "I(8)")
vereq(`I(2) ** 3`, "I(8)")
vereq(`pow(I(2), I(3), I(5))`, "I(3)")
class S(str):
def __eq__(self, other):
return self.lower() == other.lower()
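# Changes to slot methods (__hash__, __getattribute__, __getattr__, ...) on a
# base class must propagate to already-created subclasses and their instances.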
def subclasspropagation():
if verbose: print "Testing propagation of slot functions to subclasses..."
class A(object):
pass
class B(A):
pass
class C(A):
pass
class D(B, C):
pass
d = D()
vereq(hash(d), id(d))
A.__hash__ = lambda self: 42
vereq(hash(d), 42)
C.__hash__ = lambda self: 314
vereq(hash(d), 314)
B.__hash__ = lambda self: 144
vereq(hash(d), 144)
D.__hash__ = lambda self: 100
vereq(hash(d), 100)
del D.__hash__
vereq(hash(d), 144)
del B.__hash__
vereq(hash(d), 314)
del C.__hash__
vereq(hash(d), 42)
del A.__hash__
vereq(hash(d), id(d))
d.foo = 42
d.bar = 42
vereq(d.foo, 42)
vereq(d.bar, 42)
def __getattribute__(self, name):
if name == "foo":
return 24
return object.__getattribute__(self, name)
A.__getattribute__ = __getattribute__
vereq(d.foo, 24)
vereq(d.bar, 42)
def __getattr__(self, name):
if name in ("spam", "foo", "bar"):
return "hello"
raise AttributeError, name
B.__getattr__ = __getattr__
vereq(d.spam, "hello")
vereq(d.foo, 24)
vereq(d.bar, 42)
del A.__getattribute__
vereq(d.foo, 42)
del d.foo
vereq(d.foo, "hello")
vereq(d.bar, 42)
del B.__getattr__
try:
d.foo
except AttributeError:
pass
else:
raise TestFailed, "d.foo should be undefined now"
# Test a nasty bug in recurse_down_subclasses()
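    # (after B is deleted and collected, A's list of subclasses still holds a
    #  dead weak reference; propagating the new __setitem__ slot used to crash)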
import gc
class A(object):
pass
class B(A):
pass
del B
gc.collect()
A.__setitem__ = lambda *a: None # crash
def buffer_inherit():
import binascii
# SF bug [#470040] ParseTuple t# vs subclasses.
if verbose:
print "Testing that buffer interface is inherited ..."
class MyStr(str):
pass
base = 'abc'
m = MyStr(base)
# b2a_hex uses the buffer interface to get its argument's value, via
# PyArg_ParseTuple 't#' code.
vereq(binascii.b2a_hex(m), binascii.b2a_hex(base))
# It's not clear that unicode will continue to support the character
# buffer interface, and this test will fail if that's taken away.
class MyUni(unicode):
pass
base = u'abc'
m = MyUni(base)
vereq(binascii.b2a_hex(m), binascii.b2a_hex(base))
class MyInt(int):
pass
m = MyInt(42)
try:
binascii.b2a_hex(m)
raise TestFailed('subclass of int should not have a buffer interface')
except TypeError:
pass
def str_of_str_subclass():
import binascii
import cStringIO
if verbose:
print "Testing __str__ defined in subclass of str ..."
class octetstring(str):
def __str__(self):
return binascii.b2a_hex(self)
def __repr__(self):
return self + " repr"
o = octetstring('A')
vereq(type(o), octetstring)
vereq(type(str(o)), str)
vereq(type(repr(o)), str)
vereq(ord(o), 0x41)
vereq(str(o), '41')
vereq(repr(o), 'A repr')
vereq(o.__str__(), '41')
vereq(o.__repr__(), 'A repr')
capture = cStringIO.StringIO()
# Calling str() or not exercises different internal paths.
print >> capture, o
print >> capture, str(o)
vereq(capture.getvalue(), '41\n41\n')
capture.close()
def kwdargs():
if verbose: print "Testing keyword arguments to __init__, __call__..."
def f(a): return a
vereq(f.__call__(a=42), 42)
a = []
list.__init__(a, sequence=[0, 1, 2])
vereq(a, [0, 1, 2])
def delhook():
if verbose: print "Testing __del__ hook..."
log = []
class C(object):
def __del__(self):
log.append(1)
c = C()
vereq(log, [])
del c
vereq(log, [1])
class D(object): pass
d = D()
try: del d[0]
except TypeError: pass
else: raise TestFailed, "invalid del() didn't raise TypeError"
def hashinherit():
if verbose: print "Testing hash of mutable subclasses..."
class mydict(dict):
pass
d = mydict()
try:
hash(d)
except TypeError:
pass
else:
raise TestFailed, "hash() of dict subclass should fail"
class mylist(list):
pass
d = mylist()
try:
hash(d)
except TypeError:
pass
else:
raise TestFailed, "hash() of list subclass should fail"
def strops():
try: 'a' + 5
except TypeError: pass
else: raise TestFailed, "'' + 5 doesn't raise TypeError"
try: ''.split('')
except ValueError: pass
else: raise TestFailed, "''.split('') doesn't raise ValueError"
try: ''.join([0])
except TypeError: pass
else: raise TestFailed, "''.join([0]) doesn't raise TypeError"
try: ''.rindex('5')
except ValueError: pass
else: raise TestFailed, "''.rindex('5') doesn't raise ValueError"
try: ''.replace('', '')
except ValueError: pass
else: raise TestFailed, "''.replace('', '') doesn't raise ValueError"
try: '%(n)s' % None
except TypeError: pass
else: raise TestFailed, "'%(n)s' % None doesn't raise TypeError"
try: '%(n' % {}
except ValueError: pass
else: raise TestFailed, "'%(n' % {} '' doesn't raise ValueError"
try: '%*s' % ('abc')
except TypeError: pass
else: raise TestFailed, "'%*s' % ('abc') doesn't raise TypeError"
try: '%*.*s' % ('abc', 5)
except TypeError: pass
else: raise TestFailed, "'%*.*s' % ('abc', 5) doesn't raise TypeError"
try: '%s' % (1, 2)
except TypeError: pass
else: raise TestFailed, "'%s' % (1, 2) doesn't raise TypeError"
try: '%' % None
except ValueError: pass
else: raise TestFailed, "'%' % None doesn't raise ValueError"
vereq('534253'.isdigit(), 1)
vereq('534253x'.isdigit(), 0)
vereq('%c' % 5, '\x05')
vereq('%c' % '5', '5')
def deepcopyrecursive():
if verbose: print "Testing deepcopy of recursive objects..."
class Node:
pass
a = Node()
b = Node()
a.b = b
b.a = a
z = deepcopy(a) # This blew up before
def modules():
if verbose: print "Testing uninitialized module objects..."
from types import ModuleType as M
m = M.__new__(M)
str(m)
vereq(hasattr(m, "__name__"), 0)
vereq(hasattr(m, "__file__"), 0)
vereq(hasattr(m, "foo"), 0)
vereq(m.__dict__, None)
m.foo = 1
vereq(m.__dict__, {"foo": 1})
def docdescriptor():
# SF bug 542984
if verbose: print "Testing __doc__ descriptor..."
class DocDescr(object):
def __get__(self, object, otype):
if object:
object = object.__class__.__name__ + ' instance'
if otype:
otype = otype.__name__
return 'object=%s; type=%s' % (object, otype)
class OldClass:
__doc__ = DocDescr()
class NewClass(object):
__doc__ = DocDescr()
vereq(OldClass.__doc__, 'object=None; type=OldClass')
vereq(OldClass().__doc__, 'object=OldClass instance; type=OldClass')
vereq(NewClass.__doc__, 'object=None; type=NewClass')
vereq(NewClass().__doc__, 'object=NewClass instance; type=NewClass')
def imulbug():
# SF bug 544647
if verbose: print "Testing for __imul__ problems..."
class C(object):
def __imul__(self, other):
return (self, other)
x = C()
y = x
y *= 1.0
vereq(y, (x, 1.0))
y = x
y *= 2
vereq(y, (x, 2))
y = x
y *= 3L
vereq(y, (x, 3L))
y = x
y *= 1L<<100
vereq(y, (x, 1L<<100))
y = x
y *= None
vereq(y, (x, None))
y = x
y *= "foo"
vereq(y, (x, "foo"))
def copy_setstate():
if verbose:
print "Testing that copy.*copy() correctly uses __setstate__..."
import copy
class C(object):
def __init__(self, foo=None):
self.foo = foo
self.__foo = foo
def setfoo(self, foo=None):
self.foo = foo
def getfoo(self):
return self.__foo
def __getstate__(self):
return [self.foo]
def __setstate__(self, lst):
assert len(lst) == 1
self.__foo = self.foo = lst[0]
a = C(42)
a.setfoo(24)
vereq(a.foo, 24)
vereq(a.getfoo(), 42)
b = copy.copy(a)
vereq(b.foo, 24)
vereq(b.getfoo(), 24)
b = copy.deepcopy(a)
vereq(b.foo, 24)
vereq(b.getfoo(), 24)
def subtype_resurrection():
if verbose:
print "Testing resurrection of new-style instance..."
class C(object):
container = []
def __del__(self):
# resurrect the instance
C.container.append(self)
c = C()
c.attr = 42
# The most interesting thing here is whether this blows up, due to flawed
# GC tracking logic in typeobject.c's call_finalizer() (a 2.2.1 bug).
del c
# If that didn't blow up, it's also interesting to see whether clearing
# the last container slot works: that will attempt to delete c again,
# which will cause c to get appended back to the container again "during"
# the del.
del C.container[-1]
vereq(len(C.container), 1)
vereq(C.container[-1].attr, 42)
# Make c mortal again, so that the test framework with -l doesn't report
# it as a leak.
del C.__del__
def funnynew():
if verbose: print "Testing __new__ returning something unexpected..."
class C(object):
def __new__(cls, arg):
if isinstance(arg, str): return [1, 2, 3]
elif isinstance(arg, int): return object.__new__(D)
else: return object.__new__(cls)
class D(C):
def __init__(self, arg):
self.foo = arg
vereq(C("1"), [1, 2, 3])
vereq(D("1"), [1, 2, 3])
d = D(None)
veris(d.foo, None)
d = C(1)
vereq(isinstance(d, D), True)
vereq(d.foo, 1)
d = D(1)
vereq(isinstance(d, D), True)
vereq(d.foo, 1)
def subclass_right_op():
if verbose:
print "Testing correct dispatch of subclass overloading __r<op>__..."
# This code tests various cases where right-dispatch of a subclass
# should be preferred over left-dispatch of a base class.
# Case 1: subclass of int; this tests code in abstract.c::binary_op1()
class B(int):
def __div__(self, other):
return "B.__div__"
def __rdiv__(self, other):
return "B.__rdiv__"
vereq(B(1) / 1, "B.__div__")
vereq(1 / B(1), "B.__rdiv__")
# Case 2: subclass of object; this is just the baseline for case 3
class C(object):
def __div__(self, other):
return "C.__div__"
def __rdiv__(self, other):
return "C.__rdiv__"
vereq(C(1) / 1, "C.__div__")
vereq(1 / C(1), "C.__rdiv__")
# Case 3: subclass of new-style class; here it gets interesting
class D(C):
def __div__(self, other):
return "D.__div__"
def __rdiv__(self, other):
return "D.__rdiv__"
vereq(D(1) / C(1), "D.__div__")
vereq(C(1) / D(1), "D.__rdiv__")
# Case 4: this didn't work right in 2.2.2 and 2.3a1
class E(C):
pass
vereq(E.__rdiv__, C.__rdiv__)
vereq(E(1) / 1, "C.__div__")
vereq(1 / E(1), "C.__rdiv__")
vereq(E(1) / C(1), "C.__div__")
vereq(C(1) / E(1), "C.__div__") # This one would fail
def dict_type_with_metaclass():
if verbose:
print "Testing type of __dict__ when __metaclass__ set..."
class B(object):
pass
class M(type):
pass
class C:
# In 2.3a1, C.__dict__ was a real dict rather than a dict proxy
__metaclass__ = M
veris(type(C.__dict__), type(B.__dict__))
def weakref_segfault():
# SF 742911
if verbose:
print "Testing weakref segfault..."
import weakref
class Provoker:
def __init__(self, referrent):
self.ref = weakref.ref(referrent)
def __del__(self):
x = self.ref()
class Oops(object):
pass
o = Oops()
o.whatever = Provoker(o)
del o
def crash_in_get_sf736892():
def func():
pass
try:
f = func.__get__(None)
except TypeError:
pass
else:
# should not get here
f(1) # crash
def test_main():
weakref_segfault() # Must be first, somehow
class_docstrings()
lists()
dicts()
dict_constructor()
test_dir()
ints()
longs()
floats()
complexes()
spamlists()
spamdicts()
pydicts()
pylists()
metaclass()
pymods()
multi()
diamond()
objects()
slots()
dynamics()
errors()
classmethods()
staticmethods()
classic()
compattr()
newslot()
altmro()
overloading()
methods()
specials()
weakrefs()
properties()
supers()
inherits()
keywords()
restricted()
str_subclass_as_dict_key()
classic_comparisons()
rich_comparisons()
coercions()
descrdoc()
setclass()
setdict()
pickles()
copies()
binopoverride()
subclasspropagation()
buffer_inherit()
str_of_str_subclass()
kwdargs()
delhook()
hashinherit()
strops()
deepcopyrecursive()
modules()
pickleslots()
docdescriptor()
imulbug()
copy_setstate()
subtype_resurrection()
funnynew()
subclass_right_op()
dict_type_with_metaclass()
crash_in_get_sf736892()
if verbose: print "All OK"
if __name__ == "__main__":
test_main()
| gpl-3.0 | 2,229,904,655,857,334,500 | 26.952337 | 88 | 0.503284 | false |
sthyme/ZFSchizophrenia | BehaviorAnalysis/Alternative_Analyses/Correlation_between_genes/correlations_DISTANCE_betweengenes.py | 1 | 5605 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
import matplotlib.colors as mat_col
from matplotlib.colors import LinearSegmentedColormap
import scipy
import scipy.cluster.hierarchy as sch
from scipy.cluster.hierarchy import set_link_color_palette
import numpy as np
import pandas as pd
import glob
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from scipy.spatial import distance
#Dig=pd.read_csv("all_regions_sum_nPix_perk_red_channel_PaperData_thres50_newnames.csv")
#Dig=pd.read_csv("all_regions_sum_nPix_perk_green_channel_PaperData_thres50_newnames.csv")
#Dig=pd.read_csv("all_regions_sum_perk_red_channel_PaperData_thres50_newnames.csv")
#Dir=pd.read_csv("all_regions_sum_perk_red_channel_PaperData_newnames.csv")
#Db=pd.read_csv("MAYbehaviorfullset_transposed.csv")
Db=pd.read_csv("AUG16_12_dectest.csv")
#Db=pd.read_csv("AUGMAY18testingfinalfullgoodonesoct30nonoise_transposed.csv")
#Dig = Dig.applymap(np.log)
#Digl = Dig # use if skipping log10
#Digl = Dig.applymap(np.log10)
#print Dig
#Digl = Digl.replace([np.inf, -np.inf], 0)
#Digl = Digl.replace([np.inf, -np.inf], np.nan)
# use if not doing log10
#Digl = Digl.replace([0], np.nan)
#Dig = Dig.replace([0], np.nan)
#DignoNA = Dig.dropna()
#Db = Db.apply(lambda x: [y if 0 < y < 0.05 else np.nan for y in x])
#Db = Db.apply(lambda x: [y if -0.05 < y < 0 else np.nan for y in x])
#print Db["adamtsl3"]
#for binarizing
# DEC 2018, THIS BINARIZING WORKS, BUT NOT DOIN GIT
# only binarizing the "non-significant" data
Db = Db.apply(lambda x: [y if -0.05 < y < 0.05 else 1 for y in x])
# convert all non-significant values to large number
##Db = Db.apply(lambda x: [y if -0.05 < y < 0.05 else 5 for y in x])
#print Db["adamtsl3"]
# keep all positive values, everything negative (between 0 and -0.05) becomes -1
##Db = Db.apply(lambda x: [y if y > 0 else -1 for y in x])
#print Db["adamtsl3"]
##Db = Db.apply(lambda x: [y if y < 2 else 0 for y in x])
#print Db["adamtsl3"]
# everything that is negative or 0 stays the same, everything else (between 0 and 0.05) becomes 1
##Db = Db.apply(lambda x: [y if y <= 0 else 1 for y in x])
#print Db["adamtsl3"]
#Db = Db.apply(lambda x: [y if y == np.nan else 1 for y in x])
#Db = Db.apply(lambda x: [y if y != np.nan else 0 for y in x])
# TRYING LOG ON P-VALUES, NOT SURE IF GOOD IDEA
#Db = Db.applymap(np.log10)
###Db = Db.apply(lambda x: [y if -0.1 < y < 0.1 else np.nan for y in x])
#print Db
#exit()
corrlist = []
dfdict = {}
dfdictdist = {}
collist = []
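# For every ordered pair of behavior columns, compute the Pearson correlation
# (requiring at least 6 overlapping observations) and the Euclidean distance
# between the two columns, accumulating both keyed by the first column so they
# can be written out below as square correlation and distance tables.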
for column1 in Db:
for column2 in Db:
corr = Db[column1].corr(Db[column2], min_periods=6)
# dist = np.square(Db[column1] - Db[column2])
# print dist
dist = distance.euclidean(Db[column1], Db[column2])
# print dist
#corr = Db[column1].corr(Dig[column2], method='spearman', min_periods=7)
# if corr > 0.6 or corr < -0.6:
#corrlist.append( (corr, column1, column2))
#newdf = pd.concat([Dig[column2], Digl[column2], Db[column1]], axis=1)
newdf = pd.concat([Db[column2], Db[column1]], axis=1)
# newdf = newdf.dropna()
corrlist.append( (corr, newdf, column1, column2, dist))
if column1 in dfdict.keys():
dfdict[column1].append(corr)
dfdictdist[column1].append(dist)
else:
dfdict[column1] = []
dfdictdist[column1] = []
dfdict[column1].append(corr)
dfdictdist[column1].append(dist)
if column2 not in collist:
collist.append(column2)
#corrlist.append( (corr, column1, column2, newdf))
#newdf = Dig[column2].copy()
#newdf2 = newdf.concat(Db[column1])
#newdf[column1] = Db[column1]
#print newdf.dropna()
#exit()
# break
#break
#print dfdict
#print dfdictdist
#print collist
dfcor = pd.DataFrame.from_dict(dfdict, orient='index')
dfcor.columns = collist
dfdist = pd.DataFrame.from_dict(dfdictdist, orient='index')
dfdist.columns = collist
dfcor = dfcor.sort_index()
dfdist = dfdist.sort_index()
dfcor.to_csv("dec_correlation_sort1.csv")
dfdist.to_csv("dec_distance_sort1.csv")
#print dfcor
#corrlist.sort(key=lambda tup: tup[0])
#old way of just printing before generate the DF
##for i in range(0, len(corrlist)):
## print corrlist[i][0], corrlist[i][4], corrlist[i][2], corrlist[i][3]
#print corrlist[i][1]
#print corrlist[i][2]
#Db=pd.read_csv("MAY2018fullheatmapsetfinal_0.csv")
#Db = Db.transpose()
#Dig = Dig.values
#Dir = Dir.values
#Db = Db.values
#print "test1"
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print Dig
#print "test2"
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print Db
#Digb = Dig[:,1:]
#Dirb = Dir[:,1:]
#Digb = np.delete(Dig, 0, axis=1)
#Dbb = Db[:,1:]
#Dbb = np.delete(Db, 0, axis=1)
#Digb = np.log(Digb)
#Digb = Dig.values
#Dbb = Db.values
#print "test1"
#print Dbb
#print "test2"
#print Digb
#print np.shape(Dbb)
#print np.shape(Digb)
#for row in range(Digb.shape[0]):
#print str(pearsonr(Dbb[row,:], Digb[row,:]))
#print str(pearsonr(Dbb[:,row], Digb[:,row]))
#spearlist = []
#print "green correlation"
#for column1 in Digb.T:
# for column2 in Dbb.T:
# spearlist.append(str(spearmanr(column1, column2, nan_policy='omit')))
#spearlist.sort()
#for s in spearlist:
# print s
#print "red correlation"
#for column3 in Dirb.T:
# for column4 in Dbb.T:
# print str(pearsonr(column3, column4))
#for column1 in Dig:
# for column2 in Db:
# print column1.corr
#print "green correlation"
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
#print Dig.corrwith(Db.set_axis(Dig.columns, axis='columns', inplace=False))
#print Dig.corrwith(Db)
#print "red correlation"
#Dir.corrwith(Db)
| mit | 5,382,613,263,176,340,000 | 28.192708 | 97 | 0.693666 | false |
sillvan/hyperspy | hyperspy/drawing/_markers/horizontal_line_segment.py | 1 | 3320 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The Hyperspy developers
#
# This file is part of Hyperspy.
#
# Hyperspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hyperspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hyperspy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from hyperspy.drawing.marker import MarkerBase
class HorizontalLineSegment(MarkerBase):
"""Horizontal line segment marker that can be added to the signal figure
Parameters
    ----------
x1: array or float
The position of the start of the line segment in x.
If float, the marker is fixed.
If array, the marker will be updated when navigating. The array should
        have the same dimensions in the navigation axes.
x2: array or float
The position of the end of the line segment in x.
see x1 arguments
y: array or float
The position of line segment in y.
see x1 arguments
kwargs:
        Keyword arguments for valid axvline properties (i.e. recognized by
mpl.plot).
Example
-------
>>> import numpy as np
>>> im = signals.Image(np.zeros((100, 100)))
>>> m = utils.plot.markers.horizontal_line_segment(
>>> x1=20, x2=70, y=70, linewidth=4, color='red', linestyle='dotted')
>>> im.add_marker(m)
"""
def __init__(self, x1, x2, y, **kwargs):
MarkerBase.__init__(self)
lp = {}
lp['color'] = 'black'
lp['linewidth'] = 1
self.marker_properties = lp
self.set_data(x1=x1, x2=x2, y1=y)
self.set_marker_properties(**kwargs)
def update(self):
if self.auto_update is False:
return
self._update_segment()
def plot(self):
if self.ax is None:
raise AttributeError(
"To use this method the marker needs to be first add to a " +
"figure using `s._plot.signal_plot.add_marker(m)` or " +
"`s._plot.navigator_plot.add_marker(m)`")
self.marker = self.ax.vlines(0, 0, 1, **self.marker_properties)
self._update_segment()
self.marker.set_animated(True)
try:
self.ax.hspy_fig._draw_animated()
except:
pass
def _update_segment(self):
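        # y comes from the marker data; when x1/x2 are undefined the segment
        # spans the axes' current x-limits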
segments = self.marker.get_segments()
segments[0][0, 1] = self.get_data_position('y1')
segments[0][1, 1] = segments[0][0, 1]
if self.get_data_position('x1') is None:
segments[0][0, 0] = plt.getp(self.marker.axes, 'xlim')[0]
else:
segments[0][0, 0] = self.get_data_position('x1')
if self.get_data_position('x2') is None:
segments[0][1, 0] = plt.getp(self.marker.axes, 'xlim')[1]
else:
segments[0][1, 0] = self.get_data_position('x2')
self.marker.set_segments(segments)
| gpl-3.0 | -754,029,751,437,374,500 | 33.947368 | 78 | 0.614157 | false |
giacomov/lclike | lclike/duration_computation.py | 1 | 12141 | __author__ = 'giacomov'
# !/usr/bin/env python
# add |^| to the top line to run the script without needing 'python' to run it at cmd
# importing modules
import numpy as np
# cant use 'show' inside the farm
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib import gridspec
import os
import argparse
import decayLikelihood
import warnings
####################################################################
mycmd = argparse.ArgumentParser() # this is a class
mycmd.add_argument('triggername', help="The name of the GRB in YYMMDDXXX format (ex. bn080916009)")
mycmd.add_argument('redshift', help="Redshift for object.")
mycmd.add_argument('function', help="Function to model. (ex. crystalball2, band)")
mycmd.add_argument('directory', help="Directory containing the file produced by gtburst")
if __name__ == "__main__":
args = mycmd.parse_args()
os.chdir(args.directory)
##############################################################################
textfile = os.path.join(args.directory, '%s_res.txt' % (args.triggername))
tbin = np.recfromtxt(textfile, names=True)
textfile = os.path.join(args.directory, '%s_MCsamples_%s.txt' % (args.triggername, args.function))
samples = np.recfromtxt(textfile, names=True)
# function for returning 1 and 2 sigma errors from sample median
def getErr(sampleArr):
# compute sample percentiles for 1 and 2 sigma
m, c, p = np.percentile(sampleArr, [16, 50, 84])
# print("%.3f -%.3f +%.3f" %(c,m-c,p-c)) median, minus, plus
m2, c2, p2 = np.percentile(sampleArr, [3, 50, 97])
return m, c, p, m2, c2, p2
# prepare for plotting and LOOP
t = np.logspace(0, 4, 100)
t = np.append(t, np.linspace(0, 1, 10))
t.sort()
t = np.unique(t)
print('NUMBER OF times to iterate: %s' % (len(t)))
x = decayLikelihood.DecayLikelihood()
if args.function == 'crystalball2':
crystal = decayLikelihood.CrystalBall2() # declaring instance of DecayLikelihood using POWER LAW FIT
x.setDecayFunction(crystal)
# CrystalBall DiffFlux####################################################
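    # Per-sample summary quantities: Peak is the flux at the peak, ePeak the
    # flux once it has decayed to 1/e of the peak, tPeak the time of the peak
    # and tePeak the 1/e (characteristic) time; the times are converted to the
    # rest frame below by dividing by (1 + z).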
Peak = np.zeros(samples.shape[0])
ePeak = np.zeros(samples.shape[0])
tPeak = np.zeros(samples.shape[0])
tePeak = np.zeros(samples.shape[0])
print('ENTERING samples LOOP')
# mu,sigma,decayIndex, and N
for i, parameters in enumerate(samples):
x.decayFunction.setParameters(*parameters)
# NORMALIZATION IS THE FLUX AT THE PEAK
pB = parameters[3] # decay time is independent of scale # (y*.001) # scale =0.001, for all xml files
fBe = pB / np.e
# t = (fBe/N)**(-1/a) defined to be 1
mu = parameters[0]
tP = mu
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
teP = mu + (fBe / parameters[3]) ** (
-1 / parameters[2]) # sometimes 'RuntimeWarning: overflow encountered in double_scalars'
except Warning:
print('RuntimeWarning Raised! mu,sigma,decayIndex,and N:', parameters)
teP = parameters[0] + (fBe / parameters[3]) ** (-1 / parameters[2])
Peak[i] = pB
ePeak[i] = fBe
# redshift correcting t/(1+z)
tPeak[i] = tP / (1 + float(args.redshift)) ################################
tePeak[i] = teP / (1 + float(args.redshift)) ################################
elif args.function == 'band':
band = decayLikelihood.DecayBand() # declaring instance of DecayLikelihood using POWER LAW FIT
x.setDecayFunction(band)
Peak = np.zeros(samples.shape[0])
ePeak = np.zeros(samples.shape[0]) # fractional brightness used in calcuating char-time, but not needed otherwise
tPeak = np.zeros(samples.shape[0])
tePeak = np.zeros(samples.shape[0]) # characteristic time
T05 = np.zeros(samples.shape[0])
T90 = np.zeros(samples.shape[0])
T95 = np.zeros(samples.shape[0])
T25 = np.zeros(samples.shape[0])
T50 = np.zeros(samples.shape[0])
T75 = np.zeros(samples.shape[0])
print('ENTERING samples LOOP')
# mu,sigma,decayIndex, and N
for i, parameters in enumerate(samples):
x.decayFunction.setParameters(*parameters)
tc = band.getCharacteristicTime() # get the characteristic time.
# T50/T90 TAKING TOO LONG (1/4)
# t90, t05, t95 = band.getTsomething( 90 ) # if the argument is 90, returns the T90 as well as the T05 and the T95. If the argument is 50, returns the T50 as well as the T25 and T75, and so on.
# t50, t25, t75 = band.getTsomething( 50 )
tp, fp = band.getPeakTimeAndFlux() # returns the time of the peak, as well as the peak flux
tePeak[i] = tc / (1 + float(args.redshift)) ################################
tPeak[i] = tp / (1 + float(args.redshift))
Peak[i] = fp
# T50/T90 TAKING TOO LONG (2/4)
# T05[i] = t05/(1+float(args.redshift))
# T90[i] = t90/(1+float(args.redshift))
# T95[i] = t95/(1+float(args.redshift))
# T50/T90 TAKING TOO LONG (3/4)
# T25[i] = t25/(1+float(args.redshift))
# T50[i] = t50/(1+float(args.redshift))
# T75[i] = t75/(1+float(args.redshift))
# Defining sigma bands
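# Evaluate the fitted decay model on the time grid for every MC sample, then
# take the 16/84 and 2.5/97.5 percentiles of the fluxes at each time to build
# the 1-sigma and 2-sigma model bands plotted later.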
print('ENTERING Percentile LOOP')
upper = np.zeros(t.shape[0])
lower = np.zeros(t.shape[0])
upper2 = np.zeros(t.shape[0])
lower2 = np.zeros(t.shape[0])
meas = np.zeros(t.shape[0])
fluxMatrix = np.zeros([samples.shape[0], t.shape[0]])
for i, s in enumerate(samples):
x.decayFunction.setParameters(*s)
fluxes = map(x.decayFunction.getDifferentialFlux, t)
fluxMatrix[i, :] = np.array(fluxes)
for i, tt in enumerate(t):
allFluxes = fluxMatrix[:, i]
m, p = np.percentile(allFluxes, [16, 84])
lower[i] = m
upper[i] = p
m2, p2 = np.percentile(allFluxes, [2.5, 97.5])
lower2[i] = m2
upper2[i] = p2
wdir = '%s' % (args.directory)
# save TXT files instead of .npy
placeFile = os.path.join(wdir, "%s_tBrightness_%s" % (args.triggername, args.function))
with open(placeFile, 'w+') as f:
f.write("Peak tPeak ePeak tePeak\n")
for i, s in enumerate(Peak):
f.write("%s %s %s %s\n" % (Peak[i], tPeak[i], ePeak[i], tePeak[i]))
# CALCULATING T50/T90 TAKES TOO LONG
# T50/T90 TAKING TOO LONG (4/4)
# if args.function == 'band':
# #compute percentiles for 1 sigma
# m90,c90,p90 = np.percentile(T90,[16,50,84])
# m50,c50,p50 = np.percentile(T50,[16,50,84])
# #compute percentiles for 1 and 2 sigma
# #90m,90c,90p,90m2,90c2,90p2 = getErr(T90)
# #50m,50c,50p,50m2,50c2,50p2 = getErr(T50)
# #print("%.3f -%.3f +%.3f" %(c,m-c,p-c)) median, minus, plus
#
# placeFile=os.path.join(wdir,"%s_t90_t50_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t90 90minus 90plus t50 50minus 50plus\n")
# for i,s in enumerate(T90):
# f.write("%s %s %s %s %s %s\n" % (m90,m90-c90,p90-c90,c50,m50-c50,p50-c50)) #c,m-c,p-c
#
# placeFile=os.path.join(wdir,"%s_samplesT90_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t90 t05 t95\n")
# for i,s in enumerate(T90):
# f.write("%s %s %s\n" % (T90[i],T05[i],T95[i]))
# placeFile=os.path.join(wdir,"%s_samplesT50_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t50 t25 t25\n")
# for i,s in enumerate(T50):
# f.write("%s %s %s\n" % (T50[i],T25[i],T75[i]))
# compute char-time percentiles for 1 and 2 sigma
m, c, p, m2, c2, p2 = getErr(tePeak)
# saves txt file
wkdir = '%s' % (args.directory)
fileDir = os.path.join(wkdir, '%s_timeRes_%s' % (args.triggername, args.function))
with open(fileDir, 'w+') as f:
f.write('%s %s %s\n' % ('median', 'minus', 'plus'))
f.write('%s %s %s\n' % (c, m - c, p - c))
# PLOTTING BINS AND SIGMA BAND
print("PLOTTING...")
fig = plt.figure()
# median is your "x"
# Y is your "y"
# DY is the array containing the errors
# DY==0 filters only the zero error
data = tbin
# redshift correction /(1+args.redshif)
median = (data["tstart"] + data["tstop"]) / 2 / (1 + float(args.redshift))
start = data['tstart'] / (1 + float(args.redshift)) ##
stop = data['tstop'] / (1 + float(args.redshift)) ##
y = data["photonFlux"]
Dy = data["photonFluxError"]
try:
y = np.core.defchararray.replace(y, "<", "", count=None) # runs through array and removes strings
except:
print('No Upper-Limits Found in %s.' % (args.triggername))
try:
Dy = np.core.defchararray.replace(Dy, "n.a.", "0",
count=None) ## 0 error is nonphysical, and will be checked for in plotting
except:
print('No 0-Error Found in %s.' % (args.triggername))
bar = 0.5
color = "blue"
Y = np.empty(0, dtype=float) # makes empty 1-D array for float values
for i in y:
Y = np.append(Y, float(i))
DY = np.empty(0, dtype=float)
for i in Dy:
DY = np.append(DY, float(i))
plt.clf()
if (DY > 0).sum() > 0: # if sum() gives a non-zero value then there are error values
plt.errorbar(median[DY > 0], Y[DY > 0],
xerr=[median[DY > 0] - start[DY > 0], stop[DY > 0] - median[DY > 0]],
yerr=DY[DY > 0], ls='None', marker='o', mfc=color, mec=color, ecolor=color, lw=2, label=None)
if (DY == 0).sum() > 0:
plt.errorbar(median[DY == 0], Y[DY == 0],
xerr=[median[DY == 0] - start[DY == 0], stop[DY == 0] - median[DY == 0]],
yerr=[bar * Y[DY == 0], 0.0 * Y[DY == 0]], lolims=True, ls='None', marker='', mfc=color, mec=color,
ecolor=color, lw=2, label=None)
plt.suptitle('%s photonFlux per Time' % (args.triggername))
plt.xlabel('Rest Frame Time(s)')
plt.ylabel('Photon Flux')
plt.xscale('symlog')
plt.yscale('log')
plt.grid(True)
if args.function == 'crystalball2':
SCALE = 0.001
elif args.function == 'band':
SCALE = 1.0 # 0.1 # shouldn't need a scale anymore for Band function
ylo = 1e-7 # min(lower2*SCALE)*1e-1 # CANT GET THIS TO WORK YET DYNAMICALLY
yup = max(upper2 * SCALE) * 10
plt.ylim([ylo, yup])
# correcting for redshift t/(1+args.redshift)
plt.fill_between(t / (1 + float(args.redshift)), lower * SCALE, upper * SCALE, alpha=0.5, color='blue')
plt.fill_between(t / (1 + float(args.redshift)), lower2 * SCALE, upper2 * SCALE, alpha=0.3, color='green')
# y = map(x.decayFunction.getDifferentialFlux, t) # maps infinitesimal values of flux at time t to y
# raw_input("Press ENTER")
# PowerLaw
# plt.plot(t,,'o')
# saves plots
wdir = '%s' % (args.directory)
imsave = os.path.join(wdir, '%s_objFit_%s' % (args.triggername, args.function))
plt.savefig(imsave + '.png')
# histograms of 1/e and save
print("Making histograms")
fig = plt.figure(figsize=(10, 6))
gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1])
bins = np.linspace(min(tePeak), np.max(tePeak), 100)
ax0 = plt.subplot(gs[0])
ax0.hist(tePeak, bins, normed=True)
plt.title('1/e (min to medx2)')
plt.xlabel('1/e time (s)')
plt.xlim([min(tePeak), np.median(tePeak) * 2])
ax1 = plt.subplot(gs[1])
ax1.hist(tePeak, bins, normed=True)
plt.title('1/e (min to max)')
plt.xlabel('time (s)')
plt.tight_layout()
imsave = os.path.join(wdir, '%s_hist_%s' % (args.triggername, args.function))
plt.savefig(imsave + '.png')
print("Finished Potting/Saving!")
| bsd-3-clause | -413,746,837,952,500,500 | 35.459459 | 205 | 0.567581 | false |
alingse/jsoncsv | jsoncsv/dumptool.py | 1 | 3539 | # coding=utf-8
# author@alingse
# 2015.10.09
import json
import unicodecsv as csv
import xlwt
class Dump(object):
def __init__(self, fin, fout, **kwargs):
self.fin = fin
self.fout = fout
self.initialize(**kwargs)
def initialize(self, **kwargs):
pass
def prepare(self):
pass
def dump_file(self, obj):
raise NotImplementedError
def on_finish(self):
pass
def dump(self):
self.prepare()
self.dump_file()
self.on_finish()
class ReadHeadersMixin(object):
@staticmethod
def load_headers(fin, read_row=None, sort_type=None):
headers = set()
datas = []
# read
if not read_row or read_row < 1:
read_row = -1
for line in fin:
obj = json.loads(line)
headers.update(obj.keys())
datas.append(obj)
read_row -= 1
if not read_row:
break
# TODO: add some sort_type here
headers = sorted(list(headers))
return (list(headers), datas)
class DumpExcel(Dump, ReadHeadersMixin):
def initialize(self, **kwargs):
super(DumpExcel, self).initialize(**kwargs)
self._read_row = kwargs.get('read_row')
self._sort_type = kwargs.get('sort_type')
def prepare(self):
headers, datas = self.load_headers(self.fin, self._read_row,
self._sort_type)
self._headers = headers
self._datas = datas
def write_headers(self):
raise NotImplementedError
def write_obj(self):
raise NotImplementedError
def dump_file(self):
self.write_headers()
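        # rows buffered while scanning for the headers are written first, then
        # the remaining lines are streamed directly from the input file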
for obj in self._datas:
self.write_obj(obj)
for line in self.fin:
obj = json.loads(line)
self.write_obj(obj)
class DumpCSV(DumpExcel):
def initialize(self, **kwargs):
super(DumpCSV, self).initialize(**kwargs)
self.csv_writer = None
def write_headers(self):
self.csv_writer = csv.DictWriter(self.fout, self._headers)
self.csv_writer.writeheader()
def write_obj(self, obj):
patched_obj = {
key: self.patch_value(value)
for key, value in obj.items()
}
self.csv_writer.writerow(patched_obj)
def patch_value(self, value):
if value in (None, {}, []):
return ""
return value
class DumpXLS(DumpExcel):
def initialize(self, **kwargs):
super(DumpXLS, self).initialize(**kwargs)
self.sheet = kwargs.get('sheet', 'Sheet1')
self.wb = xlwt.Workbook(encoding='utf-8')
self.ws = self.wb.add_sheet(self.sheet)
self.row = 0
self.cloumn = 0
def write_headers(self):
for head in self._headers:
self.ws.write(self.row, self.cloumn, head)
self.cloumn += 1
self.row += 1
def write_obj(self, obj):
self.cloumn = 0
for head in self._headers:
value = obj.get(head)
# patch
if value in ({},):
value = "{}"
self.ws.write(self.row, self.cloumn, value)
self.cloumn += 1
self.row += 1
def on_finish(self):
self.wb.save(self.fout)
def dump_excel(fin, fout, klass, **kwargs):
if not isinstance(klass, type) or not issubclass(klass, DumpExcel):
raise ValueError("unknow dumpexcel type")
dump = klass(fin, fout, **kwargs)
dump.dump()
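
# Minimal usage sketch (assumptions: one flat JSON object per input line,
# binary file handles because unicodecsv/xlwt write encoded bytes, and the
# file names here are only illustrative):
#
#   with open('objects.jsonl', 'rb') as fin, open('out.csv', 'wb') as fout:
#       dump_excel(fin, fout, DumpCSV, read_row=100)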
| apache-2.0 | 5,747,975,556,471,629,000 | 22.912162 | 71 | 0.552133 | false |
DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/examples/openmdao.examples.bar3simulation/openmdao/examples/bar3simulation/bar3_optimization.py | 1 | 4444 | """
bar3_optimization.py - Top level assembly for the example problem.
"""
# Optimize the bar3 design using the CONMIN optimizer.
# pylint: disable-msg=E0611,F0401
from openmdao.lib.drivers.api import CONMINdriver
from openmdao.main.api import Assembly
from openmdao.main.datatypes.api import Float
# from openmdao.examples.bar3simulation.bar3 import Bar3Truss
from openmdao.examples.bar3simulation.bar3_wrap_f import Bar3Truss
class Bar3Optimization(Assembly):
""" Optimization of a three bar truss. """
# set up interface to the framework
# pylint: disable-msg=E1101
# Constraint allowables
bar1_stress_allowable = Float(20., iotype='in',
units='lb/(inch*inch)',
desc='Stress allowable in bar 1')
bar2_stress_allowable = Float(20., iotype='in',
units='lb/(inch*inch)',
desc='Stress allowable in bar 2')
bar3_stress_allowable = Float(20., iotype='in',
units='lb/(inch*inch)',
desc='Stress allowable in bar 3')
displacement_x_dir_allowable = Float(0.20, iotype='in', units='inch',
desc='Displacement limitation in x-direction')
displacement_y_dir_allowable = Float(0.05, iotype='in', units='inch',
desc='Displacement limitation in y-direction')
frequency_allowable = Float(14.1421, iotype='in', units='Hz',
desc='Frequency limitation in Hertz')
def configure(self):
# Create CONMIN Optimizer instance
self.add('driver', CONMINdriver())
# Create Bar3_Truss component instances
self.add('bar3_truss', Bar3Truss())
self.driver.workflow.add('bar3_truss')
# CONMIN Flags
self.driver.iprint = 0
self.driver.itmax = 30
self.driver.fdch = .00001
self.driver.fdchm = .00001
self.driver.ct = -.001
# CONMIN Objective
self.driver.add_objective('bar3_truss.weight')
# CONMIN Design Variables
for param, low, high in zip(['bar3_truss.bar1_area',
'bar3_truss.bar2_area',
'bar3_truss.bar3_area'],
[0.001, 0.001, 0.001],
[10000.0, 10000.0, 10000.0]):
self.driver.add_parameter(param, low=low, high=high)
# CONMIN Constraints
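        # stress and displacement responses are expressed as ratios to their
        # allowables (normalized |g| <= 1); the frequency constraint keeps the
        # squared frequency at or above its allowable value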
constraints = [
'abs(bar3_truss.bar1_stress/bar1_stress_allowable) <= 1.0',
'abs(bar3_truss.bar2_stress/bar2_stress_allowable) <= 1.0',
'abs(bar3_truss.bar3_stress/bar3_stress_allowable) <= 1.0',
'abs(bar3_truss.displacement_x_dir/displacement_x_dir_allowable) <= 1.0',
'abs(bar3_truss.displacement_y_dir/displacement_y_dir_allowable) <= 1.0',
'frequency_allowable**2 <= bar3_truss.frequency**2']
map(self.driver.add_constraint, constraints)
if __name__ == "__main__": # pragma: no cover
import time
# pylint: disable-msg=E1101
opt_bar3 = Bar3Optimization()
def prz(title):
""" Print before and after"""
print '---------------------------------'
print title
print '---------------------------------'
print 'Bar3: Weight = ', opt_bar3.bar3_truss.weight
print 'DV1: Bar1_area = ', opt_bar3.bar3_truss.bar1_area
print 'DV2: Bar2_area = ', opt_bar3.bar3_truss.bar2_area
    print 'DV3: Bar3_area = ', opt_bar3.bar3_truss.bar3_area
print '---------------------------------'
print 'Con1: Bar1_stress = ', opt_bar3.bar3_truss.bar1_stress
print 'Con2: Bar2_stress = ', opt_bar3.bar3_truss.bar2_stress
print 'Con3: Bar3_stress = ', opt_bar3.bar3_truss.bar3_stress
print 'Con4: Displ_u = ', opt_bar3.bar3_truss.displacement_x_dir
print 'Con5: Displ_v = ', opt_bar3.bar3_truss.displacement_y_dir
print 'Con6: Frequency = ', opt_bar3.bar3_truss.frequency
print '\n'
opt_bar3.bar3_truss.run()
prz('Old Design')
time1 = time.time()
opt_bar3.run()
prz('New Design')
print "CONMIN Iterations: ", opt_bar3.driver.iter_count
print ""
print "Elapsed time: ", time.time() - time1
# end bar3_optimization.py
| mit | 6,631,293,098,315,025,000 | 36.982906 | 87 | 0.560981 | false |
hall1467/wikidata_usage_tracking | python_analysis_scripts/edit_analyses/session_stats.py | 1 | 2861 | """
Computes session and revision statistics: distinct sessions (overall, bot and
human), bot/human revision counts, and revision counts per namespace.
Usage:
session_stats (-h|--help)
session_stats <input> <output>
[--debug]
[--verbose]
Options:
-h, --help This help message is printed
<input> Path to input file to process.
<output> Where output will be written
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import docopt
import logging
import operator
import sys
import mysqltsv
from collections import defaultdict
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
input_file = mysqltsv.Reader(
open(args['<input>'],'rt'), headers=True,
types=[str, str, str, str, str, int, str, str, str, str, str, str,
str, str])
output_file = open(args['<output>'], "w")
verbose = args['--verbose']
run(input_file, output_file, verbose)
def run(input_file, output_file, verbose):
sessions = defaultdict(lambda: defaultdict(int))
bot_sessions = defaultdict(lambda: defaultdict(int))
human_sessions = defaultdict(lambda: defaultdict(int))
revision_namespaces = defaultdict(int)
bot_revisions_sum = 0
human_revisions_sum = 0
for i, line in enumerate(input_file):
sessions[line["user"]][line["session_start"]] = 1
revision_namespaces[line["namespace"]] += 1
if line["edit_type"] == 'bot':
bot_revisions_sum += 1
bot_sessions[line["user"]][line["session_start"]] = 1
else:
human_revisions_sum += 1
human_sessions[line["user"]][line["session_start"]] = 1
if verbose and i % 10000 == 0 and i != 0:
sys.stderr.write("Revisions analyzed: {0}\n".format(i))
sys.stderr.flush()
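    # `sessions` maps each user to a dict of distinct session_start values, so
    # counting every (user, session_start) pair below gives the number of
    # distinct sessions (overall, bot-only and human-only).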
session_sum = 0
for user in sessions:
for session_start in sessions[user]:
session_sum += 1
bot_session_sum = 0
for user in bot_sessions:
for session_start in bot_sessions[user]:
bot_session_sum += 1
human_session_sum = 0
for user in human_sessions:
for session_start in human_sessions[user]:
human_session_sum += 1
output_file.write("Sessions: {0}\n".format(session_sum))
output_file.write("Bot sessions: {0}\n".format(bot_session_sum))
output_file.write("Bot revisions: {0}\n".format(bot_revisions_sum))
output_file.write("Human sessions: {0}\n".format(human_session_sum))
output_file.write("Human revisions: {0}\n".format(human_revisions_sum))
output_file.write("Revision namespaces: {0}\n".format(revision_namespaces))
main()
| mit | 18,738,158,364,081,736 | 26.509615 | 79 | 0.606082 | false |
StevenCHowell/code_sas_modeling | sas_modeling/calc_i0.py | 1 | 13051 | #!/usr/bin/env python
# coding:utf-8
'''
Author: Steven C. Howell --<[email protected]>
Purpose: calculating the Guinier fit
Created: 12/21/2016
00000000011111111112222222222333333333344444444445555555555666666666677777777778
12345678901234567890123456789012345678901234567890123456789012345678901234567890
'''
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
from scipy import optimize
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
def fit_line_v0(x, y, dy):
'''
Fit data for y = mx + b
return m and b
http://scipy-cookbook.readthedocs.io/items/FittingData.html#id2
error estimate seems reasonable compared to input data
'''
w = 1 / dy
# define our (line) fitting function
fitfunc = lambda p, x: p[0] * x + p[1]
errfunc = lambda p, x, y, w: (y - fitfunc(p, x)) * w
# use the last two points to guess the initial values
m_guess = (y[-2] - y[-1]) / (x[-2] - x[-1]) # use 2 points to guess slope
    b_guess = y[-1] - m_guess * x[-1]  # guess the y-intercept from 2 points
p_guess = [m_guess, b_guess]
out = optimize.leastsq(errfunc, p_guess, args=(x, y, w), full_output=1)
p_final = out[0]
m = p_final[0]
b = p_final[1]
# from the docs page:
# cov_x : ndarray
# Uses the fjac and ipvt optional outputs to construct an estimate
# of the jacobian around the solution. None if a singular matrix
# encountered (indicates very flat curvature in some direction).
# This matrix must be multiplied by the residual variance to get the
# covariance of the parameter estimates – see curve_fit.
#
# curve_fit documentation says:
# The diagonals provide the variance of the parameter estimate.
# To compute one standard deviation errors on the parameters use
# perr = np.sqrt(np.diag(pcov)).
#
# How the sigma parameter affects the estimated covariance depends
# on absolute_sigma argument, as described above.
#
# If the Jacobian matrix at the solution doesn’t have a full rank,
# then ‘lm’ method returns a matrix filled with np.inf, on the other
# hand ‘trf’ and ‘dogbox’ methods use Moore-Penrose pseudoinverse to
# compute the covariance matrix.
cov = out[1]
m_err = np.sqrt(cov[0, 0])
b_err = np.sqrt(cov[1, 1])
return m, b, m_err, b_err
def fit_line_v1(x, y, dy):
'''
Fit data for y = mx + b
return m and b
no error estimates
'''
w = 1 / dy ** 2
A = np.vstack([x * w, 1.0 * w]).T
p, residuals, _, _ = np.linalg.lstsq(A, y * w)
m = p[0]
b = p[1]
# from the docs page:
# residuals : {(), (1,), (K,)} ndarray
# Sums of residuals; squared Euclidean 2-norm for each column in b - a*x.
# If the rank of a is < N or M <= N, this is an empty array. If b is
# 1-dimensional, this is a (1,) shape array. Otherwise the shape is (K,).
# rank : int
# Rank of matrix a.
# s : (min(M, N),) ndarray
# Singular values of a.
m_err = 0.0
b_err = 0.0
return m, b, m_err, b_err
def fit_line_v2(x, y, dy):
'''
Fit data for y = mx + b
return m and b
essentially the same results as fit_line_v0
no error estimates
'''
w = 1 / dy ** 2
out = np.polynomial.polynomial.polyfit(x, y, 1, w=w, full=True)
# does not provide the covariance matrix, not sure how to extract error
p_final = out[0]
m = p_final[1]
b = p_final[0]
# from the docs page:
# [residuals, rank, singular_values, rcond] : list
# These values are only returned if full = True
# resid – sum of squared residuals of the least squares fit
# rank – the numerical rank of the scaled Vandermonde matrix
# sv – singular values of the scaled Vandermonde matrix
# rcond – value of rcond.
# For more details, see linalg.lstsq.
b_err = 0.0
m_err = 0.0
return m, b, m_err, b_err
def fit_line_v3(x, y, dy):
'''
Fit data for y = mx + b
return m and b
method taken from SasView:
github.com/SasView/sasview/blob/master/src/sas/sascalc/invariant/invariant.py
error estimate seems reasonable
'''
A = np.vstack([x / dy, 1.0 / dy]).T
p, residuals, _, _ = np.linalg.lstsq(A, y / dy)
m = p[0]
b = p[1]
# Get the covariance matrix, defined as inv_cov = a_transposed * a
inv_cov = np.dot(A.transpose(), A)
cov = np.linalg.pinv(inv_cov)
err_matrix = np.abs(residuals) * cov
m_err, b_err = np.sqrt(np.diag(err_matrix))
return m, b, m_err, b_err
def fit_line_v4(x, y, dy):
'''
Fit data for y = mx + b
return m and b
error estimate seems much too small
'''
w = 1 / dy ** 2
p, cov = np.polyfit(x, y, 1, w=w, cov=True)
m, b = p
# From docs page:
# The diagonal of this matrix (cov) are the
# variance estimates for each coefficient.
    m_err, b_err = np.sqrt(np.diag(cov))  # standard deviations
# m_err, b_err = np.diag(cov)
return m, b, m_err, b_err
def fit_line_v5(x, y, dy):
'''
Fit data for y = mx + b
return m and b
method taken from wikipedia:
https://en.wikipedia.org/wiki/Linear_least_squares_(mathematics)#Python
error estimate seems reasonable comared to input data
This result is identical to v0 and v7
'''
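    # weighted normal equations: beta = (X^T W X)^-1 X^T W y; since W holds the
    # inverse variances, cov(beta) = (X^T W X)^-1 and the parameter errors are
    # the square roots of its diagonal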
w = 1 / dy ** 2
n = len(x)
X = np.array([x, np.ones(n)]).T
Y = np.array(y).reshape(-1, 1)
W = np.eye(n) * w # weight using the inverse of the variance
# calculate the parameters
xtwx_inv = np.linalg.inv(X.T.dot(W).dot(X))
m, b = xtwx_inv.dot(X.T).dot(W).dot(Y).reshape(2)
# calculate the error of the parameters:
# (X.T * W * X)^-1 * X.T * W * M * W.T * X * (X.T * W.T * X)^-1
# cov_xy = covariance(x, y)
# var_x = covariance(x, x)
# var_y = covariance(y, y)
# M = np.eye(m) * dy ** 2
# xtwtx_inv = np.linalg.inv(X.T.dot(W.T).dot(X))
# M_beta = xtwx_inv.dot(X.T).dot(W).dot(M).dot(W.T).dot(X).dot(xtwtx_inv)
# M_beta = xtwx_inv # because M = W^-1
cov = xtwx_inv
m_err, b_err = np.sqrt(np.diag(cov))
return m, b, m_err, b_err
def fit_line_v6(x, y, dy):
'''
Fit data for y = mx + b
return m and b
method taken from Baird's "Experimentation": pg 138-140
The dy's in the derivation are not the same as the error of the y values
This method does not propagate the error
'''
var = dy ** 2 # variance, when dy is the standard deviation
wx = x / var
wy = y / var
sum_xy = np.sum(wx * wy)
sum_x = np.sum(wx)
sum_y = np.sum(wy)
sum_x_dy_inv = np.sum(wx / var)
sum_dy_inv = np.sum(1 / var)
sum_x2 = np.sum(wx ** 2)
den = sum_dy_inv * sum_x2 - sum_x * sum_x_dy_inv
m_num = sum_dy_inv * sum_xy - sum_x_dy_inv * sum_y
m = m_num / den
b_num = sum_x2 * sum_y - sum_x * sum_xy
b = b_num / den
n = len(x)
y_fit = m * x + b
delta_y = y - y_fit
y_err = np.sqrt(np.sum(delta_y ** 2) / (n - 2))
m_err = y_err * np.sqrt(n / den)
b_err = y_err * np.sqrt(sum_x2 / den)
return m, b, m_err, b_err
def fit_line_v7(x, y, dy):
'''
Fit data for y = mx + b
return m and b
from Huges & Hase "Measurements and their Uncertainties", pg 69-70
and Press et al. "Numerical Recipes 3rd Edition", pg 781-783
'''
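    # closed-form weighted least squares with weights w = 1/dy**2:
    #   Delta = S*Sxx - Sx**2, m = (S*Sxy - Sx*Sy)/Delta, b = (Sxx*Sy - Sx*Sxy)/Delta
    #   var(m) = S/Delta, var(b) = Sxx/Delta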
w = 1 / dy ** 2 # weight is the inverse square of the uncertainty
s = np.sum(w)
sx = np.sum(w * x)
sy = np.sum(w * y)
sxx = np.sum(w * x ** 2)
sxy = np.sum(w * x * y)
den = s * sxx - sx ** 2
m_num = s * sxy - sx * sy
m = m_num / den
b_num = sxx * sy - sx * sxy
b = b_num / den
m_err = np.sqrt(s / den)
b_err = np.sqrt(sxx / den)
return m, b, m_err, b_err
def fit_line_v8(x, y, dy):
'''
Fit data for y = mx + b
return m and b
from Press et al. "Numerical Recipes 3rd Edition", pg 781-783
using numerically robust formalism
'''
w = 1 / dy ** 2 # weight is the inverse square of the uncertainty
s = np.sum(w)
sx = np.sum(w * x)
sy = np.sum(w * y)
t = 1 / dy * (x - sx / s)
stt = np.sum(t ** 2)
m = np.sum(t * y / dy) / stt
b = (sy - sx * m) / s
m_err = np.sqrt(1 / stt)
b_err = np.sqrt((1 + sx ** 2 / (s * stt)) / s)
return m, b, m_err, b_err
def guinier_fit(q, iq, diq, dq=None, q_min=0.0, q_max=0.1, view_fit=False,
fit_method=fit_line_v5, save_fname='guiner_fit.html',
refine=False):
'''
perform Guinier fit
return I(0) and Rg
'''
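    # Guinier approximation: I(q) ~ I(0) * exp(-(q*Rg)**2 / 3), i.e.
    # ln I(q) = ln I(0) - (Rg**2 / 3) * q**2 for q*Rg <~ 1.3, so a weighted
    # straight-line fit of ln I versus q**2 gives Rg = sqrt(-3*slope) and
    # I(0) = exp(intercept); the uncertainties follow by error propagation.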
# Identify the range for the fit
id_x = (q >= q_min) & (q <= q_max)
q2 = q[id_x] ** 2
log_iq = np.log(iq[id_x])
dlog_iq = diq[id_x] / iq[id_x]
if dq is not None:
dq2 = 2 * q[id_x] * dq[id_x]
m, b, m_err, b_err = fit_method(q2, log_iq, dlog_iq)
rg = np.sqrt(-3 * m)
rg_err = 3 / (2 * rg) * m_err
rg, rg_err = round_error(rg, rg_err)
i0 = np.exp(b)
i0_err = i0 * b_err
i0, i0_err = round_error(i0, i0_err)
rg_q_max = 1.3 / rg
if rg_q_max < q[id_x][-1]:
logging.warning('initial q-max too high, 1.3/Rg={} < {}'.format(
rg_q_max, q[id_x][-1]))
if refine:
logging.warning('repeating fit with q-max={}'.format(rg_q_max))
return guinier_fit(q, iq, diq, dq=dq, q_min=q_min, q_max=rg_q_max,
view_fit=view_fit, fit_method=fit_method,
save_fname=save_fname)
if view_fit:
from sas_modeling import make_figures
q2 = np.insert(q2, 0, 0.0)
log_iq = np.insert(log_iq, 0, b)
dlog_iq = np.insert(dlog_iq, 0, b_err)
fit_line = m * q2 + b
q_range = q[id_x][[0, -1]]
fig = make_figures.plot_guinier_fit(q2, log_iq, fit_line, i0, i0_err,
rg, rg_err, dlog_iq, q_range,
save_fname=save_fname)
return i0, rg, i0_err, rg_err, fig
return i0, rg, i0_err, rg_err
def round_error(val, val_err, sig_figs=2):
'''
Round a value and its error estimate to a certain number
of significant figures (on the error estimate). By default 2
significant figures are used.
'''
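    # e.g. round_error(123.456, 7.89) -> (123.5, 7.9): the error keeps two
    # significant figures and the value is rounded to the same decimal place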
# round number to a certain number of significant figures
n = int(np.log10(val_err)) # displacement from ones place
if val_err >= 1:
n += 1
scale = 10 ** (sig_figs - n)
val = round(val * scale) / scale
val_err = round(val_err * scale) / scale
return val, val_err
def compare_guinier_fit(q, iq, diq, **args):
'''
perform Guinier fit
return I(0) and Rg
'''
fit_methods = [
fit_line_v0,
fit_line_v1,
fit_line_v2,
fit_line_v3,
fit_line_v4,
fit_line_v5,
fit_line_v6,
fit_line_v7,
fit_line_v8,
]
for fit_method in fit_methods:
save_fname = 'fit_{}_comparison.html'.format(fit_method.__name__[-2:])
i0, rg, i0_err, rg_err = guinier_fit(q, iq, diq, fit_method=fit_method,
save_fname=save_fname,
view_fit=True, **args)
def covariance(x, y):
assert len(x) == len(y)
cov = ((x - x.mean()) * (y - y.mean())).sum() / (len(x) - 1)
return cov
def bayesian():
NotImplemented
if __name__ == '__main__':
import os
import make_figures
# data_fname = 'data/1mgml_LysoSANS.sub'; skiprows = 1
skiprows = 0
data_fname = 'data/1mgml_lys_sans.dat'; q_max = 0.091 # lys
# data_fname = 'data/5mgml_nist_mab_sans.dat'; q_max = 0.0296 # mab
assert os.path.exists(data_fname)
data = np.asfortranarray(np.loadtxt(data_fname, skiprows=skiprows))
# data[:, 1:3] *= 1 / data[0, 1]
# column 4 is the effective q-values, accounting for the beam spread
if True:
plot_fname = 'I(q)_and_guinier-no_scale.html'
make_figures.plot_iq_and_guinier(data[:, 0], data[:, 1], data[:, 2],
save_fname=plot_fname)
# scale the data
# data[:, 1:3] *= 1 / data[0, 1] # set the first measured point to 1
# data[:, 1:3] *= 10 / data[0, 1] # set the first measured point to 10
# data[:, 1:3] *= 100 / data[0, 1] # set the first measured point to 100
# data[:, 1:3] *= 1000 / data[0, 1] # set the first measured point to 1000
# compare_guinier_fit(data[:, 0], data[:, 1], data[:, 2], q_max=q_max,
# refine=True)
save_fname = data_fname.replace('.dat', '.html')
i0, rg, i0_err, rg_err = guinier_fit(data[:, 0], data[:, 1], data[:, 2],
dq=data[:, 3], q_max=q_max,
view_fit=True, fit_method=fit_line_v8,
refine=True, save_fname=save_fname)
logging.debug('\m/ >.< \m/')
| gpl-3.0 | 3,665,040,384,251,789,300 | 27.567982 | 81 | 0.555231 | false |
tijme/not-your-average-web-crawler | test/test_helpers_url_helper.py | 1 | 5609 | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017 Tijme Gommers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
from nyawc.helpers.URLHelper import URLHelper
class TestUrlHelper(unittest.TestCase):
"""The TestUrlHelper class checks if the methods in the URLHelper work correctly."""
def test_make_absolute(self):
"""Check if the make absolute method works correctly."""
host = "https://example.ltd/current"
tests = [
("https://example.ltd/new.html", "new.html"),
("https://example.ltd/new", "new"),
("https://example.ltd/new1/new2", "new1/new2"),
("https://example.ltd/new1/new3", "/new1/new3"),
("https://example.ltd/current?a=a", "?a=a")
]
for test in tests:
self.assertEqual(URLHelper.make_absolute(host, test[1]), test[0])
def test_make_absolute_with_base(self):
"""Check if the make absolute method works correctly in interpreted with a base URL."""
host = "https://example.ltd/base/"
tests = [
("https://example.ltd/base/new.html", "new.html"),
("https://example.ltd/base/new", "new"),
("https://example.ltd/base/new1/new2", "new1/new2"),
("https://example.ltd/new1/new2", "/new1/new2"),
("https://example.ltd/base/?a=a", "?a=a")
]
for test in tests:
self.assertEqual(URLHelper.make_absolute(host, test[1]), test[0])
def test_make_absolute_folder_traversal(self):
"""Ensure folder traversal works correclty."""
host = "https://example.ltd/dir1/dir2/dir3"
tests = [
("https://example.ltd/dir1/dir2", "../"),
("https://example.ltd/dir1", "../../"),
("https://example.ltd", "../../../"),
("https://example.ltd", "../../../../"),
("https://example.ltd", "../../../../../")
]
for test in tests:
self.assertEqual(URLHelper.make_absolute(host, test[1]), test[0])
def test_get_protocol(self):
"""Check if the get protocol method works correctly."""
tests = [
("", "domain.tld"),
("http", "http://domain.tld"),
("arbitrary", "arbitrary://omain.tld")
]
for test in tests:
self.assertEqual(URLHelper.get_protocol(test[1]), test[0])
def test_get_subdomain(self):
"""Check if the get subdomain method works correctly."""
tests = [
("", ""),
("", "http://"),
("", "http://domain"),
("", "http://domain.tld"),
("sub1", "http://sub1.domain.tld"),
("sub2.sub1", "http://sub2.sub1.domain.tld"),
("sub3.sub2.sub1", "http://sub3.sub2.sub1.domain.tld")
]
for test in tests:
self.assertEqual(URLHelper.get_subdomain(test[1]), test[0])
def test_get_hostname(self):
"""Check if the get hostname method works correctly."""
tests = [
("", ""),
("", "http://"),
("domain", "http://domain"),
("domain", "http://domain.tld"),
("domain", "http://sub1.domain.tld"),
("domain", "http://sub2.sub1.domain.tld")
]
for test in tests:
self.assertEqual(URLHelper.get_hostname(test[1]), test[0])
def test_get_tld(self):
"""Check if the get tld method works correctly."""
tests = [
("", ""),
("", "http://"),
("", "http://domain"),
("tld", "http://domain.tld"),
("tld", "http://sub1.domain.tld"),
("tld", "http://sub2.sub1.domain.tld")
]
for test in tests:
self.assertEqual(URLHelper.get_tld(test[1]), test[0])
def test_get_ordered_params(self):
"""Check if the get ordered params method works correctly."""
val1 = URLHelper.get_ordered_params("http://example.tld?a=a&c=c&b=b&d=d")
val2 = URLHelper.get_ordered_params("http://sub.domain.ltd?c=c&b=b&a=a&d=d")
self.assertEqual(val1, val2)
def test_append_with_data_encoded_and_decoded(self):
"""Make sure values do not get decoded or encoded."""
val1 = URLHelper.append_with_data("http://example.tld/", {"val": "{{aaaa}}"})
val2 = URLHelper.append_with_data("http://example.tld/", {"val": "%7B%7Baaaa%7D%7D"})
self.assertEqual(val1, "http://example.tld/?val={{aaaa}}")
self.assertEqual(val2, "http://example.tld/?val=%7B%7Baaaa%7D%7D")
| mit | -6,238,477,947,585,220,000 | 35.660131 | 95 | 0.573721 | false |
SeedScientific/polio | source_data/migrations/0053_auto__chg_field_sourcedatapoint_error_msg.py | 1 | 71250 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'SourceDataPoint.error_msg'
db.alter_column(u'source_data_sourcedatapoint', 'error_msg', self.gf('django.db.models.fields.TextField')(null=True))
def backwards(self, orm):
# Changing field 'SourceDataPoint.error_msg'
db.alter_column(u'source_data_sourcedatapoint', 'error_msg', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'datapoints.campaign': {
'Meta': {'object_name': 'Campaign', 'db_table': "'campaign'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'get_full_name'"}),
'start_date': ('django.db.models.fields.DateField', [], {'unique': 'True'})
},
u'datapoints.indicator': {
'Meta': {'object_name': 'Indicator', 'db_table': "'indicator'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '55', 'populate_from': "'name'", 'unique_with': '()'})
},
u'datapoints.office': {
'Meta': {'object_name': 'Office', 'db_table': "'office'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
u'datapoints.region': {
'Meta': {'unique_together': "(('source', 'source_guid'),)", 'object_name': 'Region', 'db_table': "'region'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '10', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '13', 'decimal_places': '10', 'blank': 'True'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'settlement_code': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'shape_file_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '55', 'populate_from': "'full_name'"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'datapoints.source': {
'Meta': {'object_name': 'Source', 'db_table': "'source'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'source_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'})
},
'source_data.activityreport': {
'Meta': {'object_name': 'ActivityReport'},
'activity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_attendance': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_hh_pending_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_iec': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_local_leadership_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_num_hh_affected': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_num_vaccinated': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_pro_opv_cd': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_resolved': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_attendance': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_iec': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_caregiver_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_husband_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_positive': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_vaccinated': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_vcm_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_vcm_sett': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'endtime': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_appropriate_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_clinician1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_clinician2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_crowdcontroller': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_nc_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_measles': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_patients': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_penta': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_opvvaccinator': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_recorder_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_recorder_ri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_separatetally': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_stockout': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_team_allowances': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_townannouncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipds_community_leader_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_issue_reported': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_issue_resolved': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_num_children': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_num_hh': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_other_issue': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_team': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_team_allowances': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.campaignmap': {
'Meta': {'object_name': 'CampaignMap'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}),
'source_campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceCampaign']", 'unique': 'True'})
},
'source_data.clustersupervisor': {
'Meta': {'object_name': 'ClusterSupervisor'},
'coord_rfp_meeting': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'coord_smwg_meetings': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'coord_vcm_meeting': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'end_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fund_transparency': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_activities_conducted': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_activities_planned': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_endorsed': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_implementation': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_socialdata': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_special_pop': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_workplan_aligned': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_lgac': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ri_supervision': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisee_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisor_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisor_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_birthtracking': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_data': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_supervision': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.document': {
'Meta': {'object_name': 'Document'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'docfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'source_data.etljob': {
'Meta': {'object_name': 'EtlJob'},
'date_attempted': ('django.db.models.fields.DateTimeField', [], {}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'error_msg': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'success_msg': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
'source_data.healthcamp': {
'Meta': {'object_name': 'HealthCamp'},
'agencyname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'appropriate_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'clinician1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'clinician2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'crowdcontroller': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'endtime': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'formhub_uuid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_photo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_stockout': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'megaphone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nc_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_measles': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_patients': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_penta': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'opvvaccinator': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'recorder_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recorder_ri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'separatetally': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'townannouncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.indicatormap': {
'Meta': {'object_name': 'IndicatorMap'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}),
'source_indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceIndicator']", 'unique': 'True'})
},
'source_data.knowthepeople': {
'Meta': {'object_name': 'KnowThePeople'},
'brothers': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'citiesvisited': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofpax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'prefferedcity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sisters': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state_country': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.paxlistreporttraining': {
'Meta': {'object_name': 'PaxListReportTraining'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'emailaddr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofparticipant': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.phoneinventory': {
'Meta': {'object_name': 'PhoneInventory'},
'asset_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'colour_phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'telephone_no': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmbirthrecord': {
'Meta': {'object_name': 'PracticeVCMBirthRecord'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'datereport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'householdnumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofchild': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm0dose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmnamecattended': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmrilink': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmsettcoordinates': {
'Meta': {'object_name': 'PracticeVCMSettCoordinates'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmphone': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmsummary': {
'Meta': {'object_name': 'PracticeVCMSummary'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msd_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.processstatus': {
'Meta': {'object_name': 'ProcessStatus'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status_text': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'source_data.regionmap': {
'Meta': {'object_name': 'RegionMap'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}),
'source_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceRegion']", 'unique': 'True'})
},
u'source_data.sourcecampaign': {
'Meta': {'unique_together': "(('source', 'campaign_string'),)", 'object_name': 'SourceCampaign'},
'campaign_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.sourcedatapoint': {
'Meta': {'unique_together': "(('source', 'source_guid', 'indicator_string'),)", 'object_name': 'SourceDataPoint'},
'campaign_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cell_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
'error_msg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'row_number': ('django.db.models.fields.IntegerField', [], {}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"})
},
u'source_data.sourceindicator': {
'Meta': {'unique_together': "(('source', 'indicator_string'),)", 'object_name': 'SourceIndicator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.sourceregion': {
'Meta': {'object_name': 'SourceRegion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'lon': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'region_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlement_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmbirthrecord': {
'Meta': {'object_name': 'VCMBirthRecord'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'datereport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'householdnumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofchild': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm0dose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmnamecattended': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmrilink': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsettlement': {
'Meta': {'object_name': 'VCMSettlement'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmphone': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsummary': {
'Meta': {'object_name': 'VCMSummary'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msd_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsummarynew': {
'Meta': {'object_name': 'VCMSummaryNew'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_msd1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_msd2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax3': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax4': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax6': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax7': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax8': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax9': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_display_msd3': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_tot_missed_check': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_12_59months': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_2_11months': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_census': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_missed': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_newborns': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax12_59mo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax2_11mo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vaxnewborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vwsregister': {
'Meta': {'object_name': 'VWSRegister'},
'acceptphoneresponsibility': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'datephonecollected': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fname_vws': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lname_vws': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'personal_phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'wardcode': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['source_data'] | agpl-3.0 | 1,803,691,983,139,624,700 | 91.055556 | 195 | 0.568393 | false |
RandyMoore/mySiteDjango | my_site_django/weblog/models.py | 1 | 3693 | from django.db import models
from django.db.models.fields import CharField
from django.utils.safestring import mark_safe
from markdown import markdown
from pygments import highlight
from pygments.formatters import get_formatter_by_name
from pygments.lexers import get_lexer_by_name
from wagtail.core import blocks
from wagtail.core.blocks import BlockQuoteBlock, RawHTMLBlock
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.embeds.blocks import EmbedBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.search import index
# Custom blocks for StreamField. From https://gist.github.com/frankwiles/74a882f16704db9caa27
# See also http://docs.wagtail.io/en/v1.9/releases/1.6.html#render-and-render-basic-methods-on-streamfield-blocks-now-accept-a-context-keyword-argument
class CodeBlock(blocks.StructBlock):
"""
Code Highlighting Block
"""
LANGUAGE_CHOICES = (
('python', 'Python'),
('bash', 'Bash/Shell'),
('html', 'HTML'),
('css', 'CSS'),
('scss', 'SCSS'),
)
language = blocks.ChoiceBlock(choices=LANGUAGE_CHOICES)
code = blocks.TextBlock()
class Meta:
icon = 'code'
def render(self, value, context=None):
src = value['code'].strip('\n')
lang = value['language']
lexer = get_lexer_by_name(lang)
formatter = get_formatter_by_name(
'html',
linenos=None,
cssclass='codehilite',
style='default',
noclasses=False,
)
return mark_safe(highlight(src, lexer, formatter))
class MarkDownBlock(blocks.TextBlock):
""" MarkDown Block """
class Meta:
icon = 'code'
def render_basic(self, value, context=None):
md = markdown(
value,
[
'markdown.extensions.fenced_code',
'codehilite',
],
)
return mark_safe(md)
# Page Models
class BlogIndexPage(Page):
subheading = CharField(max_length=255)
content_panels = Page.content_panels + [
FieldPanel('subheading', classname="full"),
]
@property
def blogs(self):
blogs = WeblogPage.objects.live().descendant_of(self)
blogs = blogs.order_by('-date')
return blogs
def get_context(self, request):
blogs = self.blogs
context = super(BlogIndexPage, self).get_context(request)
context['blogs'] = blogs
context['title'] = self.title
context['subheading'] = self.subheading
return context
class WeblogPage(Page):
body = StreamField([
('heading', blocks.CharBlock(classname="full title")),
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock()),
('html', RawHTMLBlock()),
('block_quote', BlockQuoteBlock()),
('embed', EmbedBlock()),
('code', CodeBlock()),
('markdown', MarkDownBlock()),
])
subheading = CharField(max_length=255)
date = models.DateField("Post date")
search_fields = Page.search_fields + [
index.SearchField('body'),
index.FilterField('date'),
]
content_panels = Page.content_panels + [
FieldPanel('subheading', classname="full"),
FieldPanel('date'),
StreamFieldPanel('body', classname="full"),
]
def get_context(self, request):
context = super(WeblogPage, self).get_context(request)
context['title'] = self.title
context['subheading'] = self.subheading
context['body'] = self.body
return context
| gpl-3.0 | -1,130,170,314,572,524,900 | 27.19084 | 151 | 0.629299 | false |
brianchoate/chef2ldif | setup.py | 1 | 1553 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='chef2ldif',
version='0.0.1',
description="Small tool to read in user data_bags and generate LDIF records.",
long_description=readme + '\n\n' + history,
author="Brian Choate",
author_email='[email protected]',
url='https://github.com/brianchoate/chef2ldif',
packages=[
'chef2ldif',
],
package_dir={'chef2ldif':
'chef2ldif'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='chef2ldif',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
| bsd-3-clause | 3,532,986,478,273,784,000 | 26.245614 | 82 | 0.618158 | false |
citrix-openstack-build/neutron-vpnaas | neutron_vpnaas/db/vpn/vpn_db.py | 1 | 31697 | # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo.utils import excutils
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.common import constants as n_constants
from neutron.db import common_db_mixin as base_db
from neutron.db import l3_agentschedulers_db as l3_agent_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import vpnaas
from neutron.i18n import _LW
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.plugins.common import utils
from neutron_vpnaas.db.vpn import vpn_validator
LOG = logging.getLogger(__name__)
class IPsecPeerCidr(model_base.BASEV2):
"""Internal representation of a IPsec Peer Cidrs."""
cidr = sa.Column(sa.String(32), nullable=False, primary_key=True)
ipsec_site_connection_id = sa.Column(
sa.String(36),
sa.ForeignKey('ipsec_site_connections.id',
ondelete="CASCADE"),
primary_key=True)
class IPsecPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IPsecPolicy Object."""
__tablename__ = 'ipsecpolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
transform_protocol = sa.Column(sa.Enum("esp", "ah", "ah-esp",
name="ipsec_transform_protocols"),
nullable=False)
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
encapsulation_mode = sa.Column(sa.Enum("tunnel", "transport",
name="ipsec_encapsulations"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IKEPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IKEPolicy Object."""
__tablename__ = 'ikepolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
phase1_negotiation_mode = sa.Column(sa.Enum("main",
name="ike_phase1_mode"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
ike_version = sa.Column(sa.Enum("v1", "v2", name="ike_versions"),
nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IPsecSiteConnection(model_base.BASEV2,
models_v2.HasId, models_v2.HasTenant):
"""Represents a IPsecSiteConnection Object."""
__tablename__ = 'ipsec_site_connections'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
peer_address = sa.Column(sa.String(255), nullable=False)
peer_id = sa.Column(sa.String(255), nullable=False)
route_mode = sa.Column(sa.String(8), nullable=False)
mtu = sa.Column(sa.Integer, nullable=False)
initiator = sa.Column(sa.Enum("bi-directional", "response-only",
name="vpn_initiators"), nullable=False)
auth_mode = sa.Column(sa.String(16), nullable=False)
psk = sa.Column(sa.String(255), nullable=False)
dpd_action = sa.Column(sa.Enum("hold", "clear",
"restart", "disabled",
"restart-by-peer", name="vpn_dpd_actions"),
nullable=False)
dpd_interval = sa.Column(sa.Integer, nullable=False)
dpd_timeout = sa.Column(sa.Integer, nullable=False)
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
vpnservice_id = sa.Column(sa.String(36),
sa.ForeignKey('vpnservices.id'),
nullable=False)
ipsecpolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ipsecpolicies.id'),
nullable=False)
ikepolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ikepolicies.id'),
nullable=False)
ipsecpolicy = orm.relationship(
IPsecPolicy, backref='ipsec_site_connection')
ikepolicy = orm.relationship(IKEPolicy, backref='ipsec_site_connection')
peer_cidrs = orm.relationship(IPsecPeerCidr,
backref='ipsec_site_connection',
lazy='joined',
cascade='all, delete, delete-orphan')
class VPNService(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 VPNService Object."""
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
nullable=False)
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'),
nullable=False)
subnet = orm.relationship(models_v2.Subnet)
router = orm.relationship(l3_db.Router)
ipsec_site_connections = orm.relationship(
IPsecSiteConnection,
backref='vpnservice',
cascade="all, delete-orphan")
class VPNPluginDb(vpnaas.VPNPluginBase, base_db.CommonDbMixin):
"""VPN plugin database class using SQLAlchemy models."""
def _get_validator(self):
"""Obtain validator to use for attribute validation.
        Subclasses may override this with a different validator, as needed.
Note: some UTs will directly create a VPNPluginDb object and then
call its methods, instead of creating a VPNDriverPlugin, which
will have a service driver associated that will provide a
validator object. As a result, we use the reference validator here.
"""
return vpn_validator.VpnReferenceValidator()
def update_status(self, context, model, v_id, status):
with context.session.begin(subtransactions=True):
v_db = self._get_resource(context, model, v_id)
v_db.update({'status': status})
def _get_resource(self, context, model, v_id):
try:
r = self._get_by_id(context, model, v_id)
except exc.NoResultFound:
with excutils.save_and_reraise_exception(reraise=False) as ctx:
if issubclass(model, IPsecSiteConnection):
raise vpnaas.IPsecSiteConnectionNotFound(
ipsec_site_conn_id=v_id
)
elif issubclass(model, IKEPolicy):
raise vpnaas.IKEPolicyNotFound(ikepolicy_id=v_id)
elif issubclass(model, IPsecPolicy):
raise vpnaas.IPsecPolicyNotFound(ipsecpolicy_id=v_id)
elif issubclass(model, VPNService):
raise vpnaas.VPNServiceNotFound(vpnservice_id=v_id)
ctx.reraise = True
return r
def assert_update_allowed(self, obj):
status = getattr(obj, 'status', None)
_id = getattr(obj, 'id', None)
if utils.in_pending_status(status):
raise vpnaas.VPNStateInvalidToUpdate(id=_id, state=status)
def _make_ipsec_site_connection_dict(self, ipsec_site_conn, fields=None):
res = {'id': ipsec_site_conn['id'],
'tenant_id': ipsec_site_conn['tenant_id'],
'name': ipsec_site_conn['name'],
'description': ipsec_site_conn['description'],
'peer_address': ipsec_site_conn['peer_address'],
'peer_id': ipsec_site_conn['peer_id'],
'route_mode': ipsec_site_conn['route_mode'],
'mtu': ipsec_site_conn['mtu'],
'auth_mode': ipsec_site_conn['auth_mode'],
'psk': ipsec_site_conn['psk'],
'initiator': ipsec_site_conn['initiator'],
'dpd': {
'action': ipsec_site_conn['dpd_action'],
'interval': ipsec_site_conn['dpd_interval'],
'timeout': ipsec_site_conn['dpd_timeout']
},
'admin_state_up': ipsec_site_conn['admin_state_up'],
'status': ipsec_site_conn['status'],
'vpnservice_id': ipsec_site_conn['vpnservice_id'],
'ikepolicy_id': ipsec_site_conn['ikepolicy_id'],
'ipsecpolicy_id': ipsec_site_conn['ipsecpolicy_id'],
'peer_cidrs': [pcidr['cidr']
for pcidr in ipsec_site_conn['peer_cidrs']]
}
return self._fields(res, fields)
def _get_subnet_ip_version(self, context, vpnservice_id):
vpn_service_db = self._get_vpnservice(context, vpnservice_id)
subnet = vpn_service_db.subnet['cidr']
ip_version = netaddr.IPNetwork(subnet).version
return ip_version
def create_ipsec_site_connection(self, context, ipsec_site_connection):
ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
validator = self._get_validator()
validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon)
tenant_id = self._get_tenant_id_for_create(context, ipsec_sitecon)
with context.session.begin(subtransactions=True):
#Check permissions
self._get_resource(context,
VPNService,
ipsec_sitecon['vpnservice_id'])
self._get_resource(context,
IKEPolicy,
ipsec_sitecon['ikepolicy_id'])
self._get_resource(context,
IPsecPolicy,
ipsec_sitecon['ipsecpolicy_id'])
vpnservice_id = ipsec_sitecon['vpnservice_id']
ip_version = self._get_subnet_ip_version(context, vpnservice_id)
validator.validate_ipsec_site_connection(context,
ipsec_sitecon,
ip_version)
ipsec_site_conn_db = IPsecSiteConnection(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ipsec_sitecon['name'],
description=ipsec_sitecon['description'],
peer_address=ipsec_sitecon['peer_address'],
peer_id=ipsec_sitecon['peer_id'],
route_mode='static',
mtu=ipsec_sitecon['mtu'],
auth_mode='psk',
psk=ipsec_sitecon['psk'],
initiator=ipsec_sitecon['initiator'],
dpd_action=ipsec_sitecon['dpd_action'],
dpd_interval=ipsec_sitecon['dpd_interval'],
dpd_timeout=ipsec_sitecon['dpd_timeout'],
admin_state_up=ipsec_sitecon['admin_state_up'],
status=constants.PENDING_CREATE,
vpnservice_id=vpnservice_id,
ikepolicy_id=ipsec_sitecon['ikepolicy_id'],
ipsecpolicy_id=ipsec_sitecon['ipsecpolicy_id']
)
context.session.add(ipsec_site_conn_db)
for cidr in ipsec_sitecon['peer_cidrs']:
peer_cidr_db = IPsecPeerCidr(
cidr=cidr,
ipsec_site_connection_id=ipsec_site_conn_db['id']
)
context.session.add(peer_cidr_db)
return self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
def update_ipsec_site_connection(
self, context,
ipsec_site_conn_id, ipsec_site_connection):
ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
changed_peer_cidrs = False
validator = self._get_validator()
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context,
IPsecSiteConnection,
ipsec_site_conn_id)
vpnservice_id = ipsec_site_conn_db['vpnservice_id']
ip_version = self._get_subnet_ip_version(context, vpnservice_id)
validator.assign_sensible_ipsec_sitecon_defaults(
ipsec_sitecon, ipsec_site_conn_db)
validator.validate_ipsec_site_connection(
context,
ipsec_sitecon,
ip_version)
self.assert_update_allowed(ipsec_site_conn_db)
if "peer_cidrs" in ipsec_sitecon:
changed_peer_cidrs = True
old_peer_cidr_list = ipsec_site_conn_db['peer_cidrs']
old_peer_cidr_dict = dict(
(peer_cidr['cidr'], peer_cidr)
for peer_cidr in old_peer_cidr_list)
new_peer_cidr_set = set(ipsec_sitecon["peer_cidrs"])
old_peer_cidr_set = set(old_peer_cidr_dict)
new_peer_cidrs = list(new_peer_cidr_set)
for peer_cidr in old_peer_cidr_set - new_peer_cidr_set:
context.session.delete(old_peer_cidr_dict[peer_cidr])
for peer_cidr in new_peer_cidr_set - old_peer_cidr_set:
pcidr = IPsecPeerCidr(
cidr=peer_cidr,
ipsec_site_connection_id=ipsec_site_conn_id)
context.session.add(pcidr)
del ipsec_sitecon["peer_cidrs"]
if ipsec_sitecon:
ipsec_site_conn_db.update(ipsec_sitecon)
result = self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
if changed_peer_cidrs:
result['peer_cidrs'] = new_peer_cidrs
return result
def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context, IPsecSiteConnection, ipsec_site_conn_id
)
context.session.delete(ipsec_site_conn_db)
def _get_ipsec_site_connection(
self, context, ipsec_site_conn_id):
return self._get_resource(
context, IPsecSiteConnection, ipsec_site_conn_id)
def get_ipsec_site_connection(self, context,
ipsec_site_conn_id, fields=None):
ipsec_site_conn_db = self._get_ipsec_site_connection(
context, ipsec_site_conn_id)
return self._make_ipsec_site_connection_dict(
ipsec_site_conn_db, fields)
def get_ipsec_site_connections(self, context, filters=None, fields=None):
return self._get_collection(context, IPsecSiteConnection,
self._make_ipsec_site_connection_dict,
filters=filters, fields=fields)
def update_ipsec_site_conn_status(self, context, conn_id, new_status):
with context.session.begin():
self._update_connection_status(context, conn_id, new_status, True)
def _update_connection_status(self, context, conn_id, new_status,
updated_pending):
"""Update the connection status, if changed.
        If the connection is not in a pending state, unconditionally update
        the status. Likewise, if it is in a pending state and there is an
        indication that the status has changed, update the database.
"""
try:
conn_db = self._get_ipsec_site_connection(context, conn_id)
except vpnaas.IPsecSiteConnectionNotFound:
return
if not utils.in_pending_status(conn_db.status) or updated_pending:
conn_db.status = new_status
def _make_ikepolicy_dict(self, ikepolicy, fields=None):
res = {'id': ikepolicy['id'],
'tenant_id': ikepolicy['tenant_id'],
'name': ikepolicy['name'],
'description': ikepolicy['description'],
'auth_algorithm': ikepolicy['auth_algorithm'],
'encryption_algorithm': ikepolicy['encryption_algorithm'],
'phase1_negotiation_mode': ikepolicy['phase1_negotiation_mode'],
'lifetime': {
'units': ikepolicy['lifetime_units'],
'value': ikepolicy['lifetime_value'],
},
'ike_version': ikepolicy['ike_version'],
'pfs': ikepolicy['pfs']
}
return self._fields(res, fields)
def create_ikepolicy(self, context, ikepolicy):
ike = ikepolicy['ikepolicy']
tenant_id = self._get_tenant_id_for_create(context, ike)
lifetime_info = ike.get('lifetime', [])
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ike_db = IKEPolicy(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ike['name'],
description=ike['description'],
auth_algorithm=ike['auth_algorithm'],
encryption_algorithm=ike['encryption_algorithm'],
phase1_negotiation_mode=ike['phase1_negotiation_mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
ike_version=ike['ike_version'],
pfs=ike['pfs']
)
context.session.add(ike_db)
return self._make_ikepolicy_dict(ike_db)
def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):
ike = ikepolicy['ikepolicy']
with context.session.begin(subtransactions=True):
ikepolicy = context.session.query(IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first()
if ikepolicy:
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
if ike:
lifetime_info = ike.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ike['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ike['lifetime_value'] = lifetime_info['value']
ike_db.update(ike)
return self._make_ikepolicy_dict(ike_db)
def delete_ikepolicy(self, context, ikepolicy_id):
with context.session.begin(subtransactions=True):
ikepolicy = context.session.query(IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first()
if ikepolicy:
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
context.session.delete(ike_db)
def get_ikepolicy(self, context, ikepolicy_id, fields=None):
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
return self._make_ikepolicy_dict(ike_db, fields)
def get_ikepolicies(self, context, filters=None, fields=None):
return self._get_collection(context, IKEPolicy,
self._make_ikepolicy_dict,
filters=filters, fields=fields)
def _make_ipsecpolicy_dict(self, ipsecpolicy, fields=None):
res = {'id': ipsecpolicy['id'],
'tenant_id': ipsecpolicy['tenant_id'],
'name': ipsecpolicy['name'],
'description': ipsecpolicy['description'],
'transform_protocol': ipsecpolicy['transform_protocol'],
'auth_algorithm': ipsecpolicy['auth_algorithm'],
'encryption_algorithm': ipsecpolicy['encryption_algorithm'],
'encapsulation_mode': ipsecpolicy['encapsulation_mode'],
'lifetime': {
'units': ipsecpolicy['lifetime_units'],
'value': ipsecpolicy['lifetime_value'],
},
'pfs': ipsecpolicy['pfs']
}
return self._fields(res, fields)
def create_ipsecpolicy(self, context, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
tenant_id = self._get_tenant_id_for_create(context, ipsecp)
lifetime_info = ipsecp['lifetime']
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ipsecp_db = IPsecPolicy(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ipsecp['name'],
description=ipsecp['description'],
transform_protocol=ipsecp['transform_'
'protocol'],
auth_algorithm=ipsecp['auth_algorithm'],
encryption_algorithm=ipsecp['encryption_'
'algorithm'],
encapsulation_mode=ipsecp['encapsulation_'
'mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
pfs=ipsecp['pfs'])
context.session.add(ipsecp_db)
return self._make_ipsecpolicy_dict(ipsecp_db)
def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
with context.session.begin(subtransactions=True):
ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first()
if ipsecpolicy:
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsecp_db = self._get_resource(context,
IPsecPolicy,
ipsecpolicy_id)
if ipsecp:
lifetime_info = ipsecp.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ipsecp['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ipsecp['lifetime_value'] = lifetime_info['value']
ipsecp_db.update(ipsecp)
return self._make_ipsecpolicy_dict(ipsecp_db)
def delete_ipsecpolicy(self, context, ipsecpolicy_id):
with context.session.begin(subtransactions=True):
ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first()
if ipsecpolicy:
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
context.session.delete(ipsec_db)
def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):
ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
return self._make_ipsecpolicy_dict(ipsec_db, fields)
def get_ipsecpolicies(self, context, filters=None, fields=None):
return self._get_collection(context, IPsecPolicy,
self._make_ipsecpolicy_dict,
filters=filters, fields=fields)
def _make_vpnservice_dict(self, vpnservice, fields=None):
res = {'id': vpnservice['id'],
'name': vpnservice['name'],
'description': vpnservice['description'],
'tenant_id': vpnservice['tenant_id'],
'subnet_id': vpnservice['subnet_id'],
'router_id': vpnservice['router_id'],
'admin_state_up': vpnservice['admin_state_up'],
'status': vpnservice['status']}
return self._fields(res, fields)
def create_vpnservice(self, context, vpnservice):
vpns = vpnservice['vpnservice']
tenant_id = self._get_tenant_id_for_create(context, vpns)
validator = self._get_validator()
with context.session.begin(subtransactions=True):
validator.validate_vpnservice(context, vpns)
vpnservice_db = VPNService(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=vpns['name'],
description=vpns['description'],
subnet_id=vpns['subnet_id'],
router_id=vpns['router_id'],
admin_state_up=vpns['admin_state_up'],
status=constants.PENDING_CREATE)
context.session.add(vpnservice_db)
return self._make_vpnservice_dict(vpnservice_db)
def update_vpnservice(self, context, vpnservice_id, vpnservice):
vpns = vpnservice['vpnservice']
with context.session.begin(subtransactions=True):
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
self.assert_update_allowed(vpns_db)
if vpns:
vpns_db.update(vpns)
return self._make_vpnservice_dict(vpns_db)
def delete_vpnservice(self, context, vpnservice_id):
with context.session.begin(subtransactions=True):
if context.session.query(IPsecSiteConnection).filter_by(
vpnservice_id=vpnservice_id
).first():
raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id)
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
context.session.delete(vpns_db)
def _get_vpnservice(self, context, vpnservice_id):
return self._get_resource(context, VPNService, vpnservice_id)
def get_vpnservice(self, context, vpnservice_id, fields=None):
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
return self._make_vpnservice_dict(vpns_db, fields)
def get_vpnservices(self, context, filters=None, fields=None):
return self._get_collection(context, VPNService,
self._make_vpnservice_dict,
filters=filters, fields=fields)
def check_router_in_use(self, context, router_id):
vpnservices = self.get_vpnservices(
context, filters={'router_id': [router_id]})
if vpnservices:
raise vpnaas.RouterInUseByVPNService(
router_id=router_id,
vpnservice_id=vpnservices[0]['id'])
def check_subnet_in_use(self, context, subnet_id):
with context.session.begin(subtransactions=True):
vpnservices = context.session.query(VPNService).filter_by(
subnet_id=subnet_id
).first()
if vpnservices:
raise vpnaas.SubnetInUseByVPNService(
subnet_id=subnet_id,
vpnservice_id=vpnservices['id'])
class VPNPluginRpcDbMixin():
def _get_agent_hosting_vpn_services(self, context, host):
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, n_constants.AGENT_TYPE_L3, host)
if not agent.admin_state_up:
return []
query = context.session.query(VPNService)
query = query.join(IPsecSiteConnection)
query = query.join(IKEPolicy)
query = query.join(IPsecPolicy)
query = query.join(IPsecPeerCidr)
query = query.join(l3_agent_db.RouterL3AgentBinding,
l3_agent_db.RouterL3AgentBinding.router_id ==
VPNService.router_id)
query = query.filter(
l3_agent_db.RouterL3AgentBinding.l3_agent_id == agent.id)
return query
def update_status_by_agent(self, context, service_status_info_list):
"""Updating vpnservice and vpnconnection status.
:param context: context variable
:param service_status_info_list: list of status
The structure is
[{id: vpnservice_id,
status: ACTIVE|DOWN|ERROR,
updated_pending_status: True|False
ipsec_site_connections: {
ipsec_site_connection_id: {
status: ACTIVE|DOWN|ERROR,
updated_pending_status: True|False
}
}]
        The agent will set updated_pending_status to True
        when it updates any pending status.
"""
with context.session.begin(subtransactions=True):
for vpnservice in service_status_info_list:
try:
vpnservice_db = self._get_vpnservice(
context, vpnservice['id'])
except vpnaas.VPNServiceNotFound:
LOG.warn(_LW('vpnservice %s in db is already deleted'),
vpnservice['id'])
continue
if (not utils.in_pending_status(vpnservice_db.status)
or vpnservice['updated_pending_status']):
vpnservice_db.status = vpnservice['status']
for conn_id, conn in vpnservice[
'ipsec_site_connections'].items():
self._update_connection_status(
context, conn_id, conn['status'],
conn['updated_pending_status'])
| apache-2.0 | 1,094,159,555,559,583,400 | 46.23845 | 79 | 0.562482 | false |
LiveChains/Live-Coin | contrib/spendfrom/spendfrom.py | 1 | 10087 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a livecoind or Livecoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting LVC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_livecoin_config(dbdir):
"""Read the livecoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "livecoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 113144 if testnet else 13144
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the livecoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(livecoind):
info = livecoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
livecoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = livecoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(livecoind):
address_summary = dict()
address_to_account = dict()
for info in livecoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = livecoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = livecoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(livecoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(livecoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f LVC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to livecoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = livecoind.createrawtransaction(inputs, outputs)
signed_rawtx = livecoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(livecoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = livecoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(livecoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = livecoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(livecoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in - total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in - total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of livecoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
    config = read_livecoin_config(options.datadir)
if options.testnet: config['testnet'] = True
livecoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(livecoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(livecoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(livecoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(livecoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = livecoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| mit | 6,781,185,291,713,153,000 | 36.779026 | 111 | 0.621096 | false |
Hiestaa/RLViz | src/problems/base.py | 1 | 6050 | # -*- coding: utf8 -*-
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
import gym
from parametizable import Parametizable
from consts import ParamsTypes, Spaces
class ProblemException(Exception):
pass
class BaseProblem(Parametizable):
"""
    Mostly a wrapper around gym's environment, but also provides additional
    parameters and statistics to play with.
    The class is set up for a default behaviour on any gym environment. When
subclassing, part of the job should already be done by setting up the
right parameters. Additional specific behavior can be obtained by overriding
the functions but care should be taken to call the parent's corresponding
method using `super(<Class>, self)`
"""
# These will be or-ed at each step to know whether the environment
# considers the episode terminated
EPISODE_TERMINATION_CRITERIA = [
lambda self, **kwargs: self._done,
lambda self, stepI, **kwargs: stepI >= self.maxSteps
]
PARAMS = {
'maxSteps': ParamsTypes.Number
}
PARAMS_DOMAIN = {
'maxSteps': {
'range': (-1, float('inf')),
'values': [100, 500, 1000]
},
}
PARAMS_DEFAULT = {
'maxSteps': 500
}
PARAMS_DESCRIPTION = {
'maxSteps': "Maximum number of steps per episode. Set to -1 to disable."
}
# Override to specify a Gym environment that should be loaded.
GYM_ENVIRONMENT_NAME = None
# Override to specify compatible algorithm
DOMAIN = {
'action': Spaces.Discrete,
'state': Spaces.Discrete
}
# optional: override to give a specific name to each action
# action space is assumed to be discrete and 1 dimensional.
# first action should be in first position, second action in second,
# and so on.
ACTION_NAMES = []
# optional: override to give a specific name to each dimension of
# the state space. List should be in the same order of the dimensions
# of the state space (dimension 1 in first position, etc...)
STATE_DIMENSION_NAMES = []
def __init__(self, **kwargs):
super(BaseProblem, self).__init__(**kwargs)
self._done = False
self._env = None
self.observationSpace = None
self.actionSpace = None
@property
def env(self):
return self._env
def terminate(self):
self._done = True
def episodeDone(self, stepI):
return any(
crit(self, stepI=stepI)
for crit in self.EPISODE_TERMINATION_CRITERIA)
def setup(self):
"""
        Set up the environment - this shouldn't be done in the constructor, to
        enable override.
        This assumes the problem uses a gym environment. Override otherwise.
"""
logger.info("[%s] Problem setup" % self.__class__.__name__)
if self.GYM_ENVIRONMENT_NAME is None:
raise NotImplementedError()
self._env = gym.make(self.GYM_ENVIRONMENT_NAME)
self.observationSpace = self._env.observation_space
self.actionSpace = self._env.action_space
###
# Some helper function to retrieve information about the environment.
# These are pre-implemented for any gym environment, and should
# be overriden otherwise
###
def getStatesList(self):
"""
Returns the list of possible states.
Override this function if you're not defining a gym environment.
This function should only be called if the problem bears a discrete
state space.
"""
if self.env is None:
raise NotImplementedError()
if self.DOMAIN['state'] == Spaces.Discrete:
            return range(self.env.observation_space.n)
raise ProblemException("Continuous state space")
def getStatesDim(self):
"""
Return the number of dimension of the state space
"""
if self.env is None:
raise NotImplementedError()
return len(self.env.observation_space.low)
def getStatesBounds(self):
"""
Returns the max and min values each dimension can take.
These are returned as two tuples, `low` and `high`, where both
are a list of as many elements as there is dimension to the state space.
"""
if self.env is None:
raise NotImplementedError()
return (
self.env.observation_space.low,
self.env.observation_space.high)
def getActionsList(self):
"""
Returns the list of possible actions.
Override this function if you're not defining a gym environment.
This function should only be called if the problem bears a discrete
state space.
"""
if self.env is None:
raise NotImplementedError()
if self.DOMAIN['action'] == Spaces.Discrete:
return range(self.env.action_space.n)
raise NotImplementedError()
# Problem execution methods
def step(self, action):
"""
The agent take the given action and receives back the new state,
the reward, whether the episode is terminated and optionally
some additional debug information.
Override this function if you're not defining a gym environment.
"""
newObservation, reward, self._done, info = self._env.step(action)
return newObservation, reward, self._done, info
def reset(self):
"""
Reset the state of the environment for a new episode.
Override this function if you're not defining a gym environment.
"""
self._done = False
return self._env.reset()
def render(self, close=False):
"""
Render the environment (server-side)
Override this function if you're not defining a gym environment.
"""
return self._env.render(close=close)
def release(self):
"""
Release handles and memory if manual intervention is required.
"""
pass
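# Illustrative subclass sketch (not part of the original module): it shows how
# the hooks above are typically combined when wrapping a gym environment. The
# environment name 'CartPole-v0', the parameter values and Spaces.Continuous
# are assumptions made for the example, not requirements of BaseProblem.
#
# class CartPoleProblem(BaseProblem):
#     """Wrap gym's CartPole: discrete actions, continuous 4-dim states."""
#     GYM_ENVIRONMENT_NAME = 'CartPole-v0'
#     DOMAIN = {
#         'action': Spaces.Discrete,
#         'state': Spaces.Continuous
#     }
#     ACTION_NAMES = ['push left', 'push right']
#     STATE_DIMENSION_NAMES = [
#         'cart position', 'cart velocity', 'pole angle', 'pole velocity']
#
# A typical driver loop would then look like (also illustrative):
#
# problem = CartPoleProblem(maxSteps=200)
# problem.setup()
# state = problem.reset()
# for stepI in range(problem.maxSteps):
#     state, reward, done, info = problem.step(problem.actionSpace.sample())
#     if problem.episodeDone(stepI):
#         break
# problem.release()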
| mit | -6,629,210,410,368,838,000 | 31.180851 | 80 | 0.629421 | false |
JNRowe/cupage | setup.py | 1 | 2052 | #! /usr/bin/env python3
"""setup.py - Setuptools tasks and config for cupage."""
# Copyright © 2009-2014 James Rowe <[email protected]>
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from typing import List
from setuptools import setup
from setuptools.command.test import test
class PytestTest(test):
def finalize_options(self):
test.finalize_options(self)
self.test_args = [
'tests/',
]
self.test_suite = True
def run_tests(self):
from sys import exit
from pytest import main
exit(main(self.test_args))
def parse_requires(file: str) -> List[str]:
deps = []
with open(f'extra/{file}') as req_file:
entries = [s.split('#')[0].strip() for s in req_file.readlines()]
for dep in entries:
if not dep or dep.startswith('#'):
continue
elif dep.startswith('-r '):
deps.extend(parse_requires(dep.split()[1]))
continue
deps.append(dep)
return deps
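# Illustrative example (the file names and packages below are made up, not part
# of this project): given
#
#   extra/requirements.txt:
#       httplib2>=0.7.4  # runtime HTTP client
#       -r requirements-parse.txt
#   extra/requirements-parse.txt:
#       lxml
#
# parse_requires('requirements.txt') returns ['httplib2>=0.7.4', 'lxml']:
# comments are stripped and '-r' lines are followed recursively, mirroring
# pip's include mechanism mentioned below.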
# Note: We can't use setuptools’ requirements support as it only accepts a list
# value, and doesn’t support pip’s inclusion mechanism
install_requires = parse_requires('requirements.txt')
tests_require = parse_requires('requirements-test.txt')
if __name__ == '__main__':
setup(
install_requires=install_requires,
tests_require=tests_require,
cmdclass={'test': PytestTest},
)
| gpl-3.0 | -2,699,607,000,304,151,600 | 30.461538 | 78 | 0.669927 | false |
blabla1337/skf-flask | skf/rabbit_mq_workers/deletion-worker.py | 1 | 3009 | #!/usr/bin/env python
import pika, time, random, yaml
from os import path
from skf import settings
from kubernetes import client, config
creds = pika.PlainCredentials('admin', 'admin-skf-secret')
connection = pika.BlockingConnection(pika.ConnectionParameters(host=settings.RABBIT_MQ_CONN_STRING, credentials=creds))
channel = connection.channel()
channel.queue_declare(queue='deletion_qeue')
def delete_container(rpc_body):
user_id = string_split_user_id(rpc_body)
deployment = string_split_deployment(rpc_body)
delete_deployment(deployment, user_id)
delete_service(deployment, user_id)
time.sleep(3)
return {'message': 'If present, the container image was deleted from the cluster!'}
def delete_deployment(instance_name, user_id):
try:
config.load_kube_config()
api_instance = client.AppsV1Api()
api_response = api_instance.delete_namespaced_deployment(
name=instance_name,
namespace=user_id,
body=client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=5))
print("Deployment deleted. status='%s'" % str(api_response.status))
return {'message': 'Deployment deleted.'}
except:
        return {'message': 'Kubernetes configuration is either missing or done incorrectly, error deleting deployment!'}
def delete_service(instance_name, user_id):
try:
config.load_kube_config()
api_instance = client.CoreV1Api()
api_response = api_instance.delete_namespaced_service(
name=instance_name,
namespace=user_id,
body=client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=5))
print("Deployment deleted. status='%s'" % str(api_response.status))
return {'message': 'Deployment deleted.'}
except:
        return {'message': 'Kubernetes configuration is either missing or done incorrectly, error deleting service!'}
def string_split_user_id(body):
try:
user_id = body.split(':')
return user_id[1]
except:
        return {'message': 'Failed to delete, error no user_id found!'}
def string_split_deployment(body):
try:
deployment = body.split(':')
return deployment[0]
except:
return {'message': 'Failed to delete, error no deployment found!'}
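# The two helpers above define the wire format of a deletion request: the RPC
# body is the plain string "<deployment-name>:<user-id>". A client-side sketch
# (illustrative only; it reuses this worker's credentials/queue name, assumes
# an `import uuid`, and omits consuming the reply from the callback queue):
#
# def request_deletion(deployment, user_id):
#     conn = pika.BlockingConnection(pika.ConnectionParameters(
#         host=settings.RABBIT_MQ_CONN_STRING,
#         credentials=pika.PlainCredentials('admin', 'admin-skf-secret')))
#     ch = conn.channel()
#     reply_queue = ch.queue_declare(queue='', exclusive=True).method.queue
#     ch.basic_publish(exchange='',
#                      routing_key='deletion_qeue',
#                      properties=pika.BasicProperties(
#                          reply_to=reply_queue,
#                          correlation_id=str(uuid.uuid4())),
#                      body='%s:%s' % (deployment, user_id))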
def on_request(ch, method, props, body):
response = delete_container(str(body, 'utf-8'))
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id,
expiration='30000'),
body=str(response))
ch.basic_ack(delivery_tag=method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='deletion_qeue', on_message_callback=on_request)
print(" [x] Awaiting RPC requests")
channel.start_consuming() | agpl-3.0 | 8,975,749,133,928,293,000 | 34.833333 | 119 | 0.648721 | false |
google-research/social_cascades | news/graph_processing.py | 1 | 1943 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph processing script."""
import os
from absl import app
from absl import flags
from absl import logging
import networkx as nx
import pandas as pd
from utils import graph_filter_with_degree
from utils import load_graph_from_edgelist_csv
FLAGS = flags.FLAGS
flags.DEFINE_string(
'g_file',
'../proj_Data/cat_data/test3/sr_timespan_post_graph-00000-of-00001.csv',
'raw graph edgelist csv file')
flags.DEFINE_integer('low', 40, 'low degree threshold')
flags.DEFINE_integer('high', 80, 'high degree threshold')
flags.DEFINE_string('data_file', '', 'raw data path')
flags.DEFINE_string('filename', '', 'graph filename')
flags.DEFINE_string('save_path', '', 'graph save path')
def main(_):
df = pd.read_csv(FLAGS.data_file)
author_set = set(df['author'].unique())
graph = load_graph_from_edgelist_csv(FLAGS.g_file)
logging.info('Original Graph size: %d nodes, %d edges',
graph.number_of_nodes(), graph.number_of_edges())
graph = graph_filter_with_degree(graph, FLAGS.low, FLAGS.high, author_set)
logging.info('Filtered Graph size: %d nodes, %d edges',
graph.number_of_nodes(), graph.number_of_edges())
nx.write_gpickle(graph, os.path.join(
FLAGS.save_path, FLAGS.filename + '%s_%s.gpickle' %
(FLAGS.low, FLAGS.high)))
logging.info('Saved graph.')
if __name__ == '__main__':
app.run(main)
| apache-2.0 | -6,876,329,124,565,391,000 | 33.087719 | 76 | 0.705095 | false |
euroscipy/www.euroscipy.org | papercall_grabbing.py | 1 | 4306 | """
Functions to grab info from papercall.io
"""
import os
import time
import requests
token = 'your_papercall_token' # <-- fill this in
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
des_template = """
Title: {title}
URL: 2017/descriptions/{id}.html
save_as: 2017/descriptions/{id}.html
{description}
""".lstrip()
def get_submission_ids():
# Query all submission ids
all_ids = []
for state in ('submitted', 'accepted', 'rejected', 'waitlist'):
url = 'https://www.papercall.io/api/v1/submissions?_token=%s&per_page=999&state=%s'
all = requests.get(url % (token, state)).json()
all_ids.extend([x['id'] for x in all])
return all_ids
def get_reviewer_list():
""" Print out the names of all people who did reviews.
"""
# Collect submission ids
all_ids = get_submission_ids()
# Collect all reviewers
reviewers = set()
for id in all_ids:
url = 'https://www.papercall.io/api/v1/submissions/%s/ratings?_token=%s'
ratings = requests.get(url % (id, token)).json()
for rating in ratings:
reviewers.add(rating['user']['name'])
# Print a list
for reviewer in sorted(reviewers):
print(reviewer)
def get_talk_descriptions():
""" Get talk descriptions and store each in a markdown file.
"""
# Collect submission ids
all_ids = get_submission_ids()
# Collect descriptions
index = {}
for id in all_ids:
url = 'https://www.papercall.io/api/v1/submissions/%s?_token=%s'
submission = requests.get(url % (id, token)).json()
id = str(submission['id'])
title = submission['talk']['title']
page = des_template.format(description=submission['talk']['description'],
title=title, id=id)
fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'descriptions', id + '.md')
with open(fname, 'wb') as f:
f.write(page.encode())
index[id] = title
time.sleep(0.1)
fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'descriptions', 'index.md')
with open(fname, 'wb') as f:
for id in sorted(index):
line = id + ' - ' + index[id] + '\n'
f.write(line.encode())
def make_links_in_program():
""" Make the talk titles in the program link to description pages,
as far as we can, anyway. The rest should be done by hand by making use of
the descriptions.index.md.
Beware, this is ugly, and makes all kinds of assumptions about how the program
table is formatted, and it needs manual corrections, and it does not work after
it has applied the changes. We should probably just throw it away.
"""
# Build reverse index
rindex = {}
fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'descriptions', 'index.md')
with open(fname, 'rb') as f:
for line in f.read().decode().splitlines():
if line.strip():
id, _, title = line.partition('-')
rindex[title.strip().lower()] = 'descriptions/' + id.strip() + '.html'
default_link = 'descriptions/oops.html'
# Add links
fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'program.md')
text = open(fname, 'rb').read().decode()
lines = text.splitlines()
for i in range(len(lines)-1):
line = lines[i]
if line.lstrip().startswith("<td>") and not line.rstrip().endswith(">"):
if ' ' not in lines[i+1]:
title = line.lstrip()[4:]
id = rindex.get(title.strip().lower(), default_link)
lines[i] = " <td><a href='%s'>%s</a>" % (id, title)
if line.lstrip().startswith("<td>") and line.rstrip().endswith("</td>"):
if '<br>' in line and ' ' not in line:
title, _, rest = line.lstrip()[4:].partition('<br>')
id = rindex.get(title.strip().lower(), default_link)
lines[i] = " <td><a href='%s'>%s</a><br>%s" % (id, title, rest)
with open(fname, 'wb') as f:
text = '\n'.join(lines)
f.write(text.encode())
if __name__ == '__main__':
pass
# get_reviewer_list()
# get_talk_descriptions()
# make_links_in_program()
| mit | 2,127,778,262,498,097,200 | 33.448 | 94 | 0.571064 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-batchai/azure/mgmt/batchai/operations/file_servers_operations.py | 1 | 22642 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class FileServersOperations(object):
"""FileServersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Specifies the version of API used for this request. Constant value: "2018-03-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-03-01"
self.config = config
def _create_initial(
self, resource_group_name, file_server_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'fileServerName': self._serialize.url("file_server_name", file_server_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'FileServerCreateParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FileServer', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, file_server_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates a file server.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param file_server_name: The name of the file server within the
specified resource group. File server names can only contain a
combination of alphanumeric characters along with dash (-) and
underscore (_). The name must be from 1 through 64 characters long.
:type file_server_name: str
:param parameters: The parameters to provide for file server creation.
:type parameters:
~azure.mgmt.batchai.models.FileServerCreateParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns FileServer or
ClientRawResponse<FileServer> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.batchai.models.FileServer]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.batchai.models.FileServer]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_initial(
resource_group_name=resource_group_name,
file_server_name=file_server_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('FileServer', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers/{fileServerName}'}
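    # Illustrative usage sketch (not part of the generated code). Names such as
    # "credentials", "subscription_id", "rg" and "params" are placeholders, and it
    # assumes the package's BatchAIManagementClient exposes this operations group
    # as "file_servers":
    #
    #     from azure.mgmt.batchai import BatchAIManagementClient
    #     client = BatchAIManagementClient(credentials, subscription_id)
    #     poller = client.file_servers.create('rg', 'nfs01', parameters=params)
    #     file_server = poller.result()  # block until the long-running operation finishes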
def _delete_initial(
self, resource_group_name, file_server_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'fileServerName': self._serialize.url("file_server_name", file_server_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, file_server_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Delete a file Server.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param file_server_name: The name of the file server within the
specified resource group. File server names can only contain a
combination of alphanumeric characters along with dash (-) and
underscore (_). The name must be from 1 through 64 characters long.
:type file_server_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
file_server_name=file_server_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers/{fileServerName}'}
def get(
self, resource_group_name, file_server_name, custom_headers=None, raw=False, **operation_config):
"""Gets information about the specified Cluster.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param file_server_name: The name of the file server within the
specified resource group. File server names can only contain a
combination of alphanumeric characters along with dash (-) and
underscore (_). The name must be from 1 through 64 characters long.
:type file_server_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: FileServer or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.batchai.models.FileServer or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'fileServerName': self._serialize.url("file_server_name", file_server_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FileServer', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers/{fileServerName}'}
def list(
self, file_servers_list_options=None, custom_headers=None, raw=False, **operation_config):
"""To list all the file servers available under the given subscription
(and across all resource groups within that subscription).
:param file_servers_list_options: Additional parameters for the
operation
:type file_servers_list_options:
~azure.mgmt.batchai.models.FileServersListOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of FileServer
:rtype:
~azure.mgmt.batchai.models.FileServerPaged[~azure.mgmt.batchai.models.FileServer]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
filter = None
if file_servers_list_options is not None:
filter = file_servers_list_options.filter
select = None
if file_servers_list_options is not None:
select = file_servers_list_options.select
max_results = None
if file_servers_list_options is not None:
max_results = file_servers_list_options.max_results
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
if max_results is not None:
query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.FileServerPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.FileServerPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.BatchAI/fileServers'}
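    # Illustrative sketch (same placeholder client object as above): the returned
    # paged object can simply be iterated, fetching further pages on demand.
    #
    #     for file_server in client.file_servers.list():
    #         print(file_server.name)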
def list_by_resource_group(
self, resource_group_name, file_servers_list_by_resource_group_options=None, custom_headers=None, raw=False, **operation_config):
"""Gets a formatted list of file servers and their properties associated
within the specified resource group.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param file_servers_list_by_resource_group_options: Additional
parameters for the operation
:type file_servers_list_by_resource_group_options:
~azure.mgmt.batchai.models.FileServersListByResourceGroupOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of FileServer
:rtype:
~azure.mgmt.batchai.models.FileServerPaged[~azure.mgmt.batchai.models.FileServer]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
filter = None
if file_servers_list_by_resource_group_options is not None:
filter = file_servers_list_by_resource_group_options.filter
select = None
if file_servers_list_by_resource_group_options is not None:
select = file_servers_list_by_resource_group_options.select
max_results = None
if file_servers_list_by_resource_group_options is not None:
max_results = file_servers_list_by_resource_group_options.max_results
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
if max_results is not None:
query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.FileServerPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.FileServerPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers'}
| mit | -3,045,874,100,569,090,000 | 47.072187 | 156 | 0.639122 | false |
GoogleChrome/chromium-dashboard | internals/processes_test.py | 1 | 9385 | from __future__ import division
from __future__ import print_function
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import testing_config # Must be imported before the module under test.
import mock
from internals import approval_defs
from internals import models
from internals import processes
BakeApproval = approval_defs.ApprovalFieldDef(
'Approval for baking',
'The head chef must approve of you using the oven',
9, approval_defs.ONE_LGTM, ['[email protected]'])
BAKE_APPROVAL_DEF_DICT = collections.OrderedDict([
('name', 'Approval for baking'),
('description', 'The head chef must approve of you using the oven'),
('field_id', 9),
('rule', approval_defs.ONE_LGTM),
('approvers', ['[email protected]']),
])
class HelperFunctionsTest(testing_config.CustomTestCase):
def test_process_to_dict(self):
process = processes.Process(
'Baking',
'This is how you make bread',
'Make it before you are hungry',
[processes.ProcessStage(
'Make dough',
            'Mix it and knead',
['Cold dough'],
            [('Share kneading video', 'https://example.com')],
[],
0, 1),
processes.ProcessStage(
'Bake it',
'Heat at 375 for 40 minutes',
['A loaf', 'A dirty pan'],
[],
[BakeApproval],
1, 2),
])
expected = {
'name': 'Baking',
'description': 'This is how you make bread',
'applicability': 'Make it before you are hungry',
'stages': [
{'name': 'Make dough',
             'description': 'Mix it and knead',
'progress_items': ['Cold dough'],
             'actions': [('Share kneading video', 'https://example.com')],
'approvals': [],
'incoming_stage': 0,
'outgoing_stage': 1},
{'name': 'Bake it',
'description': 'Heat at 375 for 40 minutes',
'progress_items': ['A loaf', 'A dirty pan'],
'actions': [],
'approvals': [BAKE_APPROVAL_DEF_DICT],
'incoming_stage': 1,
'outgoing_stage': 2},
]
}
actual = processes.process_to_dict(process)
self.assertEqual(expected['stages'][1]['approvals'],
actual['stages'][1]['approvals'])
self.assertEqual(expected, actual)
def test_review_is_done(self):
"""A review step is done if the review has completed or was N/a."""
self.assertFalse(processes.review_is_done(None))
self.assertFalse(processes.review_is_done(0))
self.assertFalse(processes.review_is_done(models.REVIEW_PENDING))
self.assertFalse(processes.review_is_done(models.REVIEW_ISSUES_OPEN))
self.assertTrue(processes.review_is_done(models.REVIEW_ISSUES_ADDRESSED))
self.assertTrue(processes.review_is_done(models.REVIEW_NA))
class ProgressDetectorsTest(testing_config.CustomTestCase):
def setUp(self):
self.feature_1 = models.Feature(
name='feature one', summary='sum', category=1, visibility=1,
standardization=1, web_dev_views=models.DEV_NO_SIGNALS,
impl_status_chrome=1,
intent_stage=models.INTENT_IMPLEMENT)
self.feature_1.put()
def tearDown(self):
self.feature_1.key.delete()
def test_initial_public_proposal_url(self):
detector = processes.PROGRESS_DETECTORS['Initial public proposal']
self.assertFalse(detector(self.feature_1))
self.feature_1.initial_public_proposal_url = 'http://example.com'
self.assertTrue(detector(self.feature_1))
def test_explainer(self):
detector = processes.PROGRESS_DETECTORS['Explainer']
self.assertFalse(detector(self.feature_1))
self.feature_1.explainer_links = ['http://example.com']
self.assertTrue(detector(self.feature_1))
def test_security_review_completed(self):
detector = processes.PROGRESS_DETECTORS['Security review issues addressed']
self.assertFalse(detector(self.feature_1))
self.feature_1.security_review_status = models.REVIEW_ISSUES_ADDRESSED
self.assertTrue(detector(self.feature_1))
def test_privacy_review_completed(self):
detector = processes.PROGRESS_DETECTORS['Privacy review issues addressed']
self.assertFalse(detector(self.feature_1))
self.feature_1.privacy_review_status = models.REVIEW_ISSUES_ADDRESSED
self.assertTrue(detector(self.feature_1))
def test_intent_to_prototype_email(self):
detector = processes.PROGRESS_DETECTORS['Intent to Prototype email']
self.assertFalse(detector(self.feature_1))
self.feature_1.intent_to_implement_url = 'http://example.com'
self.assertTrue(detector(self.feature_1))
def test_intent_to_ship_email(self):
detector = processes.PROGRESS_DETECTORS['Intent to Ship email']
self.assertFalse(detector(self.feature_1))
self.feature_1.intent_to_ship_url = 'http://example.com'
self.assertTrue(detector(self.feature_1))
def test_ready_for_trial_email(self):
detector = processes.PROGRESS_DETECTORS['Ready for Trial email']
self.assertFalse(detector(self.feature_1))
self.feature_1.ready_for_trial_url = 'http://example.com'
self.assertTrue(detector(self.feature_1))
def test_intent_to_experiment_email(self):
detector = processes.PROGRESS_DETECTORS['Intent to Experiment email']
self.assertFalse(detector(self.feature_1))
self.feature_1.intent_to_experiment_url = 'http://example.com'
self.assertTrue(detector(self.feature_1))
def test_one_i2e_lgtm(self):
detector = processes.PROGRESS_DETECTORS['One LGTM on Intent to Experiment']
self.assertFalse(detector(self.feature_1))
self.feature_1.i2e_lgtms = ['[email protected]']
self.assertTrue(detector(self.feature_1))
  def test_one_deprecation_trial_lgtm(self):
detector = processes.PROGRESS_DETECTORS[
'One LGTM on Request for Deprecation Trial']
self.assertFalse(detector(self.feature_1))
self.feature_1.i2e_lgtms = ['[email protected]']
self.assertTrue(detector(self.feature_1))
def test_three_i2s_lgtm(self):
detector = processes.PROGRESS_DETECTORS['Three LGTMs on Intent to Ship']
self.assertFalse(detector(self.feature_1))
self.feature_1.i2s_lgtms = [
'[email protected]',
'[email protected]',
'[email protected]']
self.assertTrue(detector(self.feature_1))
def test_samples(self):
detector = processes.PROGRESS_DETECTORS['Samples']
self.assertFalse(detector(self.feature_1))
self.feature_1.sample_links = ['http://example.com']
self.assertTrue(detector(self.feature_1))
def test_doc_links(self):
detector = processes.PROGRESS_DETECTORS['Doc links']
self.assertFalse(detector(self.feature_1))
self.feature_1.doc_links = ['http://example.com']
self.assertTrue(detector(self.feature_1))
def test_tag_review_requested(self):
detector = processes.PROGRESS_DETECTORS['TAG review requested']
self.assertFalse(detector(self.feature_1))
self.feature_1.tag_review = 'http://example.com'
self.assertTrue(detector(self.feature_1))
def test_tag_review_completed(self):
detector = processes.PROGRESS_DETECTORS['TAG review issues addressed']
self.assertFalse(detector(self.feature_1))
self.feature_1.tag_review_status = models.REVIEW_ISSUES_ADDRESSED
self.assertTrue(detector(self.feature_1))
  def test_web_dev_signals(self):
detector = processes.PROGRESS_DETECTORS['Web developer signals']
self.assertFalse(detector(self.feature_1))
self.feature_1.web_dev_views = models.PUBLIC_SUPPORT
self.assertTrue(detector(self.feature_1))
def test_vendor_signals(self):
detector = processes.PROGRESS_DETECTORS['Vendor signals']
self.assertFalse(detector(self.feature_1))
self.feature_1.ff_views = models.PUBLIC_SUPPORT
self.assertTrue(detector(self.feature_1))
def test_estimated_target_milestone(self):
detector = processes.PROGRESS_DETECTORS['Estimated target milestone']
self.assertFalse(detector(self.feature_1))
self.feature_1.shipped_milestone = 99
self.assertTrue(detector(self.feature_1))
def test_code_in_chromium(self):
detector = processes.PROGRESS_DETECTORS['Code in Chromium']
self.assertFalse(detector(self.feature_1))
self.feature_1.impl_status_chrome = models.ENABLED_BY_DEFAULT
self.assertTrue(detector(self.feature_1))
def test_motivation(self):
detector = processes.PROGRESS_DETECTORS['Motivation']
self.assertFalse(detector(self.feature_1))
self.feature_1.motivation = 'test motivation'
self.assertTrue(detector(self.feature_1))
def test_code_removed(self):
detector = processes.PROGRESS_DETECTORS['Code removed']
self.assertFalse(detector(self.feature_1))
self.feature_1.impl_status_chrome = models.REMOVED
self.assertTrue(detector(self.feature_1)) | apache-2.0 | 7,072,356,655,839,198,000 | 37.946058 | 79 | 0.686841 | false |
roopeshsivam/certify | certificates/CreateCertView.py | 1 | 5826 | from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import logout as django_logout
from django.shortcuts import redirect, render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.views import generic
from django.utils.decorators import method_decorator
from django.views.generic.edit import FormView
from django.forms import BaseModelFormSet
from django.views.generic.edit import CreateView
from django.views.decorators.http import condition
from django.views.generic.edit import FormMixin
from django.views.generic.edit import UpdateView
from .ContextData import *
@method_decorator(login_required(login_url="/in/login/"), name='dispatch')
class CreateCertificateView(CreateView):
def get_form_class(self, **kwargs):
"""
        Return the form class to be used in this view, looked up in
        ContextData using the cert_id URL kwarg.
"""
return ContextData[self.kwargs['cert_id']]['FormName']
def get_template_names(self, **kwargs):
ShipID = self.request.GET.get('shipid')
ModelObject = ContextData[self.kwargs['cert_id']]['ModelName']
# if str(ModelObject.objects.filter(CertState='c', ShipMainData__pk=ShipID)) == '<QuerySet []>':
# return 'pages/create-'+ContextData[self.kwargs['cert_id']]['TemplateName']
# else:
# return 'pages/active-certificate-error.html'
return 'pages/certificate-base-form.html'
def get_form(self, form_class=None):
form = super(CreateCertificateView, self).get_form()
return form
def form_valid(self, form, **kwargs):
ShipID = self.request.GET.get('shipid')
form.instance.DocAuthor = self.request.user
form.instance.ShipMainData = ShipMainData.objects.get(id=ShipID)
form.instance.CertState = 'd'
return super(CreateCertificateView, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(CreateCertificateView, self).get_context_data(**kwargs)
ShipID = self.request.GET.get('shipid')
context['CertName'] = ContextData[self.kwargs['cert_id']]['CertName']
context['TemplateName'] = 'forms/update/update-'+ContextData[self.kwargs['cert_id']]['TemplateName']
context['State'] = "Create New Certificate"
context['ButtonState'] = "Add"
context['ShipName'] = ShipMainData.objects.get(id=ShipID)
return context
@method_decorator(login_required(login_url="/in/login/"), name='dispatch')
class UpdateCertificateView(UpdateView):
queryset = None
def get_form_class(self, **kwargs):
"""
Returns the form class to use in this view
"""
return ContextData[self.kwargs['cert_id']]['FormName']
def get_queryset(self, **kwargs):
"""
Return the `QuerySet` that will be used to look up the object.
Note that this method is called by the default implementation of
`get_object` and may not be called if `get_object` is overridden.
"""
ModelObject = ContextData[self.kwargs['cert_id']]['ModelName']
return ModelObject.objects.all()
def get_template_names(self, **kwargs):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
ModelObject = ContextData[self.kwargs['cert_id']]['ModelName']
ModelObject = ModelObject.objects.get(pk=self.kwargs['pk'])
if ModelObject.CertState=='d':
return 'pages/certificate-base-form.html'
# return 'forms/update/update-'+ContextData[self.kwargs['cert_id']]['TemplateName']
else:
return 'pages/form-error-update.html'
def form_valid(self, form):
form = self.get_form()
form.save()
return super(UpdateCertificateView, self).form_valid(form)
def get_success_url(self):
return "../"
def post(self, request, *args, **kwargs):
request.POST = (request.POST.copy())
ModelObject = ContextData[self.kwargs['cert_id']]['ModelName']
Certificate = ModelObject.objects.get(pk=self.kwargs['pk'])
CertFilter = ModelObject.objects.filter(ShipMainData_id=Certificate.ShipMainData.id)
State = 'c'
for Certificates in CertFilter: #Check simultaneous confirmation of multiple certificates
if Certificates.CertState == "c":
State = 'd'
if 'save' in request.POST: #Check before editing or saving confirmed certificates
form = self.get_form()
if Certificate.CertState != "c":
return super(UpdateCertificateView, self).post(request, *args, **kwargs)
else:
return HttpResponseRedirect('../') # change to redirect
if 'confirm' in request.POST:
ModelObject.objects.filter(pk=self.kwargs['pk']).update(CertState=State)
return HttpResponseRedirect('../') # change to redirect
if 'deactivate' in request.POST:
ModelObject.objects.filter(pk=self.kwargs['pk']).update(CertState='x')
return HttpResponseRedirect('../') #change to redirect
def get_context_data(self, **kwargs):
context = super(UpdateCertificateView, self).get_context_data(**kwargs)
CertData = ContextData[self.kwargs['cert_id']]['ModelName']
Certificate= CertData.objects.get(pk=self.kwargs['pk'])
context['CertName'] = ContextData[self.kwargs['cert_id']]['CertName']
context['TemplateName'] = 'forms/update/update-'+ContextData[self.kwargs['cert_id']]['TemplateName']
context['State'] = "Edit Certificate"
context['ButtonState'] = "Update"
context['ShipName'] = Certificate.ShipMainData
return context | gpl-3.0 | -462,516,706,902,927,000 | 44.170543 | 108 | 0.659286 | false |
diN0bot/ProcrasDonate | lib/html_emailer.py | 1 | 1392 | import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import settings
def send_email(sender, recipient, subject, text, html):
if settings.DJANGO_SERVER:
print "="*60
print "FROM:", sender
print "TO:", recipient
print "SUBJECT:", subject
print "========= TEXT MESSAGE =========\n", text
print "\n\n========= HTML MESSAGE ==========\n", html
else:
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = recipient
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
msg.attach(part1)
msg.attach(part2)
# Send the message via local SMTP server.
s = smtplib.SMTP('localhost')
# sendmail function takes 3 arguments: sender's address, recipient's address
# and message to send - here it is sent as one string.
s.sendmail(sender, recipient, msg.as_string())
s.quit()
| agpl-3.0 | 2,829,949,119,464,530,000 | 35.631579 | 84 | 0.600575 | false |
JING-TIME/ustc-course | tests/resize_avatar.py | 1 | 1146 | #!/usr/bin/env python3
import sys
sys.path.append('..') # fix import directory
from app import app
from app.models import User
from PIL import Image
from app.utils import rand_str
ctx = app.test_request_context()
ctx.push()
users = User.query.all()
for u in users:
if u._avatar:
with Image.open('../uploads/images/' + u._avatar) as img:
image_width, image_height = img.size
thumbnail_width = 192
thumbnail_height = 192
if image_width <= thumbnail_width and image_height <= thumbnail_height:
continue
# generate thumbnail if the avatar is too large
new_filename = rand_str() + '.png'
try:
img.thumbnail((thumbnail_width, thumbnail_height), Image.ANTIALIAS)
img.save('../uploads/images/' + new_filename, "PNG")
except IOError:
print("Failed to create thumbnail from '" + u._avatar + "' to '" + new_filename + "'")
u._avatar = new_filename
u.save()
print('User ID ' + str(u.id) + ' original ' + u._avatar + ' thumbnail ' + new_filename)
| agpl-3.0 | -4,566,116,130,143,257,000 | 35.967742 | 102 | 0.576789 | false |
prechelt/pyth | pyth/__init__.py | 1 | 1207 | """
Pyth -- Python text markup and conversion
"""
from __future__ import absolute_import
import os.path
__version__ = '0.5.6'
writerMap = {
'.rtf': 'pyth.plugins.rtf15.writer.Rtf15Writer',
'.html': 'pyth.plugins.xhtml.writer.XHTMLWriter',
'.xhtml': 'pyth.plugins.xhtml.writer.XHTMLWriter',
'.txt': 'pyth.plugins.plaintext.writer.PlaintextWriter',
'.pdf': 'pyth.plugins.pdf.writer.PDFWriter',
}
mimeMap = {
'.rtf': 'application/rtf',
'.html': 'text/html',
'.xhtml': 'application/xhtml+xml',
    '.txt': 'text/plain',
    '.pdf': 'application/pdf',
}
def write(doc, filename):
ext = os.path.splitext(filename)[1]
writer = namedObject(writerMap[ext])
buff = writer.write(doc)
buff.seek(0)
return (buff, mimeMap[ext])
# Stolen from twisted.python.reflect
def namedModule(name):
"""Return a module given its name."""
topLevel = __import__(name)
packages = name.split(".")[1:]
m = topLevel
for p in packages:
m = getattr(m, p)
return m
def namedObject(name):
"""Get a fully named module-global object.
"""
classSplit = name.split('.')
module = namedModule('.'.join(classSplit[:-1]))
return getattr(module, classSplit[-1])
| mit | -6,107,775,287,688,249,000 | 21.351852 | 60 | 0.624689 | false |
jonnyhtw/cylc | lib/cylc/batch_sys_manager.py | 1 | 33030 | #!/usr/bin/env python
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2018 NIWA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Manage submission, poll and kill of a job to the batch systems.
Export the BatchSysManager class.
Batch system handler (a.k.a. job submission method) modules should be placed
under the "cylc.batch_sys_handlers" package. Each module should export the
symbol "BATCH_SYS_HANDLER" for the singleton instance that implements the job
system handler logic.
Each batch system handler class should instantiate with no argument, and may
have the following constants and methods:
batch_sys.filter_poll_output(out, job_id) => boolean
* If this method is available, it will be called after the batch system's
poll command is called and returns zero. The method should read the
output to see if job_id is still alive in the batch system, and return
True if so.
batch_sys.filter_poll_many_output(out) => job_ids
* Called after the batch system's poll many command. The method should read
the output and return a list of job IDs that are still in the batch
system.
batch_sys.filter_submit_output(out, err) => new_out, new_err
* Filter the standard output and standard error of the job submission
command. This is useful if the job submission command returns information
that should just be ignored. See also "batch_sys.SUBMIT_CMD_TMPL".
batch_sys.format_directives(job_conf) => lines
* If relevant, this method formats the job directives for a job file, if
job file directives are relevant for the batch system. The argument
"job_conf" is a dict containing the job configuration.
batch_sys.get_fail_signals(job_conf) => list of strings
* Return a list of names of signals to trap for reporting errors. Default
is ["EXIT", "ERR", "TERM", "XCPU"]. ERR and EXIT are always recommended.
EXIT is used to report premature stopping of the job script, and its trap
is unset at the end of the script.
batch_sys.get_poll_many_cmd(job-id-list) => list
* Return a list containing the shell command to poll the jobs in the
argument list.
batch_sys.get_vacation_signal(job_conf) => str
* If relevant, return a string containing the name of the signal that
indicates the job has been vacated by the batch system.
batch_sys.submit(job_file_path) => ret_code, out, err
* Submit a job and return an instance of the Popen object for the
submission. This method is useful if the job submission requires logic
beyond just running a system or shell command. See also
"batch_sys.SUBMIT_CMD".
batch_sys.SHOULD_KILL_PROC_GROUP
* A boolean to indicate whether it is necessary to kill a job by sending
a signal to its Unix process group.
batch_sys.SHOULD_POLL_PROC_GROUP
* A boolean to indicate whether it is necessary to poll a job by its PID
as well as the job ID.
batch_sys.KILL_CMD_TMPL
* A Python string template for getting the batch system command to remove
and terminate a job ID. The command is formed using the logic:
batch_sys.KILL_CMD_TMPL % {"job_id": job_id}
batch_sys.REC_ID_FROM_SUBMIT_ERR
batch_sys.REC_ID_FROM_SUBMIT_OUT
* A regular expression (compiled) to extract the job "id" from the standard
output or standard error of the job submission command.
batch_sys.SUBMIT_CMD_ENV
* A Python dict (or an iterable that can be used to update a dict)
containing extra environment variables for getting the batch system
command to submit a job file.
batch_sys.SUBMIT_CMD_TMPL
* A Python string template for getting the batch system command to submit a
job file. The command is formed using the logic:
batch_sys.SUBMIT_CMD_TMPL % {"job": job_file_path}
See also "batch_sys._job_submit_impl".
"""
import os
import shlex
from shutil import rmtree
from signal import SIGKILL
import stat
from subprocess import Popen, PIPE
import sys
import traceback
from cylc.mkdir_p import mkdir_p
from cylc.task_message import (
CYLC_JOB_PID, CYLC_JOB_INIT_TIME, CYLC_JOB_EXIT_TIME, CYLC_JOB_EXIT,
CYLC_MESSAGE)
from cylc.task_outputs import TASK_OUTPUT_SUCCEEDED
from cylc.task_job_logs import (
JOB_LOG_JOB, JOB_LOG_OUT, JOB_LOG_ERR, JOB_LOG_STATUS)
from cylc.wallclock import get_current_time_string
class JobPollContext(object):
"""Context object for a job poll.
0 ctx.job_log_dir -- cycle/task/submit_num
1 ctx.batch_sys_name -- batch system name
2 ctx.batch_sys_job_id -- job ID in batch system
3 ctx.batch_sys_exit_polled -- 0 for false, 1 for true
4 ctx.run_status -- 0 for success, 1 for failure
5 ctx.run_signal -- signal received on run failure
6 ctx.time_submit_exit -- submit (exit) time
7 ctx.time_run -- run start time
8 ctx.time_run_exit -- run exit time
"""
def __init__(self, job_log_dir):
self.job_log_dir = job_log_dir
self.batch_sys_name = None
self.batch_sys_job_id = None
self.batch_sys_exit_polled = None
self.pid = None
self.run_status = None
self.run_signal = None
self.time_submit_exit = None
self.time_run = None
self.time_run_exit = None
self.messages = []
def get_summary_str(self):
"""Return the poll context as a summary string delimited by "|"."""
items = []
for item in [
self.job_log_dir,
self.batch_sys_name,
self.batch_sys_job_id,
self.batch_sys_exit_polled,
self.run_status,
self.run_signal,
self.time_submit_exit,
self.time_run,
self.time_run_exit]:
if item is None:
items.append("")
else:
items.append(str(item))
return "|".join(items)
class BatchSysManager(object):
"""Job submission, poll and kill.
Manage the importing of job submission method modules.
"""
CYLC_BATCH_SYS_NAME = "CYLC_BATCH_SYS_NAME"
CYLC_BATCH_SYS_JOB_ID = "CYLC_BATCH_SYS_JOB_ID"
CYLC_BATCH_SYS_JOB_SUBMIT_TIME = "CYLC_BATCH_SYS_JOB_SUBMIT_TIME"
CYLC_BATCH_SYS_EXIT_POLLED = "CYLC_BATCH_SYS_EXIT_POLLED"
LINE_PREFIX_CYLC_DIR = "export CYLC_DIR="
LINE_PREFIX_BATCH_SYS_NAME = "# Job submit method: "
LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL = "# Job submit command template: "
LINE_PREFIX_EXECUTION_TIME_LIMIT = "# Execution time limit: "
LINE_PREFIX_EOF = "#EOF: "
LINE_PREFIX_JOB_LOG_DIR = "# Job log directory: "
LINE_UPDATE_CYLC_DIR = (
"# N.B. CYLC_DIR has been updated on the remote host\n")
OUT_PREFIX_COMMAND = "[TASK JOB COMMAND]"
OUT_PREFIX_MESSAGE = "[TASK JOB MESSAGE]"
OUT_PREFIX_SUMMARY = "[TASK JOB SUMMARY]"
OUT_PREFIX_CMD_ERR = "[TASK JOB ERROR]"
_INSTANCES = {}
@classmethod
def configure_suite_run_dir(cls, suite_run_dir):
"""Add local python module paths if not already done."""
for sub_dir in ["python", os.path.join("lib", "python")]:
# TODO - eventually drop the deprecated "python" sub-dir.
suite_py = os.path.join(suite_run_dir, sub_dir)
if os.path.isdir(suite_py) and suite_py not in sys.path:
sys.path.append(suite_py)
def _get_sys(self, batch_sys_name):
"""Return an instance of the class for "batch_sys_name"."""
if batch_sys_name in self._INSTANCES:
return self._INSTANCES[batch_sys_name]
for key in [
"cylc.batch_sys_handlers." + batch_sys_name,
batch_sys_name]:
try:
mod_of_name = __import__(key, fromlist=[key])
self._INSTANCES[batch_sys_name] = getattr(
mod_of_name, "BATCH_SYS_HANDLER")
return self._INSTANCES[batch_sys_name]
except ImportError:
if key == batch_sys_name:
raise
def format_directives(self, job_conf):
"""Format the job directives for a job file, if relevant."""
batch_sys = self._get_sys(job_conf['batch_system_name'])
if hasattr(batch_sys, "format_directives"):
return batch_sys.format_directives(job_conf)
def get_fail_signals(self, job_conf):
"""Return a list of failure signal names to trap in the job file."""
batch_sys = self._get_sys(job_conf['batch_system_name'])
if hasattr(batch_sys, "get_fail_signals"):
return batch_sys.get_fail_signals(job_conf)
return ["EXIT", "ERR", "TERM", "XCPU"]
def get_vacation_signal(self, job_conf):
"""Return the vacation signal name for a job file."""
batch_sys = self._get_sys(job_conf['batch_system_name'])
if hasattr(batch_sys, "get_vacation_signal"):
return batch_sys.get_vacation_signal(job_conf)
def jobs_kill(self, job_log_root, job_log_dirs):
"""Kill multiple jobs.
job_log_root -- The log/job/ sub-directory of the suite.
job_log_dirs -- A list containing point/name/submit_num for task jobs.
"""
# Note: The more efficient way to do this is to group the jobs by their
# batch systems, and call the kill command for each batch system once.
# However, this will make it more difficult to determine if the kill
# command for a particular job is successful or not.
if "$" in job_log_root:
job_log_root = os.path.expandvars(job_log_root)
self.configure_suite_run_dir(job_log_root.rsplit(os.sep, 2)[0])
now = get_current_time_string()
for job_log_dir in job_log_dirs:
ret_code, err = self.job_kill(
os.path.join(job_log_root, job_log_dir, JOB_LOG_STATUS))
sys.stdout.write("%s%s|%s|%d\n" % (
self.OUT_PREFIX_SUMMARY, now, job_log_dir, ret_code))
# Note: Print STDERR to STDOUT may look a bit strange, but it
# requires less logic for the suite to parse the output.
if err.strip():
for line in err.splitlines(True):
if not line.endswith("\n"):
line += "\n"
sys.stdout.write("%s%s|%s|%s" % (
self.OUT_PREFIX_CMD_ERR, now, job_log_dir, line))
def jobs_poll(self, job_log_root, job_log_dirs):
"""Poll multiple jobs.
job_log_root -- The log/job/ sub-directory of the suite.
job_log_dirs -- A list containing point/name/submit_num for task jobs.
"""
if "$" in job_log_root:
job_log_root = os.path.expandvars(job_log_root)
self.configure_suite_run_dir(job_log_root.rsplit(os.sep, 2)[0])
ctx_list = [] # Contexts for all relevant jobs
ctx_list_by_batch_sys = {} # {batch_sys_name1: [ctx1, ...], ...}
for job_log_dir in job_log_dirs:
ctx = self._jobs_poll_status_files(job_log_root, job_log_dir)
if ctx is None:
continue
ctx_list.append(ctx)
if not ctx.batch_sys_name or not ctx.batch_sys_job_id:
# Lost batch system information for some reason.
# Mark the job as if it is no longer in the batch system.
ctx.batch_sys_exit_polled = 1
sys.stderr.write(
"%s/%s: incomplete batch system info\n" % (
ctx.job_log_dir, JOB_LOG_STATUS))
# We can trust:
# * Jobs previously polled to have exited the batch system.
# * Jobs succeeded or failed with ERR/EXIT.
if (ctx.batch_sys_exit_polled or ctx.run_status == 0 or
ctx.run_signal in ["ERR", "EXIT"]):
continue
if ctx.batch_sys_name not in ctx_list_by_batch_sys:
ctx_list_by_batch_sys[ctx.batch_sys_name] = []
ctx_list_by_batch_sys[ctx.batch_sys_name].append(ctx)
for batch_sys_name, my_ctx_list in ctx_list_by_batch_sys.items():
self._jobs_poll_batch_sys(
job_log_root, batch_sys_name, my_ctx_list)
cur_time_str = get_current_time_string()
for ctx in ctx_list:
for message in ctx.messages:
sys.stdout.write("%s%s|%s|%s\n" % (
self.OUT_PREFIX_MESSAGE,
cur_time_str,
ctx.job_log_dir,
message))
sys.stdout.write("%s%s|%s\n" % (
self.OUT_PREFIX_SUMMARY,
cur_time_str,
ctx.get_summary_str()))
def jobs_submit(self, job_log_root, job_log_dirs, remote_mode=False,
utc_mode=False):
"""Submit multiple jobs.
job_log_root -- The log/job/ sub-directory of the suite.
job_log_dirs -- A list containing point/name/submit_num for task jobs.
remote_mode -- am I running on the remote job host?
utc_mode -- is the suite running in UTC mode?
"""
if "$" in job_log_root:
job_log_root = os.path.expandvars(job_log_root)
self.configure_suite_run_dir(job_log_root.rsplit(os.sep, 2)[0])
if remote_mode:
items = self._jobs_submit_prep_by_stdin(job_log_root, job_log_dirs)
else:
items = self._jobs_submit_prep_by_args(job_log_root, job_log_dirs)
now = get_current_time_string(override_use_utc=utc_mode)
for job_log_dir, batch_sys_name, submit_opts in items:
job_file_path = os.path.join(
job_log_root, job_log_dir, JOB_LOG_JOB)
if not batch_sys_name:
sys.stdout.write("%s%s|%s|1|\n" % (
self.OUT_PREFIX_SUMMARY, now, job_log_dir))
continue
ret_code, out, err, job_id = self._job_submit_impl(
job_file_path, batch_sys_name, submit_opts)
sys.stdout.write("%s%s|%s|%d|%s\n" % (
self.OUT_PREFIX_SUMMARY, now, job_log_dir, ret_code, job_id))
for key, value in [("STDERR", err), ("STDOUT", out)]:
if value is None or not value.strip():
continue
for line in value.splitlines(True):
if not value.endswith("\n"):
value += "\n"
sys.stdout.write("%s%s|%s|[%s] %s" % (
self.OUT_PREFIX_COMMAND, now, job_log_dir, key, line))
def job_kill(self, st_file_path):
"""Ask batch system to terminate the job specified in "st_file_path".
        Return a 2-tuple (return_code, err_str): (0, "") on success, or a
        non-zero return code and an error message on failure.
"""
# SUITE_RUN_DIR/log/job/CYCLE/TASK/SUBMIT/job.status
self.configure_suite_run_dir(st_file_path.rsplit(os.sep, 6)[0])
try:
st_file = open(st_file_path)
for line in st_file:
if line.startswith(self.CYLC_BATCH_SYS_NAME + "="):
batch_sys = self._get_sys(line.strip().split("=", 1)[1])
break
else:
return (1,
"Cannot determine batch system from %s file" % (
JOB_LOG_STATUS))
st_file.seek(0, 0) # rewind
if getattr(batch_sys, "SHOULD_KILL_PROC_GROUP", False):
for line in st_file:
if line.startswith(CYLC_JOB_PID + "="):
pid = line.strip().split("=", 1)[1]
try:
os.killpg(os.getpgid(int(pid)), SIGKILL)
except (OSError, ValueError) as exc:
traceback.print_exc()
return (1, str(exc))
else:
return (0, "")
st_file.seek(0, 0) # rewind
if hasattr(batch_sys, "KILL_CMD_TMPL"):
for line in st_file:
if not line.startswith(self.CYLC_BATCH_SYS_JOB_ID + "="):
continue
job_id = line.strip().split("=", 1)[1]
command = shlex.split(
batch_sys.KILL_CMD_TMPL % {"job_id": job_id})
try:
proc = Popen(
command, stdin=open(os.devnull), stderr=PIPE)
except OSError as exc:
# subprocess.Popen has a bad habit of not setting the
# filename of the executable when it raises an OSError.
if not exc.filename:
exc.filename = command[0]
traceback.print_exc()
return (1, str(exc))
else:
return (proc.wait(), proc.communicate()[1])
return (1, "Cannot determine batch job ID from %s file" % (
JOB_LOG_STATUS))
except IOError as exc:
return (1, str(exc))
@classmethod
def _create_nn(cls, job_file_path):
"""Create NN symbolic link, if necessary.
If NN => 01, remove numbered directories with submit numbers greater
than 01.
Helper for "self._job_submit_impl".
"""
job_file_dir = os.path.dirname(job_file_path)
source = os.path.basename(job_file_dir)
task_log_dir = os.path.dirname(job_file_dir)
nn_path = os.path.join(task_log_dir, "NN")
try:
old_source = os.readlink(nn_path)
except OSError:
old_source = None
if old_source is not None and old_source != source:
os.unlink(nn_path)
old_source = None
if old_source is None:
os.symlink(source, nn_path)
# On submit 1, remove any left over digit directories from prev runs
if source == "01":
for name in os.listdir(task_log_dir):
if name != source and name.isdigit():
# Ignore errors, not disastrous if rmtree fails
rmtree(
os.path.join(task_log_dir, name), ignore_errors=True)
def _filter_submit_output(self, st_file_path, batch_sys, out, err):
"""Filter submit command output, if relevant."""
        job_id = None
        # Guard against a handler that defines neither REC_ID_FROM_SUBMIT_* pattern.
        text = None
        rec_id = None
if hasattr(batch_sys, "REC_ID_FROM_SUBMIT_ERR"):
text = err
rec_id = batch_sys.REC_ID_FROM_SUBMIT_ERR
elif hasattr(batch_sys, "REC_ID_FROM_SUBMIT_OUT"):
text = out
rec_id = batch_sys.REC_ID_FROM_SUBMIT_OUT
if rec_id:
for line in str(text).splitlines():
match = rec_id.match(line)
if match:
job_id = match.group("id")
job_status_file = open(st_file_path, "a")
job_status_file.write("%s=%s\n" % (
self.CYLC_BATCH_SYS_JOB_ID, job_id))
job_status_file.write("%s=%s\n" % (
self.CYLC_BATCH_SYS_JOB_SUBMIT_TIME,
get_current_time_string()))
job_status_file.close()
break
if hasattr(batch_sys, "filter_submit_output"):
out, err = batch_sys.filter_submit_output(out, err)
return out, err, job_id
def _jobs_poll_status_files(self, job_log_root, job_log_dir):
"""Helper 1 for self.jobs_poll(job_log_root, job_log_dirs)."""
ctx = JobPollContext(job_log_dir)
try:
handle = open(os.path.join(
job_log_root, ctx.job_log_dir, JOB_LOG_STATUS))
except IOError as exc:
sys.stderr.write(str(exc) + "\n")
return
for line in handle:
if "=" not in line:
continue
key, value = line.strip().split("=", 1)
if key == self.CYLC_BATCH_SYS_NAME:
ctx.batch_sys_name = value
elif key == self.CYLC_BATCH_SYS_JOB_ID:
ctx.batch_sys_job_id = value
elif key == self.CYLC_BATCH_SYS_EXIT_POLLED:
ctx.batch_sys_exit_polled = 1
elif key == CYLC_JOB_PID:
ctx.pid = value
elif key == self.CYLC_BATCH_SYS_JOB_SUBMIT_TIME:
ctx.time_submit_exit = value
elif key == CYLC_JOB_INIT_TIME:
ctx.time_run = value
elif key == CYLC_JOB_EXIT_TIME:
ctx.time_run_exit = value
elif key == CYLC_JOB_EXIT:
if value == TASK_OUTPUT_SUCCEEDED.upper():
ctx.run_status = 0
else:
ctx.run_status = 1
ctx.run_signal = value
elif key == CYLC_MESSAGE:
ctx.messages.append(value)
handle.close()
return ctx
def _jobs_poll_batch_sys(self, job_log_root, batch_sys_name, my_ctx_list):
"""Helper 2 for self.jobs_poll(job_log_root, job_log_dirs)."""
exp_job_ids = [ctx.batch_sys_job_id for ctx in my_ctx_list]
bad_job_ids = list(exp_job_ids)
exp_pids = []
bad_pids = []
items = [[self._get_sys(batch_sys_name), exp_job_ids, bad_job_ids]]
if getattr(items[0][0], "SHOULD_POLL_PROC_GROUP", False):
exp_pids = [ctx.pid for ctx in my_ctx_list if ctx.pid is not None]
bad_pids.extend(exp_pids)
items.append([self._get_sys("background"), exp_pids, bad_pids])
for batch_sys, exp_ids, bad_ids in items:
if hasattr(batch_sys, "get_poll_many_cmd"):
# Some poll commands may not be as simple
cmd = batch_sys.get_poll_many_cmd(exp_ids)
else: # if hasattr(batch_sys, "POLL_CMD"):
# Simple poll command that takes a list of job IDs
cmd = [batch_sys.POLL_CMD] + exp_ids
try:
proc = Popen(
cmd, stdin=open(os.devnull), stderr=PIPE, stdout=PIPE)
except OSError as exc:
# subprocess.Popen has a bad habit of not setting the
# filename of the executable when it raises an OSError.
if not exc.filename:
exc.filename = cmd[0]
sys.stderr.write(str(exc) + "\n")
return
proc.wait()
out, err = proc.communicate()
sys.stderr.write(err)
if hasattr(batch_sys, "filter_poll_many_output"):
# Allow custom filter
for id_ in batch_sys.filter_poll_many_output(out):
try:
bad_ids.remove(id_)
except ValueError:
pass
else:
# Just about all poll commands return a table, with column 1
# being the job ID. The logic here should be sufficient to
# ensure that any table header is ignored.
for line in out.splitlines():
try:
head = line.split(None, 1)[0]
except IndexError:
continue
if head in exp_ids:
try:
bad_ids.remove(head)
except ValueError:
pass
for ctx in my_ctx_list:
ctx.batch_sys_exit_polled = int(
ctx.batch_sys_job_id in bad_job_ids)
# Exited batch system, but process still running
# This can happen to jobs in some "at" implementation
if (ctx.batch_sys_exit_polled and
ctx.pid in exp_pids and ctx.pid not in bad_pids):
ctx.batch_sys_exit_polled = 0
# Add information to "job.status"
if ctx.batch_sys_exit_polled:
try:
handle = open(os.path.join(
job_log_root, ctx.job_log_dir, JOB_LOG_STATUS), "a")
handle.write("%s=%s\n" % (
self.CYLC_BATCH_SYS_EXIT_POLLED,
get_current_time_string()))
handle.close()
except IOError as exc:
sys.stderr.write(str(exc) + "\n")
def _job_submit_impl(
self, job_file_path, batch_sys_name, submit_opts):
"""Helper for self.jobs_submit() and self.job_submit()."""
# Create NN symbolic link, if necessary
self._create_nn(job_file_path)
for name in JOB_LOG_ERR, JOB_LOG_OUT:
try:
os.unlink(os.path.join(job_file_path, name))
except OSError:
pass
# Start new status file
job_status_file = open(job_file_path + ".status", "w")
job_status_file.write(
"%s=%s\n" % (self.CYLC_BATCH_SYS_NAME, batch_sys_name))
job_status_file.close()
# Submit job
batch_sys = self._get_sys(batch_sys_name)
proc_stdin_arg = None
proc_stdin_value = open(os.devnull)
if hasattr(batch_sys, "get_submit_stdin"):
proc_stdin_arg, proc_stdin_value = batch_sys.get_submit_stdin(
job_file_path, submit_opts)
if hasattr(batch_sys, "submit"):
# batch_sys.submit should handle OSError, if relevant.
ret_code, out, err = batch_sys.submit(job_file_path, submit_opts)
else:
env = None
if hasattr(batch_sys, "SUBMIT_CMD_ENV"):
env = dict(os.environ)
env.update(batch_sys.SUBMIT_CMD_ENV)
batch_submit_cmd_tmpl = submit_opts.get("batch_submit_cmd_tmpl")
if batch_submit_cmd_tmpl:
# No need to catch OSError when using shell. It is unlikely
# that we do not have a shell, and still manage to get as far
# as here.
batch_sys_cmd = batch_submit_cmd_tmpl % {"job": job_file_path}
proc = Popen(
batch_sys_cmd,
stdin=proc_stdin_arg, stdout=PIPE, stderr=PIPE,
shell=True, env=env)
else:
command = shlex.split(
batch_sys.SUBMIT_CMD_TMPL % {"job": job_file_path})
try:
proc = Popen(
command,
stdin=proc_stdin_arg, stdout=PIPE, stderr=PIPE,
env=env)
except OSError as exc:
# subprocess.Popen has a bad habit of not setting the
# filename of the executable when it raises an OSError.
if not exc.filename:
exc.filename = command[0]
return 1, "", str(exc), ""
out, err = proc.communicate(proc_stdin_value)
ret_code = proc.wait()
# Filter submit command output, if relevant
# Get job ID, if possible
job_id = None
if out or err:
try:
out, err, job_id = self._filter_submit_output(
job_file_path + ".status", batch_sys, out, err)
except OSError:
ret_code = 1
self.job_kill(job_file_path + ".status")
return ret_code, out, err, job_id
def _jobs_submit_prep_by_args(self, job_log_root, job_log_dirs):
"""Prepare job files for submit by reading files in arguments.
Job files are specified in the arguments in local mode. Extract job
submission methods and job submission command templates from each job
file.
Return a list, where each element contains something like:
(job_log_dir, batch_sys_name, submit_opts)
"""
items = []
for job_log_dir in job_log_dirs:
job_file_path = os.path.join(job_log_root, job_log_dir, "job")
batch_sys_name = None
submit_opts = {}
for line in open(job_file_path):
if line.startswith(self.LINE_PREFIX_BATCH_SYS_NAME):
batch_sys_name = line.replace(
self.LINE_PREFIX_BATCH_SYS_NAME, "").strip()
elif line.startswith(self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL):
submit_opts["batch_submit_cmd_tmpl"] = line.replace(
self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL, "").strip()
elif line.startswith(self.LINE_PREFIX_EXECUTION_TIME_LIMIT):
submit_opts["execution_time_limit"] = float(line.replace(
self.LINE_PREFIX_EXECUTION_TIME_LIMIT, "").strip())
items.append((job_log_dir, batch_sys_name, submit_opts))
return items
def _jobs_submit_prep_by_stdin(self, job_log_root, job_log_dirs):
"""Prepare job files for submit by reading from STDIN.
Job files are uploaded via STDIN in remote mode. Modify job
files' CYLC_DIR for this host. Extract job submission methods
and job submission command templates from each job file.
Return a list, where each element contains something like:
(job_log_dir, batch_sys_name, submit_opts)
"""
items = [[job_log_dir, None, {}] for job_log_dir in job_log_dirs]
items_map = {}
for item in items:
items_map[item[0]] = item
handle = None
batch_sys_name = None
submit_opts = {}
job_log_dir = None
lines = []
# Get job files from STDIN.
# Modify CYLC_DIR in job file, if necessary.
# Get batch system name and batch submit command template from each job
# file.
# Write job file in correct location.
while True: # Note: "for cur_line in sys.stdin:" may hang
cur_line = sys.stdin.readline()
if not cur_line:
if handle is not None:
handle.close()
break
if cur_line.startswith(self.LINE_PREFIX_CYLC_DIR):
old_line = cur_line
cur_line = "%s'%s'\n" % (
self.LINE_PREFIX_CYLC_DIR, os.environ["CYLC_DIR"])
if old_line != cur_line:
lines.append(self.LINE_UPDATE_CYLC_DIR)
elif cur_line.startswith(self.LINE_PREFIX_BATCH_SYS_NAME):
batch_sys_name = cur_line.replace(
self.LINE_PREFIX_BATCH_SYS_NAME, "").strip()
elif cur_line.startswith(self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL):
submit_opts["batch_submit_cmd_tmpl"] = cur_line.replace(
self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL, "").strip()
elif cur_line.startswith(self.LINE_PREFIX_EXECUTION_TIME_LIMIT):
submit_opts["execution_time_limit"] = float(cur_line.replace(
self.LINE_PREFIX_EXECUTION_TIME_LIMIT, "").strip())
elif cur_line.startswith(self.LINE_PREFIX_JOB_LOG_DIR):
job_log_dir = cur_line.replace(
self.LINE_PREFIX_JOB_LOG_DIR, "").strip()
mkdir_p(os.path.join(job_log_root, job_log_dir))
handle = open(
os.path.join(job_log_root, job_log_dir, "job.tmp"), "wb")
if handle is None:
lines.append(cur_line)
else:
for line in lines + [cur_line]:
handle.write(line)
lines = []
if cur_line.startswith(self.LINE_PREFIX_EOF + job_log_dir):
handle.close()
# Make it executable
os.chmod(handle.name, (
os.stat(handle.name).st_mode |
stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
# Rename from "*/job.tmp" to "*/job"
os.rename(handle.name, handle.name[:-4])
try:
items_map[job_log_dir][1] = batch_sys_name
items_map[job_log_dir][2] = submit_opts
except KeyError:
pass
handle = None
job_log_dir = None
batch_sys_name = None
submit_opts = {}
return items
| gpl-3.0 | 4,543,488,674,901,808,600 | 42.232984 | 79 | 0.550893 | false |
jhseu/tensorflow | tensorflow/python/kernel_tests/random/stateless_random_ops_test.py | 1 | 7104 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateless random ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops as stateless
from tensorflow.python.platform import test
def invert_philox(key, value):
"""Invert the Philox bijection."""
key = np.array(key, dtype=np.uint32)
value = np.array(value, dtype=np.uint32)
step = np.array([0x9E3779B9, 0xBB67AE85], dtype=np.uint32)
for n in range(10)[::-1]:
key0, key1 = key + n * step
v0 = value[3] * 0x991a7cdb & 0xffffffff
v2 = value[1] * 0x6d7cae67 & 0xffffffff
hi0 = v0 * 0xD2511F53 >> 32
hi1 = v2 * 0xCD9E8D57 >> 32
v1 = hi1 ^ value[0] ^ key0
v3 = hi0 ^ value[2] ^ key1
value = v0, v1, v2, v3
return np.array(value)
class StatelessOpsTest(test.TestCase):
def _test_match(self, cases):
# Stateless ops should be the same as stateful ops on the first call
# after seed scrambling.
cases = tuple(cases)
key = 0x3ec8f720, 0x02461e29
for seed in (7, 17), (11, 5), (2, 3):
preseed = invert_philox(key, (seed[0], 0, seed[1], 0)).astype(np.uint64)
preseed = preseed[::2] | preseed[1::2] << 32
random_seed.set_random_seed(seed[0])
with test_util.use_gpu():
for stateless_op, stateful_op in cases:
stateful = stateful_op(seed=seed[1])
pure = stateless_op(seed=preseed)
self.assertAllEqual(self.evaluate(stateful), self.evaluate(pure))
def _test_determinism(self, cases):
# Stateless values should be equal iff the seeds are equal (roughly)
cases = tuple(cases)
with self.test_session(use_gpu=True):
for seed_type in [dtypes.int32, dtypes.int64]:
seed_t = array_ops.placeholder(seed_type, shape=[2])
seeds = [(x, y) for x in range(5) for y in range(5)] * 3
for stateless_op, _ in cases:
pure = stateless_op(seed=seed_t)
values = [
(seed, pure.eval(feed_dict={seed_t: seed})) for seed in seeds
]
for s0, v0 in values:
for s1, v1 in values:
self.assertEqual(s0 == s1, np.all(v0 == v1))
def _float_cases(self, shape_dtypes=(None,)):
float_cases = (
# Uniform distribution, with and without range
(stateless.stateless_random_uniform, random_ops.random_uniform, {}),
(stateless.stateless_random_uniform, random_ops.random_uniform,
dict(minval=2.2, maxval=7.1)),
# Normal distribution, with and without mean+stddev
(stateless.stateless_random_normal, random_ops.random_normal, {}),
(stateless.stateless_random_normal, random_ops.random_normal,
dict(mean=2, stddev=3)),
# Truncated normal distribution, with and without mean+stddev
(stateless.stateless_truncated_normal, random_ops.truncated_normal, {}),
(stateless.stateless_truncated_normal, random_ops.truncated_normal,
dict(mean=3, stddev=4)),
)
for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
for shape_dtype in shape_dtypes:
for shape in (), (3,), (2, 5):
if shape_dtype is not None:
shape = constant_op.constant(shape, dtype=shape_dtype)
for stateless_op, stateful_op, kwds in float_cases:
kwds = dict(shape=shape, dtype=dtype, **kwds)
yield (functools.partial(stateless_op, **kwds),
functools.partial(stateful_op, **kwds))
def _int_cases(self, shape_dtypes=(None,)):
for shape_dtype in shape_dtypes:
for shape in (), (3,), (2, 5):
if shape_dtype is not None:
shape = constant_op.constant(shape, dtype=shape_dtype)
for dtype in dtypes.int32, dtypes.int64:
kwds = dict(minval=2, maxval=11111, dtype=dtype, shape=shape)
yield (functools.partial(stateless.stateless_random_uniform, **kwds),
functools.partial(random_ops.random_uniform, **kwds))
def _multinomial_cases(self):
num_samples = 10
for logits_dtype in np.float16, np.float32, np.float64:
for output_dtype in dtypes.int32, dtypes.int64:
for logits in ([[0.1, 0.25, 0.5, 0.15]], [[0.5, 0.5], [0.8, 0.2],
[0.25, 0.75]]):
kwds = dict(
logits=constant_op.constant(logits, dtype=logits_dtype),
num_samples=num_samples,
output_dtype=output_dtype)
yield (functools.partial(stateless.stateless_multinomial, **kwds),
functools.partial(random_ops.multinomial, **kwds))
def _gamma_cases(self):
for dtype in np.float16, np.float32, np.float64:
for alpha in ([[.5, 1., 2.]], [[0.5, 0.5], [0.8, 0.2], [0.25, 0.75]]):
kwds = dict(alpha=constant_op.constant(alpha, dtype=dtype), dtype=dtype)
yield (functools.partial(
stateless.stateless_random_gamma,
shape=(10,) + tuple(np.shape(alpha)),
**kwds),
functools.partial(random_ops.random_gamma, shape=(10,), **kwds))
@test_util.run_deprecated_v1
def testMatchFloat(self):
self._test_match(self._float_cases())
@test_util.run_deprecated_v1
def testMatchInt(self):
self._test_match(self._int_cases())
@test_util.run_deprecated_v1
def testMatchMultinomial(self):
self._test_match(self._multinomial_cases())
@test_util.run_deprecated_v1
def testMatchGamma(self):
self._test_match(self._gamma_cases())
@test_util.run_deprecated_v1
def testDeterminismFloat(self):
self._test_determinism(
self._float_cases(shape_dtypes=(dtypes.int32, dtypes.int64)))
@test_util.run_deprecated_v1
def testDeterminismInt(self):
self._test_determinism(
self._int_cases(shape_dtypes=(dtypes.int32, dtypes.int64)))
@test_util.run_deprecated_v1
def testDeterminismMultinomial(self):
self._test_determinism(self._multinomial_cases())
@test_util.run_deprecated_v1
def testDeterminismGamma(self):
self._test_determinism(self._gamma_cases())
if __name__ == '__main__':
test.main()
| apache-2.0 | -5,237,965,412,197,628,000 | 38.910112 | 80 | 0.642596 | false |
polysquare/polysquare-ci-scripts | setup.py | 1 | 1485 | # /setup.py
#
# Installation and setup script for polysquare-ci-scripts
#
# See /LICENCE.md for Copyright information
"""Installation and setup script for polysquare-ci-scripts."""
from setuptools import (find_packages, setup)
setup(name="polysquare-ci-scripts",
version="0.0.1",
description="Polysquare Continuous Integration Scripts",
long_description_markdown_filename="README.md",
author="Sam Spilsbury",
author_email="[email protected]",
url="http://github.com/polysquare/polysquare-ci-scripts",
classifiers=["Development Status :: 3 - Alpha",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License"],
license="MIT",
keywords="development linters",
packages=find_packages(exclude=["test"]),
requires=[
"setuptools"
],
extras_require={
"upload": ["setuptools-markdown>=0.1"]
},
zip_safe=True,
include_package_data=True)
| mit | -5,243,586,838,271,640,000 | 38.078947 | 66 | 0.577778 | false |
aonotas/chainer | chainer/links/model/classifier.py | 1 | 4579 | from chainer.functions.evaluation import accuracy
from chainer.functions.loss import softmax_cross_entropy
from chainer import link
from chainer import reporter
class Classifier(link.Chain):
"""A simple classifier model.
This is an example of chain that wraps another chain. It computes the
loss and accuracy based on a given input/label pair.
Args:
predictor (~chainer.Link): Predictor network.
lossfun (function): Loss function.
accfun (function): Function that computes accuracy.
label_key (int or str): Key to specify label variable from arguments.
When it is ``int``, a variable in positional arguments is used.
And when it is ``str``, a variable in keyword arguments is used.
Attributes:
predictor (~chainer.Link): Predictor network.
lossfun (function): Loss function.
accfun (function): Function that computes accuracy.
y (~chainer.Variable): Prediction for the last minibatch.
loss (~chainer.Variable): Loss value for the last minibatch.
accuracy (~chainer.Variable): Accuracy for the last minibatch.
compute_accuracy (bool): If ``True``, compute accuracy on the forward
computation. The default value is ``True``.
.. note::
This link uses :func:`chainer.softmax_cross_entropy` with
default arguments as a loss function (specified by ``lossfun``),
if users do not explicitly change it. In particular, the loss function
does not support double backpropagation.
If you need second or higher order differentiation, you need to turn
it on with ``enable_double_backprop=True``:
>>> import chainer.functions as F
>>> import chainer.links as L
>>>
>>> def lossfun(x, t):
... return F.softmax_cross_entropy(
... x, t, enable_double_backprop=True)
>>>
>>> predictor = L.Linear(10)
>>> model = L.Classifier(predictor, lossfun=lossfun)
"""
compute_accuracy = True
def __init__(self, predictor,
lossfun=softmax_cross_entropy.softmax_cross_entropy,
accfun=accuracy.accuracy,
label_key=-1):
if not (isinstance(label_key, (int, str))):
raise TypeError('label_key must be int or str, but is %s' %
type(label_key))
super(Classifier, self).__init__()
self.lossfun = lossfun
self.accfun = accfun
self.y = None
self.loss = None
self.accuracy = None
self.label_key = label_key
with self.init_scope():
self.predictor = predictor
def __call__(self, *args, **kwargs):
"""Computes the loss value for an input and label pair.
It also computes accuracy and stores it to the attribute.
Args:
args (list of ~chainer.Variable): Input minibatch.
kwargs (dict of ~chainer.Variable): Input minibatch.
When ``label_key`` is ``int``, the corresponding element in ``args``
is treated as ground truth labels. And when it is ``str``, the
element in ``kwargs`` is used.
All elements of ``args`` and ``kwargs`` except the ground truth
labels are features.
It feeds features to the predictor and compares the result
with ground truth labels.
Returns:
~chainer.Variable: Loss value.
"""
if isinstance(self.label_key, int):
if not (-len(args) <= self.label_key < len(args)):
msg = 'Label key %d is out of bounds' % self.label_key
raise ValueError(msg)
t = args[self.label_key]
if self.label_key == -1:
args = args[:-1]
else:
args = args[:self.label_key] + args[self.label_key + 1:]
elif isinstance(self.label_key, str):
if self.label_key not in kwargs:
msg = 'Label key "%s" is not found' % self.label_key
raise ValueError(msg)
t = kwargs[self.label_key]
del kwargs[self.label_key]
self.y = None
self.loss = None
self.accuracy = None
self.y = self.predictor(*args, **kwargs)
self.loss = self.lossfun(self.y, t)
reporter.report({'loss': self.loss}, self)
if self.compute_accuracy:
self.accuracy = self.accfun(self.y, t)
reporter.report({'accuracy': self.accuracy}, self)
return self.loss
| mit | 4,334,894,875,054,640,600 | 37.158333 | 78 | 0.591832 | false |
voc/voctomix | vocto/composites.py | 1 | 12816 | #!/usr/bin/env python3
# for debug logging
import logging
# use Frame
from vocto.frame import Frame, X, Y, L, T, R, B
# for cloning objects
import copy
# for parsing configuration items
import re
log = logging.getLogger('Composites')
class Composites:
""" a namespace for composite related methods
"""
def configure(cfg, size, add_swap=True):
""" read INI like configuration from <cfg> and return all the defined
composites. <size> is the overall frame size which all proportional
(floating point) coordinates are related to.
"""
# prepare resulting composites dictionary
composites = dict()
# walk through composites configuration
for c_name, c_val in cfg:
if '.' not in c_name:
raise RuntimeError("syntax error in composite config '{}' "
"(must be: 'name.attribute')"
.format(c_name))
# split name into name and attribute
name, attr = c_name.lower().rsplit('.', 1)
if name not in composites:
# add new composite
composites[name] = Composite(len(composites), name)
try:
# set attribute
composites[name].config(attr, c_val, size)
except RuntimeError as err:
raise RuntimeError(
"syntax error in composite config value at '{}':\n{}"
.format(name, err))
add_mirrored_composites(composites)
if add_swap:
# add any useful swapped targets
add_swapped_targets(composites)
return composites
def targets(composites):
""" return a list of all composites that are not intermediate
"""
result = []
for c_name, c in composites.items():
if not c.inter:
result.append(c)
return sorted(result, key=lambda c: c.order)
def intermediates(composites):
""" return a list of all composites that are intermediate
"""
result = []
for c_name, c in composites.items():
if c.inter:
result.append(c)
return sorted(result, key=lambda c: c.order)
class Composite:
def __init__(self, order, name, a=Frame(True), b=Frame(True)):
assert type(order) is int or order is None
assert type(name) is str or not name
self.name = name
self.frame = [copy.deepcopy(a), copy.deepcopy(b)]
self.default = [None, None]
self.inter = False
self.noswap = False
self.mirror = False
self.order = order
def str_title():
return "Key A%s\tB%s Name" % (Frame.str_title(), Frame.str_title())
def __str__(self):
def hidden( x, hidden ):
return str(x).replace(' ','_') if hidden else str(x)
return "%s A%s\tB%s %s" % (" * " if self.A().key else " ",
hidden(self.A(), self.A().invisible() or self.covered()),
hidden(self.B(), self.B().invisible()),
self.name)
def equals(self, other, treat_covered_as_invisible, swapped=False):
""" compare two composites if they are looking the same
(e.g. a rectangle with size 0x0 looks the same as one with alpha=0
and so it is treated as equal here)
"""
if not swapped:
if not (self.A() == other.A() or (treat_covered_as_invisible and self.covered() and other.covered())):
return False
elif not (self.B() == other.B() or (self.B().invisible() and other.B().invisible())):
return False
else:
if not (self.A() == other.B() or (treat_covered_as_invisible and self.covered() and other.B().invisible())):
return False
elif not (self.B() == other.A() or (self.B().invisible() and other.covered())):
return False
return True
def A(self):
return self.frame[0]
def B(self):
return self.frame[1]
def Az(self, zorder):
frame = copy.deepcopy(self.frame[0])
frame.zorder = zorder
return frame
def Bz(self, zorder):
frame = copy.deepcopy(self.frame[1])
frame.zorder = zorder
return frame
def swapped(self):
""" swap A and B source items
"""
if self.noswap:
return self
else:
# deep copy everything
s = copy.deepcopy(self)
# then swap frames
s.frame = self.frame[::-1]
s.name = swap_name(self.name)
return s
def mirrored(self):
""" mirror A and B source items
"""
# shallow copy; the frame list is rebuilt below
s = copy.copy(self)
# then mirror frames
s.frame = [f.mirrored() for f in self.frame]
s.name = mirror_name(self.name)
return s
def key(self):
for f in self.frame:
if f.key:
return True
return False
def config(self, attr, value, size):
""" set value <value> from INI attribute <attr>.
<size> is the input channel size
"""
if attr == 'a':
self.frame[0].rect = str2rect(value, size)
elif attr == 'b':
self.frame[1].rect = str2rect(value, size)
elif attr == 'crop-a':
self.frame[0].crop = str2crop(value, size)
elif attr == 'crop-b':
self.frame[1].crop = str2crop(value, size)
elif attr == 'default-a':
self.default[0] = value
elif attr == 'default-b':
self.default[1] = value
elif attr == 'alpha-a':
self.frame[0].alpha = str2alpha(value)
elif attr == 'alpha-b':
self.frame[1].alpha = str2alpha(value)
elif attr == 'inter':
self.inter = value
elif attr == 'noswap':
self.noswap = value
elif attr == 'mirror':
self.mirror = value
self.frame[0].original_size = size
self.frame[1].original_size = size
def covered(self):
""" check if below (A) is invisible or covered by above (B)
(considers shape with cropping and transparency)
"""
below, above = self.frame
if below.invisible():
return True
if above.invisible():
return False
bc = below.cropped()
ac = above.cropped()
# return if above is (semi-)transparent or covers below completely
return (above.alpha == 255 and
bc[L] >= ac[L] and
bc[T] >= ac[T] and
bc[R] <= ac[R] and
bc[B] <= ac[B])
def single(self):
""" check if above (B) is invisible
"""
below, above = self.frame
return above.invisible()
def both(self):
return not (self.single() or self.covered())
def add_swapped_targets(composites):
result = dict()
for c_name, c in composites.items():
if not (c.inter or c.noswap):
inc = True
for v_name, v in composites.items():
if v.equals(c.swapped(), True) and not v.inter:
inc = False
break
if inc:
log.debug("Adding auto-swapped target %s from %s" %
(swap_name(c_name), c_name))
r = c.swapped()
r.order = len(composites) + len(result)
result[swap_name(c_name)] = r
return composites.update(result)
def add_mirrored_composites(composites):
result = dict()
for c_name, c in composites.items():
if c.mirror:
r = c.mirrored()
r.order = len(composites) + len(result)
result[mirror_name(c_name)] = r
return composites.update(result)
def swap_name(name): return name[1:] if name[0] == '^' else "^" + name
def mirror_name(name): return name[1:] if name[0] == '|' else "|" + name
def absolute(str, max):
if str == '*':
assert max
# return maximum value
return int(max)
elif '.' in str:
assert max
# return absolute (Pixel) value in proportion to max
return int(float(str) * max)
else:
# return absolute (Pixel) value
return int(str)
def str2rect(str, size):
""" read rectangle pair from string '*', 'X/Y WxH', 'X/Y', 'WxH', 'X/Y WH', 'X/Y WH' or 'XY WH'
"""
# check for '*'
if str == "*":
# return overall position and size
return [0, 0, size[X], size[Y]]
# check for 'X/Y'
r = re.match(r'^\s*([-.\d]+)\s*/\s*([-.\d]+)\s*$', str)
if r:
# return X,Y and overall size
return [absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y]),
size[X],
size[Y]]
# check for 'WxH'
r = re.match(r'^\s*([.\d]+)\s*x\s*([.\d]+)\s*$', str)
if r:
# return overall pos and W,H
return [0,
0,
absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y])]
# check for 'X/Y WxH'
r = re.match(
r'^\s*([-.\d]+)\s*/\s*([-.\d]+)\s+([.\d]+)\s*x\s*([.\d]+)\s*$', str)
if r:
# return X,Y,X+W,Y+H
return [absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y]),
absolute(r.group(1), size[X]) + absolute(r.group(3), size[X]),
absolute(r.group(2), size[Y]) + absolute(r.group(4), size[Y])]
# check for 'XY WxH'
r = re.match(r'^\s*(-?\d+.\d+)\s+([.\d]+)\s*x\s*([.\d]+)\s*$', str)
if r:
# return XY,XY,XY+W,XY+H
return [absolute(r.group(1), size[X]),
absolute(r.group(1), size[Y]),
absolute(r.group(1), size[X]) + absolute(r.group(2), size[X]),
absolute(r.group(1), size[Y]) + absolute(r.group(3), size[Y])]
# check for 'X/Y WH'
r = re.match(r'^\s*([-.\d]+)\s*/\s*([-.\d]+)\s+(\d+.\d+)\s*$', str)
if r:
# return X,Y,X+WH,Y+WH
return [absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y]),
absolute(r.group(1), size[X]) + absolute(r.group(3), size[X]),
absolute(r.group(2), size[Y]) + absolute(r.group(3), size[Y])]
# check for 'XY WH'
r = re.match(r'^\s*(-?\d+.\d+)\s+(\d+.\d+)\s*$', str)
if r:
# return XY,XY,XY+WH,XY+WH
return [absolute(r.group(1), size[X]),
absolute(r.group(1), size[Y]),
absolute(r.group(1), size[X]) + absolute(r.group(2), size[X]),
absolute(r.group(1), size[Y]) + absolute(r.group(2), size[Y])]
# didn't get it
raise RuntimeError("syntax error in rectangle value '{}' "
"(must be either '*', 'X/Y WxH', 'X/Y', 'WxH', 'X/Y WH', 'X/Y WH' or 'XY WH' where X, Y, W, H may be int or float and XY, WH must be float)".format(str))
def str2crop(str, size):
""" read crop values pair from string '*' or 'L/T/R/B'
"""
# check for '*'
if str == "*":
# return zero borders
return [0, 0, 0, 0]
# check for L/T/R/B
r = re.match(
r'^\s*([.\d]+)\s*/\s*([.\d]+)\s*/\s*([.\d]+)\s*/\s*([.\d]+)\s*$', str)
if r:
return [absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y]),
absolute(r.group(3), size[X]),
absolute(r.group(4), size[Y])]
# check for LR/TB
r = re.match(
r'^\s*([.\d]+)\s*/\s*([.\d]+)\s*$', str)
if r:
return [absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y]),
absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y])]
# check for LTRB
r = re.match(
r'^\s*([.\d]+)\s*$', str)
if r:
return [absolute(r.group(1), size[X]),
absolute(r.group(1), size[Y]),
absolute(r.group(1), size[X]),
absolute(r.group(1), size[Y])]
# didn't get it
raise RuntimeError("syntax error in crop value '{}' "
"(must be either '*', 'L/T/R/B', 'LR/TB', 'LTRB' where L, T, R, B, LR/TB and LTRB must be int or float')".format(str))
def str2alpha(str):
""" read alpha values from string as float between 0.0 and 1.0 or as int between 0 an 255
"""
# check for floating point value
r = re.match(
r'^\s*([.\d]+)\s*$', str)
if r:
# return absolute proportional to 255
return absolute(r.group(1), 255)
# didn't get it
raise RuntimeError("syntax error in alpha value '{}' "
"(must be float or int)".format(str))
| mit | -1,553,340,667,530,149,400 | 33.920981 | 176 | 0.504448 | false |
mahmoud/wapiti | wapiti/operations/utils.py | 1 | 12249 | # -*- coding: utf-8 -*-
import sys
from heapq import heappush, heappop
import itertools
from functools import total_ordering
def is_scalar(obj):
return not hasattr(obj, '__iter__') or isinstance(obj, basestring)
def prefixed(arg, prefix=None):
if prefix and not arg.startswith(prefix):
arg = prefix + arg
return arg
@total_ordering
class MaxInt(long):
"""
A quite-large integer type that tries to be like float('inf')
(Infinity), but can be used for slicing and other integer
operations. float('inf') is generally more correct, except that
mixing a float and integer in arithmetic operations will result in
a float, which will raise an error on slicing.
"""
def __new__(cls, *a, **kw):
return super(MaxInt, cls).__new__(cls, sys.maxint + 1)
def __init__(self, name='MAX'):
self._name = str(name)
def __repr__(self):
return self._name
def __str__(self):
return repr(self)
# TODO: better math
for func in ('__add__', '__sub__', '__mul__', '__floordiv__', '__div__',
'__mod__', '__divmod__', '__pow__', '__lshift__',
'__rshift__'):
locals()[func] = lambda self, other: self
def __gt__(self, other):
return not self == other
def __eq__(self, other):
return isinstance(other, MaxInt)
def __int__(self):
return self
class OperationExample(object):
"""
Sort of like a partial, but specialer.
# other types of tests?
"""
def __init__(self,
param=None,
limit=None,
op_type=None,
**kw):
self.op_type = op_type
self.param = param
self.limit = limit
self.doc = kw.pop('doc', '')
self.test = kw.pop('test', None)
# test defaults to limit_equal_or_depleted in test_ops.py
if kw:
raise TypeError('got unexpected keyword arguments: %r' % kw)
@property
def op_name(self):
if self.op_type is None:
return None
return self.op_type.__name__
@property
def disp_name(self):
if not self.op_type:
return '(unbound OperationExample)'
tmpl = '%(type)s(%(param)r, limit=%(limit)s)'
if self.op_type.input_field is None:
tmpl = '%(type)s(limit=%(limit)s)'
return tmpl % {'type': self.op_type.__name__,
'param': self.param,
'limit': self.limit}
def bind_op_type(self, op_type):
if self.op_type is None:
self.op_type = op_type
if self.limit is None:
try:
pql = op_type.per_query_limit
except AttributeError:
pql = op_type.subop_chain[0].per_query_limit
self.limit = pql.get_limit()
return
def make_op(self, mag=None):
if not self.op_type:
raise TypeError('no Operation type assigned')
mag = int(mag or 1)
limit = self.limit * mag
if self.op_type.input_field is None:
return self.op_type(limit=limit)
return self.op_type(self.param, limit=limit)
def __repr__(self):
cn = self.__class__.__name__
kwargs = ['param', 'limit', 'test', 'doc']
kw_parts = ['op_type=%s' % self.op_name]
vals = [getattr(self, a) for a in kwargs if getattr(self, a)]
kw_parts.extend(['%s=%r' % (a, v) for a, v in zip(kwargs, vals)])
kwarg_str = ', '.join(kw_parts)
return '%s(%s)' % (cn, kwarg_str)
__str__ = __repr__
"""
WrapperType and Wrapper (defined below) are technically a metaclass and
its companion base class, but really just a very overwrought way of enabling
customized versions of types floating around in some
locations. Because Wapiti is a DSL, but also just a bunch of Python,
we have to deal with the fact that if you modify a type/class, it will
be modified everywhere that references it.
TL;DR: This overblown thing lets Operations use something like
Prioritized(GetCategory, key='total_count'), which sets a priority for
better queueing, without modifying the GetCategory Operation
itself. (Different operations will want to prioritiez different
things.)
(There is almost certainly a better way, but this was a bit of
fun. Ever made an object that is an instance and a subclass of
itself?)
"""
def make_type_wrapper(name, init_args=None):
init_args = init_args or []
args, defaults = [], {}
for ia in init_args:
try:
arg, _default = ia
defaults[arg] = _default
except ValueError:
arg = ia
if not isinstance(arg, basestring):
raise TypeError('expected string arg name, not %r' % arg)
args.append(arg)
attrs = {'_args': args, '_defaults': defaults}
return WrapperType(str(name), (Wrapper,), attrs)
class WrapperType(type):
@property
def _repr_args(self):
ret = []
for a in self._args:
try:
ret.append((a, self._defaults[a]))
except KeyError:
ret.append(a)
return ret
def __repr__(cls):
name, cname = cls.__name__, cls.__class__.__name__
if cls._repr_args:
return '%s(%r, %r)' % (cname, name, cls._repr_args)
else:
return '%s(%r)' % (cname, name)
class Wrapper(object):
__metaclass__ = WrapperType
_args, _defaults = [], {}
def __init__(self, to_wrap, *args, **kwargs):
wrapped_dict = {}
if isinstance(to_wrap, Wrapper):
wrapped_dict = dict(to_wrap._wrapped_dict)
to_wrap = to_wrap._wrapped
self.__dict__['_wrapped'] = to_wrap
self.__dict__['_wrapped_dict'] = wrapped_dict
cn = self.__name__
for arg_i, arg_name in enumerate(self._args):
try:
val = args[arg_i]
if arg_name in kwargs:
raise TypeError('%s got multiple values for arg %r'
% (cn, arg_name))
except IndexError:
try:
val = kwargs.pop(arg_name)
except KeyError:
try:
val = self._defaults[arg_name]
except KeyError:
raise TypeError('%s expected required arg %r'
% (cn, arg_name))
setattr(self, arg_name, val)
return
def __repr__(self):
kv = ', '.join(['%s=%r' % (k, v) for k, v
in self._wrapped_dict.items()])
tmpl = "<wrapped %r (%s)>"
return tmpl % (self._wrapped, kv)
def __getattr__(self, name):
return getattr(self._wrapped, name)
def __setattr__(self, name, val):
super(Wrapper, self).__setattr__(name, val)
self._wrapped_dict[name] = val
def __delattr__(self, name):
super(Wrapper, self).__delattr__(name)
self._wrapped_dict.pop(name, None)
def __call__(self, *a, **kw):
return self._wrapped(*a, **kw)
REMOVED = '<removed-task>'
class PriorityQueue(object):
"""
Real quick type based on the heapq docs.
"""
def __init__(self):
self._pq = []
self._entry_map = {}
self.counter = itertools.count()
def add(self, task, priority=None):
# larger numbers = higher priority
priority = -int(priority or 0)
if task in self._entry_map:
self.remove(task)
count = next(self.counter)
entry = [priority, count, task]
self._entry_map[task] = entry
heappush(self._pq, entry)
def remove(self, task):
entry = self._entry_map.pop(task)
entry[-1] = REMOVED
def _cull(self):
while self._pq:
priority, count, task = self._pq[0]
if task is REMOVED:
heappop(self._pq)
continue
return
raise IndexError('empty priority queue')
def peek(self, default=REMOVED):
try:
self._cull()
_, _, task = self._pq[0]
except IndexError:
if default is not REMOVED:
return default
raise IndexError('peek on empty queue')
return task
def pop(self, default=REMOVED):
try:
self._cull()
_, _, task = heappop(self._pq)
del self._entry_map[task]
except IndexError:
if default is not REMOVED:
return default
raise IndexError('pop on empty queue')
return task
def __len__(self):
return len(self._entry_map)
def chunked_iter(src, size, **kw):
"""
Generates 'size'-sized chunks from 'src' iterable. Unless
the optional 'fill' keyword argument is provided, iterables
not even divisible by 'size' will have a final chunk that is
smaller than 'size'.
Note that fill=None will in fact use None as the fill value.
>>> list(chunked_iter(range(10), 3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(chunked_iter(range(10), 3, fill=None))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
"""
size = int(size)
if size <= 0:
raise ValueError('expected a positive integer chunk size')
do_fill = True
try:
fill_val = kw.pop('fill')
except KeyError:
do_fill = False
fill_val = None
if kw:
raise ValueError('got unexpected keyword arguments: %r' % kw.keys())
if not src:
return
cur_chunk = []
i = 0
for item in src:
cur_chunk.append(item)
i += 1
if i % size == 0:
yield cur_chunk
cur_chunk = []
if cur_chunk:
if do_fill:
lc = len(cur_chunk)
cur_chunk[lc:] = [fill_val] * (size - lc)
yield cur_chunk
return
# From http://en.wikipedia.org/wiki/Wikipedia:Namespace
NAMESPACES = {
'Main': 0,
'Talk': 1,
'User': 2,
'User talk': 3,
'Wikipedia': 4,
'Wikipedia talk': 5,
'File': 6,
'File talk': 7,
'MediaWiki': 8,
'MediaWiki talk': 9,
'Template': 10,
'Template talk': 11,
'Help': 12,
'Help talk': 13,
'Category': 14,
'Category talk': 15,
'Portal': 100,
'Portal talk': 101,
'Book': 108,
'Book talk': 109,
'Special': -1,
'Media': -2}
def bucketize(src, keyfunc=None):
"""
Group values in 'src' iterable by value returned by 'keyfunc'.
keyfunc defaults to bool, which will group the values by
truthiness; at most there will be two keys, True and False, and
each key will have a list with at least one item.
>>> bucketize(range(5))
{False: [0], True: [1, 2, 3, 4]}
>>> is_odd = lambda x: x % 2 == 1
>>> bucketize(range(5), is_odd)
{False: [0, 2, 4], True: [1, 3]}
Value lists are not deduplicated:
>>> bucketize([None, None, None, 'hello'])
{False: [None, None, None], True: ['hello']}
"""
if not hasattr(src, '__iter__') and not isinstance(src, basestring):
raise TypeError('expected an iterable')
if keyfunc is None:
keyfunc = bool
if not callable(keyfunc):
raise TypeError('expected callable key function')
ret = {}
for val in src:
key = keyfunc(val)
ret.setdefault(key, []).append(val)
return ret
def bucketize_bool(src, keyfunc=None):
"""
Like bucketize, but for added convenience returns a tuple of
(truthy_values, falsy_values).
>>> nonempty, empty = bucketize_bool(['', '', 'hi', '', 'bye'])
>>> nonempty
['hi', 'bye']
keyfunc defaults to bool, but can be carefully overridden to
use any function that returns either True or False.
>>> import string
>>> is_digit = lambda x: x in string.digits
>>> decimal_digits, hexletters = bucketize_bool(string.hexdigits, is_digit)
>>> ''.join(decimal_digits), ''.join(hexletters)
('0123456789', 'abcdefABCDEF')
"""
bucketized = bucketize(src, keyfunc)
return bucketized.get(True, []), bucketized.get(False, [])
def coerce_namespace(ns_arg):
ns_str = str(ns_arg).capitalize()
return NAMESPACES.get(ns_str, ns_str)
| bsd-3-clause | 6,357,164,023,130,465,000 | 28.236277 | 79 | 0.5478 | false |
monuszko/django-anothercrm | anothercrm/models.py | 1 | 5607 | from django.db import models
from django.utils.translation import ugettext_lazy as _
class Person(models.Model):
SEX_CHOICES = (
('M', 'Male'),
('F', 'Female'),
)
#TODO: validators for name, mobile...
firstname = models.CharField(max_length=30)
lastname = models.CharField(max_length=30)
sex = models.CharField(max_length=1, choices=SEX_CHOICES)
email = models.EmailField(
max_length=200, verbose_name=_('Email address'), blank=True)
mobile = models.CharField(
max_length=20, verbose_name=_('Mobile Phone Number'), blank=True)
address = models.CharField(max_length=100, verbose_name=_('Address'),
help_text=_('24 Badger Rd., etc.'), blank=True)
zipcode = models.CharField(max_length=10, verbose_name=_('Postal code'),
help_text=_("For example, '80-209' in Poland"), blank=True)
city = models.CharField(max_length=100, verbose_name=_('City'), blank=True)
state = models.CharField(
max_length=100, verbose_name=_('State'), blank=True)
country = models.CharField(
max_length=2, verbose_name=_('Country'), blank=True)
creation_date = models.DateTimeField(
verbose_name=_('Creation Date'), auto_now_add=True)
modification_date = models.DateTimeField(
verbose_name=_('Modification Date'), auto_now=True)
def get_absolute_url(self):
from django.core.urlresolvers import reverse
from django.utils.text import slugify
fname = slugify(self.firstname)
lname = slugify(self.lastname)
kwargs = {
'firstname': fname,
'lastname': lname,
'pk': self.id,
}
return reverse('anothercrm:person', kwargs=kwargs)
def __unicode__(self):
return u'{0} {1}'.format(self.firstname, self.lastname)
def employee_count(self):
'''
Returns the number of relationships where the person
is employed at a company.
'''
return self.relationship_set.filter(relatype__category='E').count()
def client_count(self):
'''
Returns the number of relationships where the person
is a client of a company.
'''
return self.relationship_set.filter(relatype__category='C').count()
def company_names(self):
'''
Returns the names of companies the person is involved with.
'''
return ', '.join(self.relationship_set.all().values_list(
'company__name', flat=True))
def employee_relationships(self):
'''
Returns the relationships where the person
is employed at a company.
'''
return self.relationship_set.filter(relatype__category='E')
def client_relationships(self):
'''
Returns the relationships where the person
is a client of a company.
'''
return self.relationship_set.filter(relatype__category='C')
class Trade(models.Model):
name = models.CharField(max_length=100, unique=True,
help_text="the industry the company is in.")
def __unicode__(self):
return self.name
class Company(models.Model):
name = models.CharField(max_length=100)
mission = models.TextField(blank=True, default="To make money.")
trades = models.ManyToManyField(Trade, blank=True)
def __unicode__(self):
return self.name
def get_absolute_url(self):
#TODO: ask on IRC about these imports
from django.core.urlresolvers import reverse
from django.utils.text import slugify
slug = slugify(self.name)
return reverse(
'anothercrm:company', kwargs={'name': slug, 'pk': self.id})
def get_trades(self):
return ', '.join(tr.name for tr in self.trades.all())
get_trades.short_description='Trade(s)'
get_trades.admin_order_field='trades'
def employees_by_position(self):
'''
Returns Relations with employees - not Persons.
'''
return self.relationship_set.filter(
relatype__category='E').order_by('relatype__name')
def clients_by_type(self):
'''
Returns Relations with clients, agents etc - not Persons.
'''
return self.relationship_set.filter(
relatype__category='C').order_by('relatype__name')
class Meta:
verbose_name_plural = _('companies')
class RelationshipType(models.Model):
CATEGORY_CHOICES = (
('E', 'Employee'),
('C', 'Client'),
)
category = models.CharField(max_length=1, choices=CATEGORY_CHOICES)
name = models.CharField(max_length=50, unique=True,
help_text=("For employees, this is position. For customers, it can"
" be 'regular customer', etc."))
notes = models.TextField(blank=True)
def __unicode__(self):
return u'{0} ({1})'.format(self.name, self.get_category_display())
class Relationship(models.Model):
relatype = models.ForeignKey(RelationshipType,
verbose_name=_('relationship type'))
company = models.ForeignKey(Company)
person = models.ForeignKey(Person)
def __unicode__(self):
return u'{0} {1} {2} {3}'.format(self.person.firstname,
self.person.lastname, self.relatype, self.company)
| agpl-3.0 | 7,618,149,162,945,209,000 | 34.713376 | 79 | 0.588193 | false |
jaeilepp/eggie | mne/viz/_3d.py | 1 | 24122 | """Functions to make 3D plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
from ..externals.six import string_types, advance_iterator
from distutils.version import LooseVersion
import os
import inspect
import warnings
from itertools import cycle
import numpy as np
from scipy import linalg
from ..io.pick import pick_types
from ..surface import get_head_surf, get_meg_helmet_surf, read_surface
from ..transforms import read_trans, _find_trans, apply_trans
from ..utils import get_subjects_dir, logger, _check_subject
from .utils import mne_analyze_colormap, _prepare_trellis, COLORS
def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
n_jobs=1):
"""Plot MEG/EEG fields on head surface and helmet in 3D
Parameters
----------
evoked : instance of mne.Evoked
The evoked object.
surf_maps : list
The surface mapping information obtained with make_field_map.
time : float | None
The time point at which the field map shall be displayed. If None,
the average peak latency (across sensor types) is used.
time_label : str
How to print info about the time instant visualized.
n_jobs : int
Number of jobs to run in parallel.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
types = [t for t in ['eeg', 'grad', 'mag'] if t in evoked]
time_idx = None
if time is None:
time = np.mean([evoked.get_peak(ch_type=t)[1] for t in types])
if not evoked.times[0] <= time <= evoked.times[-1]:
raise ValueError('`time` (%0.3f) must be inside `evoked.times`' % time)
time_idx = np.argmin(np.abs(evoked.times - time))
types = [sm['kind'] for sm in surf_maps]
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (1.0, 1.0, 1.0)]
colormap = mne_analyze_colormap(format='mayavi')
colormap_lines = np.concatenate([np.tile([0., 0., 255., 255.], (127, 1)),
np.tile([0., 0., 0., 255.], (2, 1)),
np.tile([255., 0., 0., 255.], (127, 1))])
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, this_map in enumerate(surf_maps):
surf = this_map['surf']
map_data = this_map['data']
map_type = this_map['kind']
map_ch_names = this_map['ch_names']
if map_type == 'eeg':
pick = pick_types(evoked.info, meg=False, eeg=True)
else:
pick = pick_types(evoked.info, meg=True, eeg=False, ref_meg=False)
ch_names = [evoked.ch_names[k] for k in pick]
set_ch_names = set(ch_names)
set_map_ch_names = set(map_ch_names)
if set_ch_names != set_map_ch_names:
message = ['Channels in map and data do not match.']
diff = set_map_ch_names - set_ch_names
if len(diff):
message += ['%s not in data file. ' % list(diff)]
diff = set_ch_names - set_map_ch_names
if len(diff):
message += ['%s not in map file.' % list(diff)]
raise RuntimeError(' '.join(message))
data = np.dot(map_data, evoked.data[pick, time_idx])
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
# Make a solid surface
vlim = np.max(np.abs(data))
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
# Now show our field pattern
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
fsurf = mlab.pipeline.surface(mesh, vmin=-vlim, vmax=vlim)
fsurf.module_manager.scalar_lut_manager.lut.table = colormap
# And the field lines on top
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
cont = mlab.pipeline.contour_surface(mesh, contours=21,
line_width=1.0,
vmin=-vlim, vmax=vlim,
opacity=alpha)
cont.module_manager.scalar_lut_manager.lut.table = colormap_lines
if '%' in time_label:
time_label %= (1e3 * evoked.times[time_idx])
mlab.text(0.01, 0.01, time_label, width=0.4)
mlab.view(10, 60)
return fig
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
slices=None, show=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
mri_fname : str
The name of the file containing anatomical data.
surf_fnames : list of str
The filenames for the BEM surfaces in the format
['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
orientation : str
'coronal' or 'transverse' or 'sagittal'
slices : list of int
Slice indices.
show : bool
Call pyplot.show() at the end.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
import nibabel as nib
if orientation not in ['coronal', 'axial', 'sagittal']:
raise ValueError("Orientation must be 'coronal', 'axial' or "
"'sagittal'. Got %s." % orientation)
# Load the T1 data
nim = nib.load(mri_fname)
data = nim.get_data()
affine = nim.get_affine()
n_sag, n_axi, n_cor = data.shape
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
if slices is None:
n_slices = data.shape[orientation_axis]
slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
# create of list of surfaces
surfs = list()
trans = linalg.inv(affine)
# XXX : next line is a hack don't ask why
trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
for surf_fname in surf_fnames:
surf = dict()
surf['rr'], surf['tris'] = read_surface(surf_fname)
# move back surface to MRI coordinate system
surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
surfs.append(surf)
fig, axs = _prepare_trellis(len(slices), 4)
for ax, sl in zip(axs, slices):
# adjust the orientations for good view
if orientation == 'coronal':
dat = data[:, :, sl].transpose()
elif orientation == 'axial':
dat = data[:, sl, :]
elif orientation == 'sagittal':
dat = data[sl, :, :]
# First plot the anatomical data
ax.imshow(dat, cmap=plt.cm.gray)
ax.axis('off')
# and then plot the contours on top
for surf in surfs:
if orientation == 'coronal':
ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 2],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'axial':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
surf['tris'], surf['rr'][:, 1],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'sagittal':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 0],
levels=[sl], colors='yellow', linewidths=2.0)
if show:
plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt.show()
return fig
def plot_trans(info, trans_fname='auto', subject=None, subjects_dir=None,
ch_type=None, source='bem'):
"""Plot MEG/EEG head surface and helmet in 3D.
Parameters
----------
info : dict
The measurement info.
trans_fname : str | 'auto'
The full path to the `*-trans.fif` file produced during
coregistration.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
ch_type : None | 'eeg' | 'meg'
If None, both the MEG helmet and EEG electrodes will be shown.
If 'meg', only the MEG helmet will be shown. If 'eeg', only the
EEG electrodes will be shown.
source : str
Type to load. Common choices would be `'bem'` or `'head'`. We first
try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
then look for `'$SUBJECT*$SOURCE.fif'` in the same directory. Defaults
to 'bem'. Note. For single layer bems it is recommended to use 'head'.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
if ch_type not in [None, 'eeg', 'meg']:
raise ValueError('Argument ch_type must be None | eeg | meg. Got %s.'
% ch_type)
if trans_fname == 'auto':
# let's try to do this in MRI coordinates so they're easy to plot
trans_fname = _find_trans(subject, subjects_dir)
trans = read_trans(trans_fname)
surfs = [get_head_surf(subject, source=source, subjects_dir=subjects_dir)]
if ch_type is None or ch_type == 'meg':
surfs.append(get_meg_helmet_surf(info, trans))
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (0.0, 0.0, 0.6)]
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, surf in enumerate(surfs):
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
# Make a solid surface
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
if ch_type is None or ch_type == 'eeg':
eeg_locs = [l['eeg_loc'][:, 0] for l in info['chs']
if l['eeg_loc'] is not None]
if len(eeg_locs) > 0:
eeg_loc = np.array(eeg_locs)
# Transform EEG electrodes to MRI coordinates
eeg_loc = apply_trans(trans['trans'], eeg_loc)
with warnings.catch_warnings(record=True): # traits
mlab.points3d(eeg_loc[:, 0], eeg_loc[:, 1], eeg_loc[:, 2],
color=(1.0, 0.0, 0.0), scale_factor=0.005)
else:
warnings.warn('EEG electrode locations not found. '
'Cannot plot EEG electrodes.')
mlab.view(90, 90)
return fig
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
colormap='hot', time_label='time=%0.2f ms',
smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
transparent=True, alpha=1.0, time_viewer=False,
config_opts={}, subjects_dir=None, figure=None,
views='lat', colorbar=True):
"""Plot SourceEstimates with PySurfer
Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
which will automatically be set by this function. Plotting multiple
SourceEstimates with different values for subjects_dir will cause
PySurfer to use the wrong FreeSurfer surfaces when using methods of
the returned Brain object. It is therefore recommended to set the
SUBJECTS_DIR environment variable or always use the same value for
subjects_dir (within the same Python session).
Parameters
----------
stc : SourceEstimates
The source estimates to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display. Using 'both' or 'split' requires
PySurfer version 0.4 or above.
colormap : str
The type of colormap to use.
time_label : str
How to print info about the time instant visualized.
smoothing_steps : int
The amount of smoothing
fmin : float
The minimum value to display.
fmid : float
The middle value on the colormap.
fmax : float
The maximum value for the colormap.
transparent : bool
If True, use a linear transparency between fmin and fmid.
alpha : float
Alpha value to apply globally to the overlay.
time_viewer : bool
Display time viewer GUI.
config_opts : dict
Keyword arguments for Brain initialization.
See pysurfer.viz.Brain.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.scene.Scene | list | int | None
If None, a new figure will be created. If multiple views or a
split view is requested, this must be a list of the appropriate
length. If int is provided it will be used to identify the Mayavi
figure by its id or create a new figure with the given id.
views : str | list
View to use. See surfer.Brain().
colorbar : bool
If True, display colorbar on scene.
Returns
-------
brain : Brain
A instance of surfer.viz.Brain from PySurfer.
"""
import surfer
from surfer import Brain, TimeViewer
if hemi in ['split', 'both'] and LooseVersion(surfer.__version__) < '0.4':
raise NotImplementedError('hemi type "%s" not supported with your '
'version of pysurfer. Please upgrade to '
'version 0.4 or higher.' % hemi)
try:
import mayavi
from mayavi import mlab
except ImportError:
from enthought import mayavi
from enthought.mayavi import mlab
# import here to avoid circular import problem
from ..source_estimate import SourceEstimate
if not isinstance(stc, SourceEstimate):
raise ValueError('stc has to be a surface source estimate')
if hemi not in ['lh', 'rh', 'split', 'both']:
raise ValueError('hemi has to be either "lh", "rh", "split", '
'or "both"')
n_split = 2 if hemi == 'split' else 1
n_views = 1 if isinstance(views, string_types) else len(views)
if figure is not None:
# use figure with specified id or create new figure
if isinstance(figure, int):
figure = mlab.figure(figure, size=(600, 600))
# make sure it is of the correct type
if not isinstance(figure, list):
figure = [figure]
if not all([isinstance(f, mayavi.core.scene.Scene) for f in figure]):
raise TypeError('figure must be a mayavi scene or list of scenes')
# make sure we have the right number of figures
n_fig = len(figure)
if not n_fig == n_split * n_views:
raise RuntimeError('`figure` must be a list with the same '
'number of elements as PySurfer plots that '
'will be created (%s)' % (n_split * n_views))
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
subject = _check_subject(stc.subject, subject, False)
if subject is None:
if 'SUBJECT' in os.environ:
subject = os.environ['SUBJECT']
else:
raise ValueError('SUBJECT environment variable not set')
if hemi in ['both', 'split']:
hemis = ['lh', 'rh']
else:
hemis = [hemi]
title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
args = inspect.getargspec(Brain.__init__)[0]
kwargs = dict(title=title, figure=figure, config_opts=config_opts,
subjects_dir=subjects_dir)
if 'views' in args:
kwargs['views'] = views
else:
logger.info('PySurfer does not support "views" argument, please '
'consider updating to a newer version (0.4 or later)')
with warnings.catch_warnings(record=True): # traits warnings
brain = Brain(subject, hemi, surface, **kwargs)
for hemi in hemis:
hemi_idx = 0 if hemi == 'lh' else 1
if hemi_idx == 0:
data = stc.data[:len(stc.vertno[0])]
else:
data = stc.data[len(stc.vertno[0]):]
vertices = stc.vertno[hemi_idx]
time = 1e3 * stc.times
with warnings.catch_warnings(record=True): # traits warnings
brain.add_data(data, colormap=colormap, vertices=vertices,
smoothing_steps=smoothing_steps, time=time,
time_label=time_label, alpha=alpha, hemi=hemi,
colorbar=colorbar)
# scale colormap and set time (index) to display
brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax,
transparent=transparent)
if time_viewer:
TimeViewer(brain)
return brain
def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
fontsize=18, bgcolor=(.05, 0, .1),
opacity=0.2, brain_color=(0.7,) * 3,
show=True, high_resolution=False,
fig_name=None, fig_number=None, labels=None,
modes=['cone', 'sphere'],
scale_factors=[1, 0.6],
verbose=None, **kwargs):
"""Plot source estimates obtained with sparse solver
Active dipoles are represented in a "Glass" brain.
If the same source is active in multiple source estimates, it is
displayed with a sphere, otherwise with a cone in 3D.
Parameters
----------
src : dict
The source space.
stcs : instance of SourceEstimate or list of instances of SourceEstimate
The source estimates (up to 3).
colors : list
List of colors
linewidth : int
Line width in 2D plot.
fontsize : int
Font size.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
show : bool
Show figures if True.
fig_name :
Mayavi figure name.
fig_number :
Matplotlib figure number.
labels : ndarray or list of ndarrays
Labels to show sources in clusters. Sources with the same
label and the waveforms within each cluster are presented in
the same color. labels should be a list of ndarrays when
stcs is a list ie. one label for each stc.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
kwargs : kwargs
Keyword arguments to pass to mlab.triangular_mesh.
"""
if not isinstance(stcs, list):
stcs = [stcs]
if labels is not None and not isinstance(labels, list):
labels = [labels]
if colors is None:
colors = COLORS
linestyles = ['-', '--', ':']
# Show 3D
lh_points = src[0]['rr']
rh_points = src[1]['rr']
points = np.r_[lh_points, rh_points]
lh_normals = src[0]['nn']
rh_normals = src[1]['nn']
normals = np.r_[lh_normals, rh_normals]
if high_resolution:
use_lh_faces = src[0]['tris']
use_rh_faces = src[1]['tris']
else:
use_lh_faces = src[0]['use_tris']
use_rh_faces = src[1]['use_tris']
use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
points *= 170
vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
for stc in stcs]
unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
try:
from mayavi import mlab
except ImportError:
from enthought.mayavi import mlab
from matplotlib.colors import ColorConverter
color_converter = ColorConverter()
f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
mlab.clf()
if mlab.options.backend != 'test':
f.scene.disable_render = True
with warnings.catch_warnings(record=True): # traits warnings
surface = mlab.triangular_mesh(points[:, 0], points[:, 1],
points[:, 2], use_faces,
color=brain_color,
opacity=opacity, **kwargs)
import matplotlib.pyplot as plt
# Show time courses
plt.figure(fig_number)
plt.clf()
colors = cycle(colors)
logger.info("Total number of active sources: %d" % len(unique_vertnos))
if labels is not None:
colors = [advance_iterator(colors) for _ in
range(np.unique(np.concatenate(labels).ravel()).size)]
for idx, v in enumerate(unique_vertnos):
# get indices of stcs it belongs to
ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
is_common = len(ind) > 1
if labels is None:
c = advance_iterator(colors)
else:
# if vertex is in different stcs than take label from first one
c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
mode = modes[1] if is_common else modes[0]
scale_factor = scale_factors[1] if is_common else scale_factors[0]
if (isinstance(scale_factor, (np.ndarray, list, tuple))
and len(unique_vertnos) == len(scale_factor)):
scale_factor = scale_factor[idx]
x, y, z = points[v]
nx, ny, nz = normals[v]
with warnings.catch_warnings(record=True): # traits
mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
mode=mode, scale_factor=scale_factor)
for k in ind:
vertno = vertnos[k]
mask = (vertno == v)
assert np.sum(mask) == 1
linestyle = linestyles[k]
            plt.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(),
                     c=c, linewidth=linewidth, linestyle=linestyle)
plt.xlabel('Time (ms)', fontsize=18)
plt.ylabel('Source amplitude (nAm)', fontsize=18)
if fig_name is not None:
plt.title(fig_name)
if show:
plt.show()
surface.actor.property.backface_culling = True
surface.actor.property.shading = True
return surface
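# Hedged usage sketch (editorial addition, not part of the original module):
# a minimal call of plot_sparse_source_estimates(). The file names below are
# assumptions and must point at an existing source space and a source
# estimate produced by a sparse solver (e.g. mixed_norm).
def _example_plot_sparse_usage():
    import mne  # assumed importable alongside this module
    src = mne.read_source_spaces('sample-oct-6-src.fif')  # assumed path
    stc = mne.read_source_estimate('mxne_stc')            # assumed path
    return plot_sparse_source_estimates(src, [stc], fig_name='MxNE example',
                                        opacity=0.1)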
| bsd-2-clause | 3,978,885,979,952,764,000 | 36.053763 | 79 | 0.576652 | false |
jamespcole/home-assistant | homeassistant/components/arlo/alarm_control_panel.py | 1 | 4381 | """Support for Arlo Alarm Control Panels."""
import logging
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
PLATFORM_SCHEMA, AlarmControlPanel)
from homeassistant.const import (
ATTR_ATTRIBUTION, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT, STATE_ALARM_DISARMED)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import ATTRIBUTION, DATA_ARLO, SIGNAL_UPDATE_ARLO
_LOGGER = logging.getLogger(__name__)
ARMED = 'armed'
CONF_HOME_MODE_NAME = 'home_mode_name'
CONF_AWAY_MODE_NAME = 'away_mode_name'
CONF_NIGHT_MODE_NAME = 'night_mode_name'
DEPENDENCIES = ['arlo']
DISARMED = 'disarmed'
ICON = 'mdi:security'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOME_MODE_NAME, default=ARMED): cv.string,
vol.Optional(CONF_AWAY_MODE_NAME, default=ARMED): cv.string,
vol.Optional(CONF_NIGHT_MODE_NAME, default=ARMED): cv.string,
})
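# Hedged illustration (editorial addition, not part of the integration): the
# schema above maps to a configuration.yaml entry roughly like the snippet
# below; the custom mode names are assumptions and must match modes defined
# in the Arlo app.
#
#   alarm_control_panel:
#     - platform: arlo
#       home_mode_name: home
#       away_mode_name: armed
#       night_mode_name: night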
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Arlo Alarm Control Panels."""
arlo = hass.data[DATA_ARLO]
if not arlo.base_stations:
return
home_mode_name = config.get(CONF_HOME_MODE_NAME)
away_mode_name = config.get(CONF_AWAY_MODE_NAME)
night_mode_name = config.get(CONF_NIGHT_MODE_NAME)
base_stations = []
for base_station in arlo.base_stations:
base_stations.append(ArloBaseStation(base_station, home_mode_name,
away_mode_name, night_mode_name))
add_entities(base_stations, True)
class ArloBaseStation(AlarmControlPanel):
"""Representation of an Arlo Alarm Control Panel."""
def __init__(self, data, home_mode_name, away_mode_name, night_mode_name):
"""Initialize the alarm control panel."""
self._base_station = data
self._home_mode_name = home_mode_name
self._away_mode_name = away_mode_name
self._night_mode_name = night_mode_name
self._state = None
@property
def icon(self):
"""Return icon."""
return ICON
async def async_added_to_hass(self):
"""Register callbacks."""
async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_ARLO, self._update_callback)
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def state(self):
"""Return the state of the device."""
return self._state
def update(self):
"""Update the state of the device."""
_LOGGER.debug("Updating Arlo Alarm Control Panel %s", self.name)
mode = self._base_station.mode
if mode:
self._state = self._get_state_from_mode(mode)
else:
self._state = None
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
self._base_station.mode = DISARMED
async def async_alarm_arm_away(self, code=None):
"""Send arm away command. Uses custom mode."""
self._base_station.mode = self._away_mode_name
async def async_alarm_arm_home(self, code=None):
"""Send arm home command. Uses custom mode."""
self._base_station.mode = self._home_mode_name
async def async_alarm_arm_night(self, code=None):
"""Send arm night command. Uses custom mode."""
self._base_station.mode = self._night_mode_name
@property
def name(self):
"""Return the name of the base station."""
return self._base_station.name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
'device_id': self._base_station.device_id
}
def _get_state_from_mode(self, mode):
"""Convert Arlo mode to Home Assistant state."""
if mode == ARMED:
return STATE_ALARM_ARMED_AWAY
if mode == DISARMED:
return STATE_ALARM_DISARMED
if mode == self._home_mode_name:
return STATE_ALARM_ARMED_HOME
if mode == self._away_mode_name:
return STATE_ALARM_ARMED_AWAY
if mode == self._night_mode_name:
return STATE_ALARM_ARMED_NIGHT
return mode
| apache-2.0 | 8,865,541,382,343,930,000 | 31.213235 | 78 | 0.640037 | false |
google/physical-web | web-service/handlers.py | 1 | 3651 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from urllib import unquote_plus
import helpers
import json
import logging
import models
import webapp2
################################################################################
class Index(webapp2.RequestHandler):
def get(self):
self.response.out.write('')
def head(self):
pass
################################################################################
class GoUrl(webapp2.RequestHandler):
def get(self):
return self._redirect()
def head(self):
return self._redirect()
def _redirect(self):
url = self.request.get('url')
url = url.encode('ascii', 'ignore')
self.redirect(url)
################################################################################
class RefreshUrl(webapp2.RequestHandler):
def post(self):
url = self.request.get('url')
helpers.RefreshUrl(url)
################################################################################
class FaviconUrl(webapp2.RequestHandler):
def get(self):
url = unquote_plus(self.request.get('url'))
response = helpers.FaviconUrl(url)
if response:
self.response.headers['Content-Type'] = response.headers['Content-Type']
self.response.write(response.content)
else:
      self.error(404)
################################################################################
class ResolveScan(webapp2.RequestHandler):
def post(self):
input_data = self.request.body
try:
input_object = json.loads(input_data) # TODO: Data is not sanitised.
objects = input_object.get('objects', [])
secure_only = bool(input_object.get('secureOnly', helpers.DEFAULT_SECURE_ONLY))
    except Exception:
objects = []
secure_only = helpers.DEFAULT_SECURE_ONLY
output = helpers.BuildResponse(objects, secure_only)
self.response.headers['Content-Type'] = 'application/json'
    json_data = json.dumps(output)
self.response.write(json_data)
################################################################################
class DemoMetadata(webapp2.RequestHandler):
def get(self):
objects = [
{'url': 'http://www.caltrain.com/schedules/realtime/stations/mountainviewstation-mobile.html'},
{'url': 'http://benfry.com/distellamap/'},
{'url': 'http://en.wikipedia.org/wiki/Le_D%C3%A9jeuner_sur_l%E2%80%99herbe'},
{'url': 'http://sfmoma.org'}
]
output = helpers.BuildResponse(objects)
self.response.headers['Content-Type'] = 'application/json'
    json_data = json.dumps(output)
self.response.write(json_data)
def head(self):
pass
################################################################################
app = webapp2.WSGIApplication([
('/', Index),
('/resolve-scan', ResolveScan),
('/refresh-url', RefreshUrl),
('/favicon', FaviconUrl),
('/go', GoUrl),
('/demo', DemoMetadata)
], debug=True)
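# Hedged example (editorial addition): the JSON contract the ResolveScan
# handler above expects, exercised with the Python 2 standard library. The
# host is an assumption and must point at a running deployment.
def _example_resolve_scan(host='http://localhost:8080'):
  import urllib2  # stdlib on the Python 2 runtime this service targets
  body = json.dumps({'objects': [{'url': 'http://sfmoma.org'}],
                     'secureOnly': True})
  return urllib2.urlopen(host + '/resolve-scan', body).read()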
| apache-2.0 | 3,967,501,291,462,436,400 | 31.026316 | 107 | 0.539852 | false |
sacharya/nova | nova/virt/baremetal/base.py | 1 | 2612 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.virt.baremetal import baremetal_states
class NodeDriver(object):
def __init__(self, virtapi):
self.virtapi = virtapi
def cache_images(self, context, node, instance, **kwargs):
raise NotImplementedError()
def destroy_images(self, context, node, instance):
raise NotImplementedError()
def activate_bootloader(self, context, node, instance, **kwargs):
raise NotImplementedError()
def deactivate_bootloader(self, context, node, instance):
raise NotImplementedError()
def activate_node(self, context, node, instance):
"""For operations after power on."""
raise NotImplementedError()
def deactivate_node(self, context, node, instance):
"""For operations before power off."""
raise NotImplementedError()
def get_console_output(self, node, instance):
raise NotImplementedError()
def dhcp_options_for_instance(self, instance):
"""Optional override to return the DHCP options to use for instance.
If no DHCP options are needed, this should not be overridden or None
should be returned.
"""
return None
class PowerManager(object):
def __init__(self, **kwargs):
        self.state = baremetal_states.DELETED
def activate_node(self):
self.state = baremetal_states.ACTIVE
return self.state
def reboot_node(self):
self.state = baremetal_states.ACTIVE
return self.state
def deactivate_node(self):
self.state = baremetal_states.DELETED
return self.state
def is_power_on(self):
"""Returns True or False according as the node's power state."""
return True
# TODO(NTTdocomo): split out console methods to its own class
def start_console(self):
pass
def stop_console(self):
pass
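# Hedged sketch (editorial addition, not part of Nova): a no-op driver showing
# the hooks a concrete NodeDriver subclass is expected to override; the class
# name and empty bodies are assumptions for illustration only.
class _ExampleNoopDriver(NodeDriver):
    def cache_images(self, context, node, instance, **kwargs):
        pass
    def destroy_images(self, context, node, instance):
        pass
    def activate_bootloader(self, context, node, instance, **kwargs):
        pass
    def deactivate_bootloader(self, context, node, instance):
        pass
    def activate_node(self, context, node, instance):
        pass
    def deactivate_node(self, context, node, instance):
        pass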
| apache-2.0 | -3,919,610,494,057,842,000 | 29.372093 | 78 | 0.672282 | false |
bitmazk/django-user-tags | user_tags/tests/forms_tests.py | 1 | 7599 | """Tests for the forms of the ``user_tags`` app."""
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from django_libs.tests.factories import UserFactory
from user_tags.tests.test_app.forms import DummyModelForm
from user_tags.models import TaggedItem, UserTag, UserTagGroup
class UserTagsFormMixinTestCase(TestCase):
"""Tests for the ``UserTagsFormMixin`` mixin class."""
longMessage = True
def setUp(self):
"""Creates a user and valid set of form data."""
self.user = UserFactory()
self.data = {
'name': 'dummy',
'tags': 'great day,family, cinema ',
'global_tags': 'foo, bar',
}
def test_adds_fields_to_the_form(self):
"""
A form that inherits from ``UserTagsFormMixin`` should have the
fields that are defined on the model's ``TAG_FIELDS`` options dict
as form fields.
"""
form = DummyModelForm(self.user)
self.assertTrue('tags' in form.fields)
self.assertEqual(form.fields['tags'].help_text.encode(), b'Help text')
self.assertTrue('global_tags' in form.fields)
def test_form_valid(self):
"""Form should be valid when valid data is given."""
form = DummyModelForm(self.user, data=self.data)
self.assertTrue(form.is_valid())
def test_save_returns_instance(self):
"""
Save should return the saved instance when creating a new object.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
self.assertTrue(instance.pk)
def test_creates_tag_group(self):
"""
If the user has entered tags for a given tag field, the correct
user tags related objects should be created.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
tag_group = UserTagGroup.objects.get(name='tags')
user_tags = UserTag.objects.filter(user_tag_group=tag_group)
self.assertEqual(user_tags.count(), 3)
global_tag_group = UserTagGroup.objects.get(name='global_tags')
global_tags = UserTag.objects.filter(user_tag_group=global_tag_group)
self.assertEqual(global_tags.count(), 2)
tagged_item = TaggedItem.objects.get(
content_type=ContentType.objects.get_for_model(instance),
object_id=instance.pk)
self.assertEqual(tagged_item.user_tags.all().count(), 5)
def test_tag_group_without_user(self):
"""
For a tag group that has ``'with_user': False`` in the ``TAG_FIELDS``
option dict, the created tag group should not be bound to any user.
"""
form = DummyModelForm(self.user, data=self.data)
form.save()
global_tag_group = UserTagGroup.objects.get(name='global_tags')
self.assertEqual(global_tag_group.user, None)
def test_form_should_be_valid_when_instance_given(self):
"""
When instantiated with an instance, the form should, of course,
be valid.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
form = DummyModelForm(self.user, data=self.data, instance=instance)
self.assertTrue(form.is_valid())
def test_save_instance_re_creates_everything(self):
"""
When instantiated with an instance that already has tags, those tags
should be deleted when the form is saved. Only the newly submitted
tags will get re-created.
In this test we don't touch the two existing 'global_tags' but we
re-submit two new 'tags' (before that group had three tags). So in
total we should have four tags now, not five.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
data2 = self.data.copy()
data2.update({'tags': 'family, cinema', })
form = DummyModelForm(self.user, data=data2, instance=instance)
instance = form.save()
tagged_item = TaggedItem.objects.get(
content_type=ContentType.objects.get_for_model(instance),
object_id=instance.pk)
self.assertEqual(tagged_item.user_tags.all().count(), 4)
def test_get_user_from_instance(self):
"""
        If the form was not instantiated with a user parameter, it will try
        to get the user from the instance.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
instance.user = self.user
form = DummyModelForm(instance=instance, data=self.data)
self.assertTrue(form.is_valid())
self.assertTrue(form.save())
def test_get_user_method(self):
"""
        If the form was not instantiated with a user parameter and the
        instance does not have a user field, it will try to call a
        ``get_user`` method on the form.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
form = DummyModelForm(instance=instance, data=self.data)
def get_user():
return self.user
form.get_user = get_user
self.assertTrue(form.is_valid())
self.assertTrue(form.save())
def test_no_user_given(self):
"""
        If the form was not instantiated with a user parameter and the
        instance has no user attribute and no get_user method, so be it. This
        tag is probably supposed to be global to the project.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
form = DummyModelForm(instance=instance, data=self.data)
self.assertTrue(form.is_valid())
self.assertTrue(form.save())
def test_no_tags(self):
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
TaggedItem.objects.all().delete()
form = DummyModelForm(instance=instance, data=self.data)
self.assertTrue(form.is_valid())
def test_split_tags(self):
tags = DummyModelForm.split_tags('great day,family, cinema, ')
self.assertEqual(len(tags), 3)
self.assertEqual(tags[0], 'great day')
self.assertEqual(tags[1], 'family')
self.assertEqual(tags[2], 'cinema')
def test_adds_tag_list_to_form(self):
"""
Should add the available tags for each given tag field to the form.
This enables users to do this in their templates::
$(document).ready(function() {
$('#id_skills').tagit({
allowSpaces: true
,availableTags:
{{ form.available_tags_technical_skills|safe }}
,caseSensitive: false
,removeConfirmation: true
});
}
"""
form = DummyModelForm(self.user, data=self.data)
form.save()
result = form.tags_tags_values()
self.assertEqual(result, '["cinema", "family", "great day"]')
result = form.global_tags_tags_values()
self.assertEqual(result, '["bar", "foo"]')
user2 = UserFactory()
form = DummyModelForm(user2)
result = form.tags_tags_values()
self.assertEqual(result, '[]', msg=(
'A user should not be able to see the private tags of another'
' user.'))
form = DummyModelForm()
result = form.tags_tags_values()
self.assertEqual(result, '[]', msg=(
'An anonymous user should not be able to see user specific tags.'))
| mit | 2,542,328,559,561,466,000 | 35.185714 | 79 | 0.610607 | false |
Xonshiz/comic-dl | comic_dl/sites/readcomicOnlineli.py | 1 | 7993 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import globalFunctions
import re
import os
import logging
class ReadComicOnlineLi(object):
def __init__(self, manga_url, download_directory, chapter_range, **kwargs):
current_directory = kwargs.get("current_directory")
conversion = kwargs.get("conversion")
keep_files = kwargs.get("keep_files")
self.logging = kwargs.get("log_flag")
self.sorting = kwargs.get("sorting_order")
self.image_quality = kwargs.get("image_quality")
self.comic_name = self.name_cleaner(manga_url)
self.print_index = kwargs.get("print_index")
url_split = str(manga_url).split("/")
if len(url_split) in [5]: # Sometimes, this value came out to be 6, instead of 5. Hmmmmmmmm weird.
# Removing "6" from here, because it caused #47
self.full_series(comic_url=manga_url.replace("&readType=1", ""), comic_name=self.comic_name,
sorting=self.sorting, download_directory=download_directory, chapter_range=chapter_range,
conversion=conversion, keep_files=keep_files)
else:
if "&readType=0" in manga_url:
manga_url = str(manga_url).replace("&readType=0", "&readType=1") # All Images in one page!
# disabled to fix #132 and #145
# elif "&readType=1" not in manga_url:
# manga_url = str(manga_url) + "&readType=1" # All Images in one page!
self.single_chapter(manga_url, self.comic_name, download_directory, conversion=conversion,
keep_files=keep_files)
def single_chapter(self, comic_url, comic_name, download_directory, conversion, keep_files):
# print("Received Comic Url : {0}".format(comic_url))
print("Fooling CloudFlare...Please Wait...")
chapter_number = str(comic_url).split("/")[5].split("?")[0].replace("-", " - ")
source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url, scrapper_delay=10)
img_list = re.findall(r"lstImages.push\(\"(.*?)\"\);", str(source))
file_directory = globalFunctions.GlobalFunctions().create_file_directory(chapter_number, comic_name)
# directory_path = os.path.realpath(file_directory)
directory_path = os.path.realpath(str(download_directory) + "/" + str(file_directory))
if not os.path.exists(directory_path):
os.makedirs(directory_path)
# image_len = len(image_list)
if str(self.image_quality).lower().strip() in ["low", "worst", "bad", "cancer", "mobile"]:
print("Downloading In Low Quality...")
links = []
file_names = []
for current_chapter, image_link in enumerate(img_list):
image_link = image_link.replace("\\", "")
logging.debug("Image Link : %s" % image_link)
image_link = image_link.replace("=s1600", "=s0").replace("/s1600", "/s0") # Change low quality to best.
if str(self.image_quality).lower().strip() in ["low", "worst", "bad", "cancer", "mobile"]:
image_link = image_link.replace("=s0", "=s1600").replace("/s0", "/s1600")
current_chapter += 1
file_name = str(globalFunctions.GlobalFunctions().prepend_zeroes(current_chapter, len(img_list))) + ".jpg"
file_names.append(file_name)
links.append(image_link)
globalFunctions.GlobalFunctions().multithread_download(chapter_number, comic_name, comic_url, directory_path,
file_names, links, self.logging)
globalFunctions.GlobalFunctions().conversion(directory_path, conversion, keep_files, comic_name,
chapter_number)
return 0
def name_cleaner(self, url):
initial_name = str(url).split("/")[4].strip()
safe_name = re.sub(r"[0-9][a-z][A-Z]\ ", "", str(initial_name))
manga_name = str(safe_name.title()).replace("-", " ")
return manga_name
def full_series(self, comic_url, comic_name, sorting, download_directory, chapter_range, conversion, keep_files):
print("Fooling CloudFlare...Please Wait...")
source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url, scrapper_delay=10)
all_links = []
listing_table = source.find_all("table", {"class": "listing"})
# print(listing_table)
for elements in listing_table:
x = elements.findAll('a')
for a in x:
all_links.append(str(a['href']).strip())
"""Readcomiconline.li shows the chapters in the Descending order. The 1st chapter is at the bottom, hence, at
the end of the list. So, we'll reverse the list, to perform the ranging functionality properly.
This is a fix for issues like #74.
"""
all_links.reverse()
# print("All Links : {0}".format(all_links))
logging.debug("All Links : %s" % all_links)
# Uh, so the logic is that remove all the unnecessary chapters beforehand
# and then pass the list for further operations.
if chapter_range != "All":
# -1 to shift the episode number accordingly to the INDEX of it. List starts from 0 xD!
starting = int(str(chapter_range).split("-")[0]) - 1
if str(chapter_range).split("-")[1].isdigit():
ending = int(str(chapter_range).split("-")[1])
else:
ending = len(all_links)
indexes = [x for x in range(starting, ending)]
all_links = [all_links[x] for x in indexes][::-1]
else:
all_links = all_links
if self.print_index:
idx = 0
for chap_link in all_links:
idx = idx + 1
print(str(idx) + ": " + chap_link)
return
if str(sorting).lower() in ['new', 'desc', 'descending', 'latest']:
for chap_link in all_links:
chap_link = "http://readcomiconline.li" + chap_link
try:
self.single_chapter(comic_url=chap_link, comic_name=comic_name, download_directory=download_directory,
conversion=conversion, keep_files=keep_files)
except Exception as ex:
logging.error("Error downloading : %s" % chap_link)
break # break to continue processing other mangas
# if chapter range contains "__EnD__" write new value to config.json
# @Chr1st-oo - modified condition due to some changes on automatic download and config.
if chapter_range != "All" and (chapter_range.split("-")[1] == "__EnD__" or len(chapter_range.split("-")) == 3):
globalFunctions.GlobalFunctions().addOne(comic_url)
elif str(sorting).lower() in ['old', 'asc', 'ascending', 'oldest', 'a']:
for chap_link in all_links[::-1]:
chap_link = "http://readcomiconline.to" + chap_link
try:
self.single_chapter(comic_url=chap_link, comic_name=comic_name, download_directory=download_directory,
conversion=conversion, keep_files=keep_files)
except Exception as ex:
logging.error("Error downloading : %s" % chap_link)
break # break to continue processing other mangas
# if chapter range contains "__EnD__" write new value to config.json
# @Chr1st-oo - modified condition due to some changes on automatic download and config.
if chapter_range != "All" and (chapter_range.split("-")[1] == "__EnD__" or len(chapter_range.split("-")) == 3):
globalFunctions.GlobalFunctions().addOne(comic_url)
return 0
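# Hedged usage sketch (editorial addition): ReadComicOnlineLi is normally
# instantiated by comic-dl's dispatcher; the values below are assumptions
# mirroring the keyword arguments read in __init__ above.
def _example_usage():
    return ReadComicOnlineLi(
        "http://readcomiconline.li/Comic/Some-Comic/Issue-1?id=1",
        download_directory="downloads", chapter_range="All",
        current_directory=".", conversion="None", keep_files=True,
        log_flag=False, sorting_order="new", image_quality="best",
        print_index=False)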
| mit | 6,136,911,422,190,837,000 | 47.150602 | 127 | 0.575378 | false |
zhiwehu/django-countries | countries/models.py | 1 | 2033 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Country(models.Model):
"""
International Organization for Standardization (ISO) 3166-1 Country list
* ``iso`` = ISO 3166-1 alpha-2
* ``name`` = Official country names used by the ISO 3166/MA in capital letters
* ``printable_name`` = Printable country names for in-text use
* ``iso3`` = ISO 3166-1 alpha-3
* ``numcode`` = ISO 3166-1 numeric
Note::
This model is fixed to the database table 'country' to be more general.
    Change ``db_table`` if this causes conflicts with your database layout.
Or comment out the line for default django behaviour.
"""
iso = models.CharField(_('ISO alpha-2'), max_length=2, primary_key=True)
name = models.CharField(_('Official name (CAPS)'), max_length=128)
printable_name = models.CharField(_('Country name'), max_length=128)
iso3 = models.CharField(_('ISO alpha-3'), max_length=3, null=True)
numcode = models.PositiveSmallIntegerField(_('ISO numeric'), null=True)
class Meta:
db_table = 'country'
verbose_name = _('Country')
verbose_name_plural = _('Countries')
ordering = ('name',)
class Admin:
list_display = ('printable_name', 'iso',)
def __unicode__(self):
return self.printable_name
class UsState(models.Model):
"""
United States Postal Service (USPS) State Abbreviations
Note::
This model is fixed to the database table 'usstate' to be more general.
    Change ``db_table`` if this causes conflicts with your database layout.
Or comment out the line for default django behaviour.
"""
id = models.AutoField(primary_key=True)
name = models.CharField(_('State name'), max_length=50, null=False)
abbrev = models.CharField(_('Abbreviation'), max_length=2, null=False)
class Meta:
db_table = 'usstate'
verbose_name = _('US State')
verbose_name_plural = _('US States')
ordering = ('name',)
class Admin:
list_display = ('name', 'abbrev',)
def __unicode__(self):
return self.name
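# Hedged usage sketch (editorial addition): typical lookups against the two
# models above; it assumes the fixture data shipped with the app has been
# loaded into the database.
def _example_lookups():
    country = Country.objects.get(iso='US')
    state = UsState.objects.get(abbrev='CO')
    return country.printable_name, state.name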
| bsd-3-clause | 316,511,480,541,855,040 | 30.276923 | 80 | 0.689129 | false |
nils-wisiol/pypuf | pypuf/property_test/example.py | 1 | 3649 | """This module is used to store some examples for the documentation"""
from numpy import array, reshape
from pypuf.simulation.arbiter_based.ltfarray import NoisyLTFArray
from pypuf.property_test.base import PropertyTest
from pypuf.tools import sample_inputs
def main():
"""This method is used to execute all example functions."""
    example_reliability()
    example_reliability_statistic()
    example_uniqueness()
    example_uniqueness_statistic()
def example_reliability():
"""This method shows how to use the PropertyTest.reliability function."""
n = 8
k = 8
transformation = NoisyLTFArray.transform_id
combiner = NoisyLTFArray.combiner_xor
weights = NoisyLTFArray.normal_weights(n=n, k=k)
instance = NoisyLTFArray(
weight_array=weights,
transform=transformation,
combiner=combiner,
sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, 0.5)
)
challenge = array([-1, 1, 1, 1, -1, 1, 1, 1])
reliability = PropertyTest.reliability(instance, reshape(challenge, (1, n)))
print('The reliability is {}.'.format(reliability))
def example_reliability_statistic():
"""This method shows hot to use the PropertyTest.reliability_statistic."""
n = 8
k = 1
N = 2 ** n
instance_count = 3
measurements = 100
transformation = NoisyLTFArray.transform_id
combiner = NoisyLTFArray.combiner_xor
weights = NoisyLTFArray.normal_weights(n=n, k=k)
instances = [
NoisyLTFArray(
weight_array=weights,
transform=transformation,
combiner=combiner,
sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, 0.5)
) for _ in range(instance_count)
]
challenges = array(list(sample_inputs(n, N)))
property_test = PropertyTest(instances)
reliability_statistic = property_test.reliability_statistic(challenges, measurements=measurements)
print('The reliability statistic is {}.'.format(reliability_statistic))
def example_uniqueness():
"""
This method shows the function which can be used to calculate the uniqueness of a set of simulation instances.
"""
n = 8
k = 1
instance_count = 3
transformation = NoisyLTFArray.transform_id
combiner = NoisyLTFArray.combiner_xor
weights = NoisyLTFArray.normal_weights(n=n, k=k)
instances = [
NoisyLTFArray(
weight_array=weights,
transform=transformation,
combiner=combiner,
sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, weights)
) for _ in range(instance_count)
]
challenge = array([-1, 1, 1, 1, -1, 1, 1, 1])
uniqueness = PropertyTest.uniqueness(instances, reshape(challenge, (1, n)))
print('The uniqueness is {}.'.format(uniqueness))
def example_uniqueness_statistic():
"""This method shows the uniqueness statistic function."""
n = 8
k = 1
N = 2 ** n
instance_count = 11
measurements = 1
transformation = NoisyLTFArray.transform_id
combiner = NoisyLTFArray.combiner_xor
weights = NoisyLTFArray.normal_weights(n=n, k=k)
instances = [
NoisyLTFArray(
weight_array=weights,
transform=transformation,
combiner=combiner,
sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, weights)
) for _ in range(instance_count)
]
challenges = array(list(sample_inputs(n, N)))
property_test = PropertyTest(instances)
uniqueness_statistic = property_test.uniqueness_statistic(challenges, measurements=measurements)
print('The uniqueness statistic is {}.'.format(uniqueness_statistic))
if __name__ == '__main__':
main()
| gpl-3.0 | -5,675,355,466,002,031,000 | 33.752381 | 114 | 0.671691 | false |
LabAdvComp/dish | test/test_logging.py | 1 | 2154 | from IPython.parallel.error import CompositeError
import os
from nose.tools import assert_raises
from .utils import assert_eventually_equal
from .test_pipeline import PipelineTest
class TestLogging(PipelineTest):
def test_logging(self):
"""Test that logging information is propagated and stored
correctly.
"""
def logs_things(job, logger):
logger.info(job["description"]+"loggingtest")
self.p.map(logs_things)
# TODO abstract out this logging testing stuff
pipeline_log = open(os.path.join(self.p.logdir, "dish.log")).read()
for job in self.p.jobs:
job_log = open(os.path.join(job["workdir"],
job["description"]+".log")).read()
assert_eventually_equal(job["description"]+"loggingtest" in job_log,
True)
assert_eventually_equal(job["description"]+"loggingtest" in pipeline_log,
True)
def test_stdout_is_logged(self):
"""p.run should log stdout of the command."""
self.p.run("echo testing123")
pipeline_log = open(os.path.join(self.p.logdir, "dish.log")).read()
assert_eventually_equal("testing123" in pipeline_log, True)
for job in self.p.jobs:
job_log = open(os.path.join(job["workdir"],
job["description"]+".log")).read()
assert_eventually_equal("testing123" in job_log, True)
def test_logging_gets_traceback(self):
"""When a call fails, we should log traceback info."""
def failing(job, logger):
raise RuntimeError(job["description"]+"error")
with assert_raises(CompositeError):
self.p.map(failing)
pipeline_log = open(os.path.join(self.p.logdir, "dish.log")).read()
for job in self.p.jobs:
job_log = open(os.path.join(job["workdir"],
job["description"]+".log")).read()
assert job["description"]+"error" in job_log
assert job["description"]+"error" in pipeline_log
| mit | 5,790,359,276,004,510,000 | 42.08 | 85 | 0.577066 | false |
denverfoundation/storybase | apps/storybase_user/migrations/0006_auto__add_contact.py | 1 | 14929 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Contact'
db.create_table('storybase_user_contact', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('storybase.fields.ShortTextField')(blank=True)),
('info', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('storybase_user', ['Contact'])
def backwards(self, orm):
# Deleting model 'Contact'
db.delete_table('storybase_user_contact')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_asset.asset': {
'Meta': {'object_name': 'Asset'},
'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'asset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'storybase_asset.dataset': {
'Meta': {'object_name': 'DataSet'},
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
},
'storybase_story.story': {
'Meta': {'object_name': 'Story'},
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stories'", 'null': 'True', 'to': "orm['auth.User']"}),
'byline': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Project']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'story_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'structure_type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'storybase_user.contact': {
'Meta': {'object_name': 'Contact'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'})
},
'storybase_user.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_organizations'", 'blank': 'True', 'through': "orm['storybase_user.OrganizationStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['auth.User']"}),
'organization_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.organizationstory': {
'Meta': {'object_name': 'OrganizationStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.organizationtranslation': {
'Meta': {'unique_together': "(('organization', 'language'),)", 'object_name': 'OrganizationTranslation'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_user.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_projects'", 'blank': 'True', 'through': "orm['storybase_user.ProjectStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['auth.User']"}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'project_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.projectstory': {
'Meta': {'object_name': 'ProjectStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.projecttranslation': {
'Meta': {'unique_together': "(('project', 'language'),)", 'object_name': 'ProjectTranslation'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
}
}
complete_apps = ['storybase_user']
| mit | -9,083,987,629,193,060,000 | 82.402235 | 268 | 0.557907 | false |
omaciel/mangonel | mangonel/system.py | 1 | 2191 | from common import *
import datetime
import json
import sys
import time
try:
from katello.client.api.system import SystemAPI
except ImportError, e:
print "Please install Katello CLI package."
sys.exit(-1)
class System(SystemAPI):
def __init__(self):
super(System, self).__init__()
def create(self, org, env, name=None, ak=None, type='system',
release=None, sla=None, facts=None, view_id=None, installed_products=None):
if name is None:
name = "%s.example.com" % generate_name(8)
if facts is None:
facts = generate_facts(name)
sys1 = super(System, self).register(name, org['label'], env['id'], ak, type, release, sla, facts, view_id, installed_products)
logger.debug("Created system '%s'" % sys1['name'])
return sys1
def get_or_create_system(self, org, env, name=None, ak=None, type='system',
release=None, sla=None, facts=None, view_id=None, installed_products=None):
sys = None
query = {}
if name is not None:
query['name'] = name
if query != {}:
systems = super(System, self).systems_by_env(env['id'], query)
if systems != []:
sys = systems[0]
else:
sys = self.create(org, env, name, ak, type,
release, sla, facts, view_id, installed_products)
return sys
def delete_system(self, system):
return super(System, self).unregister(system['uuid'])
def checkin(self, system):
return super(System, self).checkin(system['uuid'])
def update_packages(self, system, packages=None):
if packages is None:
packages = packages_list()
return super(System, self).update_packages(system['uuid'], packages)
def available_pools(self, sId, match_system=False, match_installed=False, no_overlap=False):
return super(System, self).available_pools(sId, match_system, match_installed, no_overlap)['pools']
def subscribe(self, sId, pool=None, qty=1):
return super(System, self).subscribe(sId, pool, qty)
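# Hedged usage sketch (editorial addition): how the helper above is typically
# driven from a test; the org/env dictionaries are assumed to come from the
# matching Organization/Environment helpers elsewhere in mangonel.
def _example_register_and_unregister(org, env):
    api = System()
    system = api.create(org, env)
    api.checkin(system)
    return api.delete_system(system)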
| gpl-2.0 | 7,369,417,449,111,537,000 | 29.430556 | 134 | 0.591967 | false |
wujuguang/motor | test/tornado_tests/test_motor_web.py | 1 | 9001 | # Copyright 2012-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
"""Test utilities for using Motor with Tornado web applications."""
import datetime
import email
import hashlib
import time
import re
import unittest
import gridfs
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application
import motor
import motor.web
import test
from test.test_environment import env, CA_PEM, CLIENT_PEM
# We're using Tornado's AsyncHTTPTestCase instead of our own MotorTestCase for
# the convenience of self.fetch().
class GridFSHandlerTestBase(AsyncHTTPTestCase):
def setUp(self):
super(GridFSHandlerTestBase, self).setUp()
self.fs = gridfs.GridFS(test.env.sync_cx.motor_test)
# Make a 500k file in GridFS with filename 'foo'
self.contents = b'Jesse' * 100 * 1024
self.contents_hash = hashlib.md5(self.contents).hexdigest()
# Record when we created the file, to check the Last-Modified header
self.put_start = datetime.datetime.utcnow().replace(microsecond=0)
self.file_id = 'id'
self.fs.delete(self.file_id)
self.fs.put(
self.contents, _id='id', filename='foo', content_type='my type')
self.put_end = datetime.datetime.utcnow().replace(microsecond=0)
self.assertTrue(self.fs.get_last_version('foo'))
def motor_db(self, **kwargs):
if env.mongod_started_with_ssl:
kwargs.setdefault('ssl_certfile', CLIENT_PEM)
kwargs.setdefault('ssl_ca_certs', CA_PEM)
kwargs.setdefault('ssl', env.mongod_started_with_ssl)
client = motor.MotorClient(
test.env.uri,
io_loop=self.io_loop,
**kwargs)
return client.motor_test
def tearDown(self):
self.fs.delete(self.file_id)
super(GridFSHandlerTestBase, self).tearDown()
def get_app(self):
return Application([
('/(.+)', motor.web.GridFSHandler, {'database': self.motor_db()})])
def stop(self, *args, **kwargs):
# A stop() method more permissive about the number of its positional
# arguments than AsyncHTTPTestCase.stop
if len(args) == 1:
AsyncHTTPTestCase.stop(self, args[0], **kwargs)
else:
AsyncHTTPTestCase.stop(self, args, **kwargs)
def parse_date(self, d):
date_tuple = email.utils.parsedate(d)
return datetime.datetime.fromtimestamp(time.mktime(date_tuple))
def last_mod(self, response):
"""Parse the 'Last-Modified' header from an HTTP response into a
datetime.
"""
return self.parse_date(response.headers['Last-Modified'])
def expires(self, response):
return self.parse_date(response.headers['Expires'])
class GridFSHandlerTest(GridFSHandlerTestBase):
def test_basic(self):
# First request
response = self.fetch('/foo')
self.assertEqual(200, response.code)
self.assertEqual(self.contents, response.body)
self.assertEqual(
len(self.contents), int(response.headers['Content-Length']))
self.assertEqual('my type', response.headers['Content-Type'])
self.assertEqual('public', response.headers['Cache-Control'])
self.assertTrue('Expires' not in response.headers)
etag = response.headers['Etag']
last_mod_dt = self.last_mod(response)
self.assertEqual(self.contents_hash, etag.strip('"'))
self.assertTrue(self.put_start <= last_mod_dt <= self.put_end)
# Now check we get 304 NOT MODIFIED responses as appropriate
for ims_value in (
last_mod_dt,
last_mod_dt + datetime.timedelta(seconds=1)
):
response = self.fetch('/foo', if_modified_since=ims_value)
self.assertEqual(304, response.code)
self.assertEqual(b'', response.body)
# If-Modified-Since in the past, get whole response back
response = self.fetch(
'/foo',
if_modified_since=last_mod_dt - datetime.timedelta(seconds=1))
self.assertEqual(200, response.code)
self.assertEqual(self.contents, response.body)
# Matching Etag
response = self.fetch('/foo', headers={'If-None-Match': etag})
self.assertEqual(304, response.code)
self.assertEqual(b'', response.body)
# Mismatched Etag
response = self.fetch('/foo', headers={'If-None-Match': etag + 'a'})
self.assertEqual(200, response.code)
self.assertEqual(self.contents, response.body)
def test_404(self):
response = self.fetch('/bar')
self.assertEqual(404, response.code)
def test_head(self):
response = self.fetch('/foo', method='HEAD')
# Get Etag and parse Last-Modified into a datetime
etag = response.headers['Etag']
last_mod_dt = self.last_mod(response)
# Test the result
self.assertEqual(200, response.code)
self.assertEqual(b'', response.body) # Empty body for HEAD request
self.assertEqual(
len(self.contents), int(response.headers['Content-Length']))
self.assertEqual('my type', response.headers['Content-Type'])
self.assertEqual(self.contents_hash, etag.strip('"'))
self.assertTrue(self.put_start <= last_mod_dt <= self.put_end)
self.assertEqual('public', response.headers['Cache-Control'])
def test_content_type(self):
# Check that GridFSHandler uses file extension to guess Content-Type
# if not provided
for filename, expected_type in [
('foo.jpg', 'jpeg'),
('foo.png', 'png'),
('ht.html', 'html'),
('jscr.js', 'javascript'),
]:
# 'fs' is PyMongo's blocking GridFS
self.fs.put(b'', filename=filename)
for method in 'GET', 'HEAD':
response = self.fetch('/' + filename, method=method)
self.assertEqual(200, response.code)
# mimetypes are platform-defined, be fuzzy
self.assertTrue(
response.headers['Content-Type'].lower().endswith(
expected_type))
class TZAwareGridFSHandlerTest(GridFSHandlerTestBase):
def motor_db(self):
return super(TZAwareGridFSHandlerTest, self).motor_db(tz_aware=True)
def test_tz_aware(self):
now = datetime.datetime.utcnow()
ago = now - datetime.timedelta(minutes=10)
hence = now + datetime.timedelta(minutes=10)
response = self.fetch('/foo', if_modified_since=ago)
self.assertEqual(200, response.code)
response = self.fetch('/foo', if_modified_since=hence)
self.assertEqual(304, response.code)
class CustomGridFSHandlerTest(GridFSHandlerTestBase):
def get_app(self):
class CustomGridFSHandler(motor.web.GridFSHandler):
def get_gridfs_file(self, bucket, filename, request):
# Test overriding the get_gridfs_file() method, path is
# interpreted as file_id instead of filename.
return bucket.open_download_stream(file_id=filename)
def get_cache_time(self, path, modified, mime_type):
return 10
def set_extra_headers(self, path, gridout):
self.set_header('quux', 'fizzledy')
return Application([
('/(.+)', CustomGridFSHandler, {'database': self.motor_db()})])
def test_get_gridfs_file(self):
# We overrode get_gridfs_file so we expect getting by filename *not* to
# work now; we'll get a 404. We have to get by file_id now.
response = self.fetch('/foo')
self.assertEqual(404, response.code)
response = self.fetch('/' + str(self.file_id))
self.assertEqual(200, response.code)
self.assertEqual(self.contents, response.body)
cache_control = response.headers['Cache-Control']
self.assertTrue(re.match(r'max-age=\d+', cache_control))
self.assertEqual(10, int(cache_control.split('=')[1]))
expires = self.expires(response)
# It should expire about 10 seconds from now
self.assertTrue(
datetime.timedelta(seconds=8)
< expires - datetime.datetime.utcnow()
< datetime.timedelta(seconds=12))
self.assertEqual('fizzledy', response.headers['quux'])
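# Hedged illustration (editorial addition, not part of the test suite): how
# GridFSHandler is typically wired into a production Tornado app; the MongoDB
# URI below is an assumption.
def _example_production_app():
    db = motor.MotorClient('mongodb://localhost:27017').my_database
    return Application([
        ('/gridfs/(.+)', motor.web.GridFSHandler, {'database': db})])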
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -2,804,103,411,489,840,000 | 35.738776 | 79 | 0.631263 | false |
OmnesRes/pan_cancer | paper/figures/figure_1/bar_graphs/CESC.py | 1 | 1883 | ##script for creating a histogram
## Load necessary modules
import pylab as plt
import numpy as np
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'cox_regression','CESC','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
pvalues=map(float,pvalues)
##decide how many bins; 100 is the maximum possible due to only having two sig figs
number=100.0
counts={}
##use a dictionary to populate the bins
for i in range(int(number)):
for j in pvalues:
if i/number<j<=(i+1)/number:
counts[i]=counts.get(i,0)+1
##convert the dictionary to a list
mylist=zip(counts.keys(),counts.values())
##sort the list so that the bins are in order
mylist.sort()
##plot the data with pylab
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(bottom=.2)
ax.bar([i[0]/number for i in mylist],[i[1] for i in mylist],color='b',width=1/number,linewidth=2.0)
ax.set_xlim((0,1))
for item in ax.get_yticklabels():
item.set_fontsize(30)
for item in ax.get_xticklabels():
item.set_fontsize(30)
ax.tick_params(axis='x',length=15,width=3,direction='out',labelsize=30)
ax.tick_params(axis='y',length=15,width=3,direction='out',labelsize=30)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(3)
ax.spines['bottom'].set_linewidth(3)
ax.spines['bottom'].set_position(['outward',10])
ax.spines['left'].set_position(['outward',10])
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xticks([i/10.0 for i in range(0,11)])
ax.set_xticklabels(['0']+[str(i/10.0) for i in range(1,11)])
ax.set_ylabel('Frequency',fontsize=60,labelpad=20)
ax.set_xlabel('Raw Cox P-value',fontsize=60,labelpad=20)
plt.show()
| mit | -5,518,581,456,476,088,000 | 29.868852 | 104 | 0.707913 | false |
jbzdarkid/HearthstonePro | Cards.py | 1 | 13333 | '''
Special:
"Anub'ar Ambusher"
"Blood Warriors"
"Burgly Bully"
"Captain's Parrot"
"Chromaggus"
"Echo of Mediv"
"Ethereal Peddler"
"Flame Leviathan"
"Getaway Kodo"
"Gnomish Experimenter"
"Headcrack"
"Holy Wrath"
"Ivory Knight"
"Kazakus"
"King's Elekk"
"Krul the Unshackled"
"Lock and Load"
"Lorewalker Cho"
"Sea Reaver"
"Shadowfiend"
"Small-Time Recruits"
"Thistle Tea"
"Tinkertown Technician"
"Trade Prince Gallywix"
"Vanish"
"Wilfred Fizzlebang"
"Wrathion"
'''
# Deathrattle: "Voidcaller", "The Skeleton Knight"
# Discard: "Succubus", "Darkshire Librarian", "Astral Communion", "Dark Bargain", "Deathwing"
# Buff: "Smuggler's Crate", "Hidden Cache", "Trogg Beastrager", "Grimscale Chum", "Grimestreet Outfitter", "Grimestreet Enforcer", "Grimestreet Gadgeteer", "Stolen Goods", "Grimestreet Pawnbroker", "Brass Knuckles", "Hobart Grapplehammer", "Grimestreet Smuggler", "Don Han'Cho"
# Within this file, I've separated out names of cards in "double quotes", so that I can search for them via splitter.py.
# It also means there won't be any \'s in card names.
import logging
import Hand, Utilities, Legendaries
# When a card hits the board, and we can see what its name is
def play2(entity):
if entity['player'] == Utilities.them:
if entity['name'] in ['Armor Up!', 'Ballista Shot', 'Dagger Mastery', 'DIE, INSECT!', 'Dire Shapeshift', 'INFERNO!', 'Life Tap', 'Poisoned Daggers', 'Reinforce', 'Shapeshift', 'Soul Tap', 'Steady Shot', 'Tank Up!', 'The Silver Hand', 'The Tidal Hand', 'Totemic Call', 'Totemic Slam']:
logging.info('Opponent uses their hero power')
else:
logging.info('Opponent plays %s' % entity['name'])
if entity['name'] in ["Crackle", "Dunemaul Shaman", "Finders Keepers", "Fireguard Destroyer", "Jinyu Waterspeaker", "Lightning Bolt", "Siltfin Spiritwalker", "Stormforged Axe", "Stormcrack", "Totem Golem"]:
Utilities.overload += 1
elif entity['name'] in ["Ancestral Knowledge", "Doomhammer", "Dust Devil", "Feral Spirit", "Flamewreathed Faceless", "Forked Lightning", "Lava Burst", "Lightning Storm"]:
Utilities.overload += 2
elif entity['name'] in ["Earth Elemental", "Neptulon"]:
Utilities.overload += 3
elif entity['name'] in ["Elemental Destruction"]:
Utilities.overload += 5
elif entity['name'] in ["Eternal Sentinel", "Lava Shock"]:
Utilities.overload = 0
elif entity['name'] in ["Astral Communion", "Dark Bargain", "Darkshire Librarian", "Deathwing", "Doomguard", "Soulfire", "Succubus"]:
global showentity
showentity = discard
elif entity['name'] == "Varian Wrynn":
Legendaries.varianWrynn = True
elif entity['name'] == "A Light in the Darkness":
Hand.draw(source='random', kind='minion', buff=+1)
elif entity['name'] == "Arch-Thief Rafaam":
Hand.draw(note='A powerful artifact', kind='spell')
elif entity['name'] == "Babbling Book":
Hand.draw(source='random', hero='mage', kind='spell')
elif entity['name'] == "Burgle":
Hand.draw(source='random', hero=Utilities.our_hero)
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Cabalist's Tomb":
Hand.draw(source='random', hero='mage', kind='spell')
Hand.draw(source='random', hero='mage', kind='spell')
Hand.draw(source='random', hero='mage', kind='spell')
elif entity['name'] == "Dark Peddler":
Hand.draw(source='discovered', note='A 1-cost card')
elif entity['name'] == "Ethereal Conjurer":
Hand.draw(source='discovered', hero='mage', kind='spell')
elif entity['name'] == "Finders Keepers":
Hand.draw(source='discovered', hero='shaman', note='A card with overload')
elif entity['name'] == "Gorillabot A-3":
Hand.draw(source='discovered', kind='mech minion')
elif entity['name'] == "Grand Crusader":
Hand.draw(source='random', hero='paladin')
elif entity['name'] == "Grimestreet Informant":
Hand.draw(source='discovered', hero='hunter, paladin, or warrior')
elif entity['name'] == "I Know a Guy":
Hand.draw(source='discovered', kind='taunt minion')
elif entity['name'] == "Jeweled Scarab":
Hand.draw(source='discovered', note='A 3-cost card')
elif entity['name'] == "Journey Below":
Hand.draw(source='discovered', note='A deathrattle card')
elif entity['name'] == "Kabal Chemist":
Hand.draw(source='random', kind='potion spell')
elif entity['name'] == "Kabal Courier":
Hand.draw(source='discovered', hero='mage, priest, or warlock')
elif entity['name'] == "Lotus Agents":
Hand.draw(source='discovered', hero='druid, rogue, or shaman')
elif entity['name'] == "Mind Vision":
Hand.draw(note='A card from your hand')
elif entity['name'] == "Mukla, Tyrant of the Vale":
Hand.draw(note='Banana', kind='spell')
Hand.draw(note='Banana', kind='spell')
elif entity['name'] == "Museum Curator":
# I'm ignoring "Tentacles For Arms" because it's bad
Hand.draw(source='discovered', note='A deathrattle card', kind='minion')
elif entity['name'] == "Nefarian":
Hand.draw(source='random', hero=Utilities.our_hero)
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Neptulon":
Hand.draw(source='random', kind='murloc minion')
Hand.draw(source='random', kind='murloc minion')
Hand.draw(source='random', kind='murloc minion')
Hand.draw(source='random', kind='murloc minion')
elif entity['name'] == "Raven Idol":
Hand.draw(source='discovered', kind='minion or spell')
elif entity['name'] == "Sense Demons":
Hand.draw(kind='demon minion')
Hand.draw(kind='demon minion')
elif entity['name'] == "Swashburglar":
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Thoughtsteal":
Hand.draw(note='A random card from your deck')
Hand.draw(note='A random card from your deck')
elif entity['name'] == "Tomb Spider":
Hand.draw(source='discovered', kind='beast minion')
elif entity['name'] == "Toshley":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Unstable Portal":
Hand.draw(source='random', kind='minion', cost=-3)
elif entity['name'] == "Wild Growth":
if Utilities.resources == '10':
Hand.draw(note='Excess Mana', hero='druid', kind='spell')
elif entity['name'] == "Xaril, Poisoned Mind":
Hand.draw(source='random', kind='toxin spell')
elif entity['name'] == "Call Pet":
Hand.notes.append('If it\'s a beast, cost -4')
elif entity['name'] == "Far Sight":
Hand.notes.append('Costs (3) less')
elif entity['player'] == Utilities.us:
if entity['name'] == "King Mukla":
Hand.draw(kind='Banana')
Hand.draw(kind='Banana')
elif entity['name'] == "Mulch":
Hand.draw(source='random', kind='minion')
# if entity['player'] in [Utilities.us, Utilities.them]:
if entity['name'] == "Elite Tauren Chieftain":
Hand.draw(kind='Power Chord spell')
elif entity['name'] == "Lord Jaraxxus":
Utilities.set_hero(entity)
elif entity['name'] == "Spellslinger":
Hand.draw(source='random', kind='spell')
# When a card hits the board and we can see what its name and its target's name is.
def play3(entity, target):
if entity['player'] == Utilities.them:
if entity['name'] in ['Fireblast', 'Fireblast Rank 2', 'Lesser Heal', 'Lightning Jolt', 'Mind Shatter', 'Mind Spike', 'Heal']:
logging.info('Opponent uses their hero power, targetting %s' % target['name'])
else:
logging.info('Opponent plays %s targetting %s' % (entity['name'], target['name']))
if entity['name'] == "Soulfire":
global showentity
showentity = discard
if entity['name'] in ["Ancient Brewmaster", "Convert", "Gadgetzan Ferryman", "Time Rewinder", "Youthful Brewmaster"]:
Hand.draw(note=target['name'], kind='minion')
elif entity['name'] in ["Bloodthistle Toxin", "Shadowstep"]:
Hand.draw(note=target['name'], kind='minion', cost=-2)
elif entity['name'] == "Convert":
Hand.draw(note=target['name'], kind='minion')
elif entity['name'] == "Shadowcaster":
Hand.draw(note='A 1/1 copy of %s which costs (1)' % target['name'], kind='minion')
elif entity['player'] == Utilities.us:
if entity['name'] == "Freezing Trap":
Hand.draw(note=target['name'], kind='minion', cost=+2)
elif entity['name'] == "Sap":
Hand.draw(note=target['name'], kind='minion')
if target['player'] == Utilities.them:
if entity['name'] in ["Dream", "Kindapper"]:
Hand.draw(note=target['name'], kind='minion')
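# When a minion dies; the branches below track cards its death effects add to the opponent's hand.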
def die(entity):
if entity['player'] == Utilities.them:
logging.info('Opponent\'s %s dies' % entity['name'])
if entity['name'] == "Anub'arak":
Hand.draw(note='Anub\'arak')
elif entity['name'] == "Clockwork Gnome":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Deadly Fork":
Hand.draw(note='Sharp Fork', kind='weapon')
elif entity['name'] == "Rhonin":
            Hand.draw(note='Arcane Missiles', hero='mage', kind='spell')
            Hand.draw(note='Arcane Missiles', hero='mage', kind='spell')
            Hand.draw(note='Arcane Missiles', hero='mage', kind='spell')
elif entity['name'] == "Shifting Shade":
Hand.draw(note='A card from your deck')
elif entity['name'] == "Tentacles for Arms":
Hand.draw(note='Tentacles for Arms')
elif entity['name'] == "Tomb Pillager":
Hand.draw(note='The Coin', kind='spell')
elif entity['name'] == "Toshley":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Undercity Huckster":
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Xaril, Poisoned Mind":
Hand.draw(source='random', kind='toxin spell')
elif entity['name'] == "Webspinner":
Hand.draw(source='random', kind='beast minion')
# if entity['player'] in [Utilities.us, Utilities.them]:
if entity['name'] == "Mechanical Yeti":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Majordomo Executus":
Utilities.set_hero(entity)
def die2(entity):
if entity['player'] == Utilities.them:
if entity['name'] == "Explorer's Hat":
Hand.draw(note='Explorer\'s Hat', hero='Hunter', kind='spell')
elif entity['name'] == "Nerubian Spores": # "Infest"
Hand.draw(source='random', kind='beast minion')
# Be careful of Blessing of Wisdom (others?) which can 'trigger' an effect on a card that already has a triggered effect.
def trigger(entity):
if entity['player'] == Utilities.them:
logging.info('Opponent\'s %s triggers' % entity['name'])
if entity['name'] == "Alarm-o-Bot":
Hand.draw(note='Alarm-o-Bot', kind='minion')
elif entity['name'] == "Archmage Antonidas":
Hand.draw(note='Fireball', hero='mage', kind='spell')
elif entity['name'] == "Colliseum Manager":
Hand.draw(note='Colliseum Manager', kind='minion')
elif entity['name'] == "Cutpurse":
Hand.draw(note='The Coin', kind='spell')
elif entity['name'] == "Emperor Thaurissan":
for card in Hand.hand:
card.cost -= 1
elif entity['name'] == "Gazlowe":
Hand.draw(source='random', kind='mech minion')
elif entity['name'] == "Kabal Trafficker":
Hand.draw(source='random', kind='demon minion')
elif entity['name'] == "Mech-Bear-Cat":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Nexus-Champion Saraad":
Hand.draw(source='random', kind='spell')
elif entity['name'] == "Recruiter":
Hand.draw(note='Squire', kind='minion')
elif entity['name'] == "Shaku, the Collector":
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Ysera":
Hand.draw(note='A Dream card', kind='spell')
# Show Entity blocks are used for a number of things. Here, this is used for
# getting the hand position of discarded cards, and determining cards drawn for
# King's Elekk Joust victories.
def blockEnd():
global showentity
def showentity(data):
pass
blockEnd()
def discard(data):
logging.info('Opponent discards %s' % data['CardID'])
Hand.hand.pop(int(data['Entity']['zonePos'])-1)
def turnover():
if Utilities.overload != 0:
logging.info('Overload next turn: %d' % Utilities.overload)
Utilities.overload = 0
| apache-2.0 | -6,323,426,377,481,312,000 | 48.199262 | 292 | 0.59769 | false |
lliendo/Radar | radar/network/monitor/select_monitor.py | 1 | 1307 | # -*- coding: utf-8 -*-
"""
This file is part of Radar.
Radar is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Radar is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Lesser GNU General Public License for more details.
You should have received a copy of the Lesser GNU General Public License
along with Radar. If not, see <http://www.gnu.org/licenses/>.
Copyright 2015 Lucas Liendo.
"""
from . import NetworkMonitor, NetworkMonitorError
class SelectMonitor(NetworkMonitor):
def __new__(cls, *args, **kwargs):
try:
global select
from select import select
except ImportError:
raise NetworkMonitorError(cls.__name__)
return super(SelectMonitor, cls).__new__(cls, *args, **kwargs)
def watch(self):
sockets = [self._server.socket] + [c.socket for c in self._server._clients]
ready_fds, _, _ = select([s.fileno() for s in sockets], [], [], self._timeout)
super(SelectMonitor, self)._watch(ready_fds)
| lgpl-3.0 | 4,419,965,365,318,617,000 | 32.512821 | 86 | 0.694721 | false |
WoLpH/dropbox | dropbox/util.py | 1 | 1940 | import os
class AnalyzeFileObjBug(Exception):
msg = ("\n"
"Expected file object to have %d bytes, instead we read %d bytes.\n"
"File size detection may have failed (see dropbox.util.AnalyzeFileObj)\n")
def __init__(self, expected, actual):
self.expected = expected
self.actual = actual
def __str__(self):
return self.msg % (self.expected, self.actual)
def analyze_file_obj(obj):
''' Get the size and contents of a file-like object.
Returns: (size, raw_data)
size: The amount of data waiting to be read
raw_data: If not None, the entire contents of the stream (as a string).
None if the stream should be read() in chunks.
'''
pos = 0
if hasattr(obj, 'tell'):
pos = obj.tell()
# Handle cStringIO and StringIO
if hasattr(obj, 'getvalue'):
# Why using getvalue() makes sense:
# For StringIO, this string is pre-computed anyway by read().
# For cStringIO, getvalue() is the only way
# to determine the length without read()'ing the whole thing.
raw_data = obj.getvalue()
if pos == 0:
return (len(raw_data), raw_data)
else:
# We could return raw_data[pos:], but that could drastically
# increase memory usage. Better to read it block at a time.
size = max(0, len(raw_data) - pos)
return (size, None)
# Handle real files
if hasattr(obj, 'fileno'):
size = max(0, os.fstat(obj.fileno()).st_size - pos)
return (size, None)
# User-defined object with len()
if hasattr(obj, '__len__'):
size = max(0, len(obj) - pos)
return (size, None)
# We don't know what kind of stream this is.
# To determine the size, we must read the whole thing.
raw_data = obj.read()
return (len(raw_data), raw_data)
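# Illustrative usage sketch (the file name here is hypothetical):
#   with open('photo.jpg', 'rb') as f:
#       size, raw_data = analyze_file_obj(f)
#   # For a real file the size comes from fstat() and raw_data is None,
#   # so the caller reads the stream in chunks.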
| mit | 5,704,691,118,987,827,000 | 33.642857 | 89 | 0.57732 | false |
asaolabs/python-lambda | aws_lambda/aws_lambda.py | 1 | 25699 | # -*- coding: utf-8 -*-
from __future__ import print_function
import hashlib
import json
import logging
import os
import sys
import time
from collections import defaultdict
from imp import load_source
from shutil import copy
from shutil import copyfile
from shutil import copystat
from shutil import copytree
from tempfile import mkdtemp
import boto3
import botocore
import yaml
import subprocess
from .helpers import archive
from .helpers import get_environment_variable_value
from .helpers import mkdir
from .helpers import read
from .helpers import timestamp
from .helpers import LambdaContext
ARN_PREFIXES = {
'cn-north-1': 'aws-cn',
'cn-northwest-1': 'aws-cn',
'us-gov-west-1': 'aws-us-gov',
}
log = logging.getLogger(__name__)
def cleanup_old_versions(
src, keep_last_versions,
config_file='config.yaml', profile_name=None,
):
"""Deletes old deployed versions of the function in AWS Lambda.
Won't delete $Latest and any aliased version
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param int keep_last_versions:
The number of recent versions to keep and not delete
"""
if keep_last_versions <= 0:
print("Won't delete all versions. Please do this manually")
else:
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
profile_name = cfg.get('profile')
aws_access_key_id = cfg.get('aws_access_key_id')
aws_secret_access_key = cfg.get('aws_secret_access_key')
client = get_client(
'lambda', profile_name, aws_access_key_id, aws_secret_access_key,
cfg.get('region'),
)
response = client.list_versions_by_function(
FunctionName=cfg.get('function_name'),
)
versions = response.get('Versions')
if len(response.get('Versions')) < keep_last_versions:
print('Nothing to delete. (Too few versions published)')
else:
version_numbers = [elem.get('Version') for elem in
versions[1:-keep_last_versions]]
for version_number in version_numbers:
try:
client.delete_function(
FunctionName=cfg.get('function_name'),
Qualifier=version_number,
)
except botocore.exceptions.ClientError as e:
print('Skipping Version {}: {}'
.format(version_number, e.message))
def deploy(
src, requirements=None, local_package=None,
config_file='config.yaml', profile_name=None,
preserve_vpc=False
):
"""Deploys a new function to AWS Lambda.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
The path to a local package with should be included in the deploy as
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src, config_file=config_file,
requirements=requirements,
local_package=local_package,
)
existing_config = get_function_config(cfg)
if existing_config:
update_function(cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc)
else:
create_function(cfg, path_to_zip_file)
def deploy_s3(
src, requirements=None, local_package=None,
config_file='config.yaml', profile_name=None,
preserve_vpc=False
):
"""Deploys a new function via AWS S3.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
The path to a local package with should be included in the deploy as
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src, config_file=config_file, requirements=requirements,
local_package=local_package,
)
use_s3 = True
s3_file = upload_s3(cfg, path_to_zip_file, use_s3)
existing_config = get_function_config(cfg)
if existing_config:
update_function(cfg, path_to_zip_file, existing_config, use_s3=use_s3,
s3_file=s3_file, preserve_vpc=preserve_vpc)
else:
create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file)
def upload(
src, requirements=None, local_package=None,
config_file='config.yaml', profile_name=None,
):
"""Uploads a new function to AWS S3.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
The path to a local package with should be included in the deploy as
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src, config_file=config_file, requirements=requirements,
local_package=local_package,
)
upload_s3(cfg, path_to_zip_file)
def invoke(
src, event_file='event.json',
config_file='config.yaml', profile_name=None,
verbose=False,
):
"""Simulates a call to your function.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str alt_event:
An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details.
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Set AWS_PROFILE environment variable based on `--profile` option.
if profile_name:
os.environ['AWS_PROFILE'] = profile_name
# Load environment variables from the config file into the actual
# environment.
env_vars = cfg.get('environment_variables')
if env_vars:
for key, value in env_vars.items():
os.environ[key] = get_environment_variable_value(value)
# Load and parse event file.
path_to_event_file = os.path.join(src, event_file)
event = read(path_to_event_file, loader=json.loads)
# Tweak to allow module to import local modules
try:
sys.path.index(src)
except ValueError:
sys.path.append(src)
handler = cfg.get('handler')
# Inspect the handler string (<module>.<function name>) and translate it
# into a function we can execute.
fn = get_callable_handler_function(src, handler)
timeout = cfg.get('timeout')
if timeout:
        context = LambdaContext(cfg.get('function_name'), timeout)
else:
context = LambdaContext(cfg.get('function_name'))
start = time.time()
results = fn(event, context)
end = time.time()
print('{0}'.format(results))
if verbose:
print('\nexecution time: {:.8f}s\nfunction execution '
'timeout: {:2}s'.format(end - start, cfg.get('timeout', 15)))
def init(src, minimal=False):
"""Copies template files to a given directory.
:param str src:
The path to output the template lambda project files.
:param bool minimal:
Minimal possible template files (excludes event.json).
"""
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'project_templates',
)
for filename in os.listdir(templates_path):
if (minimal and filename == 'event.json') or filename.endswith('.pyc'):
continue
dest_path = os.path.join(templates_path, filename)
if not os.path.isdir(dest_path):
copy(dest_path, src)
def build(
src, requirements=None, local_package=None,
config_file='config.yaml', profile_name=None,
):
"""Builds the file bundle.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
The path to a local package with should be included in the deploy as
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Get the absolute path to the output directory and create it if it doesn't
# already exist.
dist_directory = cfg.get('dist_directory', 'dist')
path_to_dist = os.path.join(src, dist_directory)
mkdir(path_to_dist)
# Combine the name of the Lambda function with the current timestamp to use
# for the output filename.
function_name = cfg.get('function_name')
output_filename = '{0}-{1}.zip'.format(timestamp(), function_name)
path_to_temp = mkdtemp(prefix='aws-lambda')
pip_install_to_target(
path_to_temp,
requirements=requirements,
local_package=local_package,
)
# Hack for Zope.
if 'zope' in os.listdir(path_to_temp):
print(
'Zope packages detected; fixing Zope package paths to '
'make them importable.',
)
# Touch.
with open(os.path.join(path_to_temp, 'zope/__init__.py'), 'wb'):
pass
# Gracefully handle whether ".zip" was included in the filename or not.
output_filename = (
'{0}.zip'.format(output_filename)
if not output_filename.endswith('.zip')
else output_filename
)
# Allow definition of source code directories we want to build into our
# zipped package.
build_config = defaultdict(**cfg.get('build', {}))
build_source_directories = build_config.get('source_directories', '')
build_source_directories = (
build_source_directories
if build_source_directories is not None
else ''
)
source_directories = [
d.strip() for d in build_source_directories.split(',')
]
files = []
for filename in os.listdir(src):
if os.path.isfile(filename):
if filename == '.DS_Store':
continue
if filename == config_file:
continue
print('Bundling: %r' % filename)
files.append(os.path.join(src, filename))
elif os.path.isdir(filename) and filename in source_directories:
print('Bundling directory: %r' % filename)
files.append(os.path.join(src, filename))
# "cd" into `temp_path` directory.
os.chdir(path_to_temp)
for f in files:
if os.path.isfile(f):
_, filename = os.path.split(f)
# Copy handler file into root of the packages folder.
copyfile(f, os.path.join(path_to_temp, filename))
copystat(f, os.path.join(path_to_temp, filename))
elif os.path.isdir(f):
destination_folder = os.path.join(path_to_temp, f[len(src) + 1:])
copytree(f, destination_folder)
# Zip them together into a single file.
# TODO: Delete temp directory created once the archive has been compiled.
path_to_zip_file = archive('./', path_to_dist, output_filename)
return path_to_zip_file
def get_callable_handler_function(src, handler):
"""Tranlate a string of the form "module.function" into a callable
function.
:param str src:
The path to your Lambda project containing a valid handler file.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
# "cd" into `src` directory.
os.chdir(src)
module_name, function_name = handler.split('.')
filename = get_handler_filename(handler)
path_to_module_file = os.path.join(src, filename)
module = load_source(module_name, path_to_module_file)
return getattr(module, function_name)
def get_handler_filename(handler):
"""Shortcut to get the filename from the handler string.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
module_name, _ = handler.split('.')
return '{0}.py'.format(module_name)
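# For example, a handler string such as 'service.handler' (illustrative name)
# maps to the module file 'service.py' and the function 'handler' inside it.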
def _install_packages(path, packages):
"""Install all packages listed to the target directory.
    Ignores any entry for Python itself or for python-lambda, since those are
    only needed for deploying, not for running the code.
:param str path:
Path to copy installed pip packages to.
:param list packages:
A list of packages to be installed via pip.
"""
def _filter_blacklist(package):
blacklist = ['-i', '#', 'Python==', 'python-lambda==']
return all(package.startswith(entry) is False for entry in blacklist)
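    # Illustrative effect of the blacklist: entries such as 'Python==2.7' or
    # 'python-lambda==3.0.0' are dropped, while ordinary requirements like
    # 'requests==2.20.0' pass through (version numbers here are made up).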
filtered_packages = filter(_filter_blacklist, packages)
for package in filtered_packages:
if package.startswith('-e '):
package = package.replace('-e ', '')
print('Installing {package}'.format(package=package))
subprocess.check_call([sys.executable, '-m', 'pip', 'install', package, '-t', path, '--ignore-installed'])
print ('Install directory contents are now: {directory}'.format(directory=os.listdir(path)))
def pip_install_to_target(path, requirements=None, local_package=None):
"""For a given active virtualenv, gather all installed pip packages then
copy (re-install) them to the path provided.
:param str path:
Path to copy installed pip packages to.
:param str requirements:
If set, only the packages in the supplied requirements file are
installed.
If not set then installs all packages found via pip freeze.
:param str local_package:
The path to a local package with should be included in the deploy as
well (and/or is not available on PyPi)
"""
packages = []
if not requirements:
print('Gathering pip packages')
pkgStr = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])
packages.extend(pkgStr.decode('utf-8').splitlines())
else:
if os.path.exists(requirements):
print('Gathering requirement packages')
data = read(requirements)
packages.extend(data.splitlines())
if not packages:
print('No dependency packages installed!')
if local_package is not None:
if not isinstance(local_package, (list, tuple)):
local_package = [local_package]
for l_package in local_package:
packages.append(l_package)
_install_packages(path, packages)
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, 'aws')
return 'arn:{0}:iam::{1}:role/{2}'.format(prefix, account_id, role)
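# Illustrative example (made-up account id):
#   get_role_name('us-east-1', '123456789012', 'lambda_basic_execution')
#   -> 'arn:aws:iam::123456789012:role/lambda_basic_execution'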
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key,
region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
'sts', profile_name, aws_access_key_id, aws_secret_access_key,
region,
)
return client.get_caller_identity().get('Account')
def get_client(
client, profile_name, aws_access_key_id, aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client)
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print('Creating your new Lambda function')
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get('profile')
aws_access_key_id = cfg.get('aws_access_key_id')
aws_secret_access_key = cfg.get('aws_secret_access_key')
account_id = get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, cfg.get(
'region',
),
)
role = get_role_name(
cfg.get('region'), account_id,
cfg.get('role', 'lambda_basic_execution'),
)
client = get_client(
'lambda', profile_name, aws_access_key_id, aws_secret_access_key,
cfg.get('region'),
)
# Do we prefer development variable over config?
buck_name = (
os.environ.get('S3_BUCKET_NAME') or cfg.get('bucket_name')
)
func_name = (
os.environ.get('LAMBDA_FUNCTION_NAME') or cfg.get('function_name')
)
print('Creating lambda function with name: {}'.format(func_name))
if use_s3:
kwargs = {
'FunctionName': func_name,
'Runtime': cfg.get('runtime', 'python2.7'),
'Role': role,
'Handler': cfg.get('handler'),
'Code': {
'S3Bucket': '{}'.format(buck_name),
'S3Key': '{}'.format(s3_file),
},
'Description': cfg.get('description', ''),
'Timeout': cfg.get('timeout', 15),
'MemorySize': cfg.get('memory_size', 512),
'VpcConfig': {
'SubnetIds': cfg.get('subnet_ids', []),
'SecurityGroupIds': cfg.get('security_group_ids', []),
},
'Publish': True,
}
else:
kwargs = {
'FunctionName': func_name,
'Runtime': cfg.get('runtime', 'python2.7'),
'Role': role,
'Handler': cfg.get('handler'),
'Code': {'ZipFile': byte_stream},
'Description': cfg.get('description', ''),
'Timeout': cfg.get('timeout', 15),
'MemorySize': cfg.get('memory_size', 512),
'VpcConfig': {
'SubnetIds': cfg.get('subnet_ids', []),
'SecurityGroupIds': cfg.get('security_group_ids', []),
},
'Publish': True,
}
if 'tags' in cfg:
kwargs.update(
Tags={
key: str(value)
for key, value in cfg.get('tags').items()
}
)
if 'environment_variables' in cfg:
kwargs.update(
Environment={
'Variables': {
key: get_environment_variable_value(value)
for key, value
in cfg.get('environment_variables').items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(FunctionName=func_name, ReservedConcurrentExecutions=concurrency)
def update_function(
cfg, path_to_zip_file, existing_cfg, use_s3=False, s3_file=None, preserve_vpc=False
):
"""Updates the code of an existing Lambda function"""
print('Updating your Lambda function')
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get('profile')
aws_access_key_id = cfg.get('aws_access_key_id')
aws_secret_access_key = cfg.get('aws_secret_access_key')
account_id = get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, cfg.get(
'region',
),
)
role = get_role_name(
cfg.get('region'), account_id,
cfg.get('role', 'lambda_basic_execution'),
)
client = get_client(
'lambda', profile_name, aws_access_key_id, aws_secret_access_key,
cfg.get('region'),
)
# Do we prefer development variable over config?
buck_name = (
os.environ.get('S3_BUCKET_NAME') or cfg.get('bucket_name')
)
if use_s3:
client.update_function_code(
FunctionName=cfg.get('function_name'),
S3Bucket='{}'.format(buck_name),
S3Key='{}'.format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get('function_name'),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
'FunctionName': cfg.get('function_name'),
'Role': role,
'Runtime': cfg.get('runtime'),
'Handler': cfg.get('handler'),
'Description': cfg.get('description', ''),
'Timeout': cfg.get('timeout', 15),
'MemorySize': cfg.get('memory_size', 512),
}
if preserve_vpc:
kwargs['VpcConfig'] = existing_cfg.get('Configuration', {}).get('VpcConfig')
if kwargs['VpcConfig'] is None:
kwargs['VpcConfig'] = {
'SubnetIds': cfg.get('subnet_ids', []),
'SecurityGroupIds': cfg.get('security_group_ids', []),
}
else:
del kwargs['VpcConfig']['VpcId']
else:
kwargs['VpcConfig'] = {
'SubnetIds': cfg.get('subnet_ids', []),
'SecurityGroupIds': cfg.get('security_group_ids', []),
}
if 'environment_variables' in cfg:
kwargs.update(
Environment={
'Variables': {
key: str(get_environment_variable_value(value))
for key, value
in cfg.get('environment_variables').items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(FunctionName=cfg.get('function_name'), ReservedConcurrentExecutions=concurrency)
elif 'Concurrency' in existing_cfg:
client.delete_function_concurrency(FunctionName=cfg.get('function_name'))
if 'tags' in cfg:
tags = {
key: str(value)
for key, value in cfg.get('tags').items()
}
if tags != existing_cfg.get('Tags'):
if existing_cfg.get('Tags'):
client.untag_resource(Resource=ret['FunctionArn'],
TagKeys=list(existing_cfg['Tags'].keys()))
client.tag_resource(Resource=ret['FunctionArn'], Tags=tags)
def upload_s3(cfg, path_to_zip_file, *use_s3):
"""Upload a function to AWS S3."""
print('Uploading your new Lambda function')
profile_name = cfg.get('profile')
aws_access_key_id = cfg.get('aws_access_key_id')
aws_secret_access_key = cfg.get('aws_secret_access_key')
client = get_client(
's3', profile_name, aws_access_key_id, aws_secret_access_key,
cfg.get('region'),
)
byte_stream = b''
with open(path_to_zip_file, mode='rb') as fh:
byte_stream = fh.read()
s3_key_prefix = cfg.get('s3_key_prefix', '/dist')
checksum = hashlib.new('md5', byte_stream).hexdigest()
timestamp = str(time.time())
filename = '{prefix}{checksum}-{ts}.zip'.format(
prefix=s3_key_prefix, checksum=checksum, ts=timestamp,
)
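    # With the default prefix this produces keys shaped like
    # '/dist<md5-hex>-<unix-timestamp>.zip' (shape shown for illustration only).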
# Do we prefer development variable over config?
buck_name = (
os.environ.get('S3_BUCKET_NAME') or cfg.get('bucket_name')
)
func_name = (
os.environ.get('LAMBDA_FUNCTION_NAME') or cfg.get('function_name')
)
kwargs = {
'Bucket': '{}'.format(buck_name),
'Key': '{}'.format(filename),
'Body': byte_stream,
}
client.put_object(**kwargs)
print('Finished uploading {} to S3 bucket {}'.format(func_name, buck_name))
if use_s3:
return filename
def get_function_config(cfg):
"""Check whether a function exists or not and return its config"""
function_name = cfg.get('function_name')
profile_name = cfg.get('profile')
aws_access_key_id = cfg.get('aws_access_key_id')
aws_secret_access_key = cfg.get('aws_secret_access_key')
client = get_client(
'lambda', profile_name, aws_access_key_id, aws_secret_access_key,
cfg.get('region'),
)
try:
return client.get_function(FunctionName=function_name)
except client.exceptions.ResourceNotFoundException as e:
if 'Function not found' in str(e):
return False
def get_concurrency(cfg):
"""Return the Reserved Concurrent Executions if present in the config"""
concurrency = int(cfg.get('concurrency', 0))
return max(0, concurrency)
def read_cfg(path_to_config_file, profile_name):
cfg = read(path_to_config_file, loader=yaml.load)
if profile_name is not None:
cfg['profile'] = profile_name
elif 'AWS_PROFILE' in os.environ:
cfg['profile'] = os.environ['AWS_PROFILE']
return cfg
| isc | 8,724,808,778,220,268,000 | 32.814474 | 120 | 0.613643 | false |
Rhoana/rh_aligner | old/filter_tiles.py | 1 | 2329 | # Takes a json file that contains many tiles with their bounding boxes (Tile-Spec format)
# and a bounding box, and outputs a json file for each tile that is overlapping with the bounding box
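#
# Example invocation (file names are hypothetical):
#   python filter_tiles.py tiles.json -o filtered.json -b "0 2048 0 2048"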
import sys
import os
import argparse
import json
from bounding_box import BoundingBox
# common functions
def load_tiles(tiles_spec_fname, bbox):
relevant_tiles = []
with open(tiles_spec_fname, 'r') as data_file:
data = json.load(data_file)
for tile in data:
tile_bbox = BoundingBox.fromList(tile['bbox'])
if bbox.overlap(tile_bbox):
relevant_tiles.append(tile)
return relevant_tiles
def filter_tiles(tiles_fname, out_fname, bbox):
# parse the bounding box arguments
bbox = BoundingBox.fromStr(bbox)
# load all tiles from the tile-spec json file that are relevant to our bounding box
relevant_tiles = load_tiles(tiles_fname, bbox)
# Create a tile-spec file that includes all relevant tiles
with open(out_fname, 'w') as outfile:
json.dump(relevant_tiles, outfile, sort_keys=True, indent=4)
def main():
# Command line parser
parser = argparse.ArgumentParser(description='Takes a json file that contains many tiles with their bounding boxes (Tile-Spec format)\
and a bounding box, and outputs a json file for each tile that is overlapping with the bounding box')
parser.add_argument('tiles_fname', metavar='tiles_json', type=str,
help='a tile_spec file that contains all the images to be aligned in json format')
parser.add_argument('-o', '--output_file', type=str,
help='an output tile_spec file, that will include only the relevant tiles (default: ./filtered.json)',
default='./filtered.json')
# the default bounding box is as big as the image can be
parser.add_argument('-b', '--bounding_box', type=str,
help='the bounding box of the part of image that needs to be aligned format: "from_x to_x from_y to_y" (default: all tiles)',
default='{0} {1} {2} {3}'.format((-sys.maxint - 1), sys.maxint, (-sys.maxint - 1), sys.maxint))
args = parser.parse_args()
#print args
filter_tiles(args.tiles_fname, args.output_file, args.bounding_box)
if __name__ == '__main__':
main()
| mit | 8,947,238,689,689,303,000 | 38.474576 | 149 | 0.660799 | false |
donovan-duplessis/pwnurl | docs/conf.py | 1 | 8358 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pwnurl
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pwnurl'
copyright = u'2014, Donovan du Plessis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pwnurl.__version__
# The full version, including alpha/beta/rc tags.
release = pwnurl.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pwnurldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pwnurl.tex', u'Pwnurl Documentation',
u'Donovan du Plessis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pwnurl', u'Pwnurl Documentation',
[u'Donovan du Plessis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pwnurl', u'Pwnurl Documentation',
u'Donovan du Plessis', 'pwnurl', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | mit | 6,714,855,766,473,701,000 | 31.15 | 80 | 0.707107 | false |
dunkhong/grr | grr/server/setup.py | 1 | 7769 | #!/usr/bin/env python
"""This is the setup.py file for the GRR client.
This is just a meta-package which pulls in the minimal requirements to create a
full grr server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import itertools
import os
import shutil
import subprocess
import sys
from setuptools import find_packages
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.sdist import sdist
GRR_NO_MAKE_UI_FILES_VAR = "GRR_NO_MAKE_UI_FILES"
# TODO: Fix this import once support for Python 2 is dropped.
# pylint: disable=g-import-not-at-top
if sys.version_info.major == 2:
import ConfigParser as configparser
else:
import configparser
# pylint: enable=g-import-not-at-top
def find_data_files(source, ignore_dirs=None):
ignore_dirs = ignore_dirs or []
result = []
for directory, dirnames, files in os.walk(source):
dirnames[:] = [d for d in dirnames if d not in ignore_dirs]
files = [os.path.join(directory, x) for x in files]
result.append((directory, files))
return result
def make_ui_files():
"""Builds necessary assets from sources."""
# Install node_modules, but keep package(-lock).json frozen.
# Using shell=True, otherwise npm is not found in a nodeenv-built
# virtualenv on Windows.
subprocess.check_call(
"npm ci", shell=True, cwd="grr_response_server/gui/static")
subprocess.check_call(
"npm run gulp compile", shell=True, cwd="grr_response_server/gui/static")
def get_config():
"""Get INI parser with version.ini data."""
ini_path = os.path.join(THIS_DIRECTORY, "version.ini")
if not os.path.exists(ini_path):
ini_path = os.path.join(THIS_DIRECTORY, "../../version.ini")
if not os.path.exists(ini_path):
raise RuntimeError("Couldn't find version.ini")
config = configparser.SafeConfigParser()
config.read(ini_path)
return config
IGNORE_GUI_DIRS = ["node_modules", "tmp"]
THIS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
# If you run setup.py from the root GRR dir you get very different results since
# setuptools uses the MANIFEST.in from the root dir. Make sure we are in the
# package dir.
os.chdir(THIS_DIRECTORY)
VERSION = get_config()
class Develop(develop):
"""Build developer version (pip install -e)."""
user_options = develop.user_options + [
# TODO: This has to be `bytes` on Python 2. Remove this `str`
# call once support for Python 2 is dropped.
(str("no-make-ui-files"), None, "Don't build UI JS/CSS bundles."),
]
def initialize_options(self):
self.no_make_ui_files = None
develop.initialize_options(self)
def run(self):
# pip install -e . --install-option="--no-make-ui-files" passes the
# --no-make-ui-files flag to all GRR dependencies, which doesn't make
# much sense. Checking an environment variable to have an easy way
# to set the flag for grr-response-server package only.
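    # Illustrative usage: `GRR_NO_MAKE_UI_FILES=1 pip install -e .` skips
    # building the UI bundles.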
if (not self.no_make_ui_files and
not os.environ.get(GRR_NO_MAKE_UI_FILES_VAR)):
make_ui_files()
develop.run(self)
class Sdist(sdist):
"""Build sdist."""
user_options = sdist.user_options + [
# TODO: This has to be `bytes` on Python 2. Remove this `str`
# call once support for Python 2 is dropped.
(str("no-make-ui-files"), None, "Don't build UI JS/CSS bundles."),
]
def initialize_options(self):
self.no_make_ui_files = None
sdist.initialize_options(self)
def run(self):
    # For consistency, respecting the GRR_NO_MAKE_UI_FILES variable just like
    # the Develop command does.
if (not self.no_make_ui_files and
not os.environ.get(GRR_NO_MAKE_UI_FILES_VAR)):
make_ui_files()
sdist.run(self)
def make_release_tree(self, base_dir, files):
sdist.make_release_tree(self, base_dir, files)
sdist_version_ini = os.path.join(base_dir, "version.ini")
if os.path.exists(sdist_version_ini):
os.unlink(sdist_version_ini)
shutil.copy(
os.path.join(THIS_DIRECTORY, "../../version.ini"), sdist_version_ini)
data_files = list(
itertools.chain(
find_data_files("grr_response_server/checks"),
find_data_files("grr_response_server/databases/mysql_migrations"),
find_data_files("grr_response_server/gui/templates"),
find_data_files(
"grr_response_server/gui/static", ignore_dirs=IGNORE_GUI_DIRS),
find_data_files(
"grr_response_server/gui/local/static",
ignore_dirs=IGNORE_GUI_DIRS),
# TODO: This has to be `bytes` on Python 2. Remove this
# `str` call once support for Python 2 is dropped.
[str("version.ini")],
))
setup_args = dict(
name="grr-response-server",
version=VERSION.get("Version", "packageversion"),
description="The GRR Rapid Response Server.",
license="Apache License, Version 2.0",
maintainer="GRR Development Team",
maintainer_email="[email protected]",
url="https://github.com/google/grr",
cmdclass={
"sdist": Sdist,
"develop": Develop
},
packages=find_packages(),
entry_points={
"console_scripts": [
"grr_console = "
"grr_response_server.distro_entry:Console",
"grr_api_shell_raw_access = "
"grr_response_server.distro_entry:ApiShellRawAccess",
"grr_config_updater = "
"grr_response_server.distro_entry:ConfigUpdater",
"grr_frontend = "
"grr_response_server.distro_entry:GrrFrontend",
"grr_server = "
"grr_response_server.distro_entry:GrrServer",
"grr_worker = "
"grr_response_server.distro_entry:Worker",
"grr_admin_ui = "
"grr_response_server.distro_entry:AdminUI",
]
},
install_requires=[
"google-api-python-client==1.7.11",
"google-auth==1.6.3",
"google-cloud-bigquery==1.20.0",
"grr-api-client==%s" % VERSION.get("Version", "packagedepends"),
"grr-response-client-builder==%s" %
VERSION.get("Version", "packagedepends"),
"grr-response-core==%s" % VERSION.get("Version", "packagedepends"),
"Jinja2==2.10.3",
"pexpect==4.7.0",
"portpicker==1.3.1",
"prometheus_client==0.7.1",
"pyjwt==1.7.1",
"pyopenssl==19.0.0", # https://github.com/google/grr/issues/704
"python-crontab==2.3.9",
"python-debian==0.1.36",
"Werkzeug==0.16.0",
],
extras_require={
# This is an optional component. Install to get MySQL data
# store support: pip install grr-response[mysqldatastore]
# When installing from .deb, the python-mysqldb package is used as
# dependency instead of this pip dependency. This is because we run into
# incompatibilities between the system mysqlclient/mariadbclient and the
# Python library otherwise. Thus, this version has to be equal to the
# python-mysqldb version of the system we support. This is currently
# Ubuntu Xenial, see https://packages.ubuntu.com/xenial/python-mysqldb
#
# NOTE: the Xenial-provided 1.3.7 version is not properly Python 3
# compatible. Versions 1.3.13 or later are API-compatible with 1.3.7
# when running on Python 2 and work correctly on Python 3. However,
# they don't have Python 2 wheels released, which makes GRR packaging
# for Python 2 much harder if one of these versions is used.
#
# TODO(user): Find a way to use the latest mysqlclient version
# in GRR server DEB.
"mysqldatastore": ["mysqlclient==1.3.10"],
},
data_files=data_files)
setup(**setup_args)
| apache-2.0 | 2,326,844,858,767,235,600 | 33.528889 | 80 | 0.65195 | false |
danijar/sets | sets/process/glove.py | 1 | 1218 | from zipfile import ZipFile
import numpy as np
from sets.core import Embedding
class Glove(Embedding):
"""
    The pretrained word embeddings from the Stanford NLP group, computed by the
    GloVe model. From: http://nlp.stanford.edu/projects/glove/
"""
URL = 'http://nlp.stanford.edu/data/glove.6B.zip'
def __init__(self, size=100, depth=1):
assert size in (50, 100, 300)
words, embeddings = self.disk_cache('data', self._load, size)
super().__init__(words, embeddings, depth)
assert self.shape == (size,)
@classmethod
def _load(cls, size):
filepath = cls.download(cls.URL)
with ZipFile(filepath, 'r') as archive:
filename = 'glove.6B.{}d.txt'.format(size)
with archive.open(filename) as file_:
return cls._parse(file_)
@staticmethod
def _parse(file_):
words = []
embeddings = []
for line in file_:
chunks = line.split()
word = chunks[0].decode('utf-8')
embedding = np.array(chunks[1:]).astype(np.float32)
words.append(word)
embeddings.append(embedding)
return np.array(words), np.array(embeddings)
| mit | 2,540,729,255,029,553,700 | 31.052632 | 79 | 0.591954 | false |
obnam-mirror/obnam | obnamlib/__init__.py | 1 | 5952 | # Copyright (C) 2009-2017 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cliapp
from .version import __version__, __version_info__
from .structurederror import StructuredError
from .structurederror_finder import find_structured_errors
from .obnamerror import ObnamError
from .defaults import (
DEFAULT_NODE_SIZE,
DEFAULT_CHUNK_SIZE,
DEFAULT_UPLOAD_QUEUE_SIZE,
DEFAULT_LRU_SIZE,
DEFAULT_CHUNKIDS_PER_GROUP,
DEFAULT_NAGIOS_WARN_AGE,
DEFAULT_NAGIOS_CRIT_AGE,
DEFAULT_DIR_BAG_BYTES,
DEFAULT_DIR_CACHE_BYTES,
DEFAULT_CHUNK_CACHE_BYTES,
DEFAULT_CHUNK_BAG_BYTES,
IDPATH_DEPTH,
IDPATH_BITS,
IDPATH_SKIP,
MAX_ID,
)
# Import _obnam if it is there. We need to be able to do things without
# it, especially at build time, while we're generating manual pages.
# If _obnam is not there, substitute a dummy that throws an exception
# if used.
try:
import obnamlib._obnam
except ImportError:
class DummyExtension(object):
def __getattr__(self, name):
raise Exception('Trying to use _obnam, but that was not found.')
_obnam = DummyExtension()
from .sizeparse import SizeSyntaxError, UnitNameError, ByteSizeParser
from .encryption import (
generate_symmetric_key,
encrypt_symmetric,
decrypt_symmetric,
get_public_key,
get_public_key_user_ids,
Keyring,
SecretKeyring,
encrypt_with_keyring,
decrypt_with_secret_keys,
SymmetricKeyCache,
EncryptionError,
GpgError)
from .hooks import (
Hook, MissingFilterError, NoFilterTagError, FilterHook, HookManager)
from .pluginbase import ObnamPlugin
from .vfs import (
VirtualFileSystem,
VfsFactory,
VfsTests,
LockFail,
NEW_DIR_MODE,
NEW_FILE_MODE)
from .vfs_local import LocalFS
from .fsck_work_item import WorkItem
from .repo_fs import RepositoryFS
from .lockmgr import LockManager
from .forget_policy import ForgetPolicy
from .app import App, ObnamIOError, ObnamSystemError
from .humanise import humanise_duration, humanise_size, humanise_speed
from .chunkid_token_map import ChunkIdTokenMap
from .pathname_excluder import PathnameExcluder
from .splitpath import split_pathname
from .obj_serialiser import serialise_object, deserialise_object
from .bag import Bag, BagIdNotSetError, make_object_id, parse_object_id
from .bag_store import BagStore, serialise_bag, deserialise_bag
from .blob_store import BlobStore
from .repo_factory import (
RepositoryFactory,
UnknownRepositoryFormat,
UnknownRepositoryFormatWanted)
from .repo_interface import (
RepositoryInterface,
RepositoryInterfaceTests,
RepositoryClientAlreadyExists,
RepositoryClientDoesNotExist,
RepositoryClientListNotLocked,
RepositoryClientListLockingFailed,
RepositoryClientLockingFailed,
RepositoryClientNotLocked,
RepositoryClientKeyNotAllowed,
RepositoryClientGenerationUnfinished,
RepositoryGenerationKeyNotAllowed,
RepositoryGenerationDoesNotExist,
RepositoryClientHasNoGenerations,
RepositoryFileDoesNotExistInGeneration,
RepositoryFileKeyNotAllowed,
RepositoryChunkDoesNotExist,
RepositoryChunkContentNotInIndexes,
RepositoryChunkIndexesNotLocked,
RepositoryChunkIndexesLockingFailed,
repo_key_name,
REPO_CLIENT_TEST_KEY,
REPO_GENERATION_TEST_KEY,
REPO_GENERATION_STARTED,
REPO_GENERATION_ENDED,
REPO_GENERATION_IS_CHECKPOINT,
REPO_GENERATION_FILE_COUNT,
REPO_GENERATION_TOTAL_DATA,
REPO_GENERATION_INTEGER_KEYS,
REPO_FILE_TEST_KEY,
REPO_FILE_MODE,
REPO_FILE_MTIME_SEC,
REPO_FILE_MTIME_NSEC,
REPO_FILE_ATIME_SEC,
REPO_FILE_ATIME_NSEC,
REPO_FILE_NLINK,
REPO_FILE_SIZE,
REPO_FILE_UID,
REPO_FILE_USERNAME,
REPO_FILE_GID,
REPO_FILE_GROUPNAME,
REPO_FILE_SYMLINK_TARGET,
REPO_FILE_XATTR_BLOB,
REPO_FILE_BLOCKS,
REPO_FILE_DEV,
REPO_FILE_INO,
REPO_FILE_MD5,
REPO_FILE_SHA224,
REPO_FILE_SHA256,
REPO_FILE_SHA384,
REPO_FILE_SHA512,
REPO_FILE_INTEGER_KEYS,
metadata_file_key_mapping)
from .checksummer import (
checksum_algorithms,
get_checksum_algorithm,
get_checksum_algorithm_name,
get_checksum_algorithm_key,
)
from .whole_file_checksummer import WholeFileCheckSummer
from .delegator import RepositoryDelegator, GenerationId
from .backup_progress import BackupProgress
#
# Repository format green-albatross specific modules.
#
from .fmt_ga import (
GREEN_ALBATROSS_VERSION,
RepositoryFormatGA,
GAClientList,
GAClient,
GADirectory,
GAImmutableError,
create_gadirectory_from_dict,
GATree,
GAChunkStore,
GAChunkIndexes,
InMemoryLeafStore,
LeafStore,
CowLeaf,
CowTree,
LeafList,
CowDelta,
removed_key,
)
#
# Repository format 6 specific modules.
#
from .metadata import (
Metadata,
read_metadata,
set_metadata,
SetMetadataError,
metadata_fields)
from .fmt_6.repo_fmt_6 import RepositoryFormat6
from .fmt_6.repo_tree import RepositoryTree
from .fmt_6.chunklist import ChunkList
from .fmt_6.clientlist import ClientList
from .fmt_6.checksumtree import ChecksumTree
from .fmt_6.clientmetadatatree import ClientMetadataTree
option_group = {
'perf': 'Performance tweaking',
'devel': 'Development of Obnam itself',
}
__all__ = locals()
| gpl-3.0 | -1,614,306,495,632,513,000 | 25.690583 | 76 | 0.741263 | false |
sorenh/cc | nova/tests/network_unittest.py | 1 | 5207 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 Anso Labs, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import unittest
from nova import vendor
import IPy
from nova import flags
from nova import test
from nova.compute import network
from nova.auth import users
from nova import utils
class NetworkTestCase(test.TrialTestCase):
def setUp(self):
super(NetworkTestCase, self).setUp()
self.flags(fake_libvirt=True,
fake_storage=True,
fake_network=True,
network_size=32)
logging.getLogger().setLevel(logging.DEBUG)
self.manager = users.UserManager.instance()
try:
self.manager.create_user('netuser', 'netuser', 'netuser')
except: pass
for i in range(0, 6):
name = 'project%s' % i
if not self.manager.get_project(name):
self.manager.create_project(name, 'netuser', name)
self.network = network.PublicNetworkController()
def tearDown(self):
super(NetworkTestCase, self).tearDown()
for i in range(0, 6):
name = 'project%s' % i
self.manager.delete_project(name)
self.manager.delete_user('netuser')
def test_public_network_allocation(self):
pubnet = IPy.IP(flags.FLAGS.public_range)
address = self.network.allocate_ip("netuser", "project0", "public")
self.assertTrue(IPy.IP(address) in pubnet)
self.assertTrue(IPy.IP(address) in self.network.network)
def test_allocate_deallocate_ip(self):
address = network.allocate_ip(
"netuser", "project0", utils.generate_mac())
logging.debug("Was allocated %s" % (address))
self.assertEqual(True, address in self._get_project_addresses("project0"))
rv = network.deallocate_ip(address)
self.assertEqual(False, address in self._get_project_addresses("project0"))
def test_range_allocation(self):
address = network.allocate_ip(
"netuser", "project0", utils.generate_mac())
secondaddress = network.allocate_ip(
"netuser", "project1", utils.generate_mac())
self.assertEqual(True,
address in self._get_project_addresses("project0"))
self.assertEqual(True,
secondaddress in self._get_project_addresses("project1"))
self.assertEqual(False, address in self._get_project_addresses("project1"))
rv = network.deallocate_ip(address)
self.assertEqual(False, address in self._get_project_addresses("project0"))
rv = network.deallocate_ip(secondaddress)
self.assertEqual(False,
secondaddress in self._get_project_addresses("project1"))
def test_subnet_edge(self):
secondaddress = network.allocate_ip("netuser", "project0",
utils.generate_mac())
for project in range(1,5):
project_id = "project%s" % (project)
address = network.allocate_ip(
"netuser", project_id, utils.generate_mac())
address2 = network.allocate_ip(
"netuser", project_id, utils.generate_mac())
address3 = network.allocate_ip(
"netuser", project_id, utils.generate_mac())
self.assertEqual(False,
address in self._get_project_addresses("project0"))
self.assertEqual(False,
address2 in self._get_project_addresses("project0"))
self.assertEqual(False,
address3 in self._get_project_addresses("project0"))
rv = network.deallocate_ip(address)
rv = network.deallocate_ip(address2)
rv = network.deallocate_ip(address3)
rv = network.deallocate_ip(secondaddress)
def test_too_many_projects(self):
for i in range(0, 30):
name = 'toomany-project%s' % i
self.manager.create_project(name, 'netuser', name)
address = network.allocate_ip(
"netuser", name, utils.generate_mac())
rv = network.deallocate_ip(address)
self.manager.delete_project(name)
def _get_project_addresses(self, project_id):
project_addresses = []
for addr in network.get_project_network(project_id).list_addresses():
project_addresses.append(addr)
return project_addresses
| apache-2.0 | -6,371,485,918,494,928,000 | 40.991935 | 83 | 0.620895 | false |
overfl0/Bulletproof-Arma-Launcher | src/utils/popupchain.py | 1 | 2070 | # Bulletproof Arma Launcher
# Copyright (C) 2016 Lukasz Taczuk
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import unicode_literals
class PopupChain(object):
"""A chain of ChainedPopup objects.
After a ChainedPopup object has been appended to a PopupChain, try_open()
has to be called to ensure the popup will be shown at some point in time.
"""
def __init__(self):
self.chain = []
def append(self, popup):
"""Add the popup to the popup chain."""
popup.bind(on_dismiss=lambda instance: self.open_next())
self.chain.append(popup)
def try_open(self):
"""Ensure the popup will be shown at some point in time.
If there is no active popups, the popup will be shown now. If there are
active popups, the popup will be shown as soon as previous popus will be
closed.
"""
# If there is more than one element in the chain, it means a popup is
# already active.
if len(self.chain) != 1:
return
self.chain[0].open()
def open_next(self):
"""Callback that shows the first pending popup from the chain."""
try:
self.chain.pop(0)
if len(self.chain):
self.chain[0].open()
except IndexError:
# Sometimes Kivy will just block because of a CPU-intensive
# operation. Then clicking several times on the OK button will
# trigger several open_next callbacks.
# I don't see any other workaround for this than just ignoring
# the error :(.
pass
# Return True to keep the current window open
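# Illustrative usage sketch (an addition for clarity, not part of the original module).
# It assumes ChainedPopup is a Kivy Popup subclass that fires on_dismiss, as the class
# docstring above describes; the popup names below are hypothetical:
#     chain = PopupChain()
#     chain.append(first_popup)     # a ChainedPopup instance
#     chain.try_open()              # opens now, or queues behind an already active popup
#     chain.append(second_popup)
#     chain.try_open()              # shown automatically once first_popup is dismissed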
| gpl-3.0 | 4,467,453,358,008,327,700 | 34.084746 | 80 | 0.643961 | false |
elffersj/cnfgen | cnfformula/families/graphisomorphism.py | 1 | 4562 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Graph isomorphimsm/automorphism formulas
"""
from cnfformula.cnf import CNF
from cnfformula.cmdline import SimpleGraphHelper
from cnfformula.cmdline import register_cnfgen_subcommand
from cnfformula.families import register_cnf_generator
from cnfformula.graphs import enumerate_vertices
from itertools import combinations,product
def _graph_isomorphism_var(u, v):
"""Standard variable name"""
return "x_{{{0},{1}}}".format(u, v)
@register_cnf_generator
def GraphIsomorphism(G1, G2):
"""Graph Isomorphism formula
The formula is the CNF encoding of the statement that two simple
graphs G1 and G2 are isomorphic.
Parameters
----------
G1 : networkx.Graph
an undirected graph object
G2 : networkx.Graph
an undirected graph object
Returns
-------
A CNF formula which is satiafiable if and only if graphs G1 and G2
are isomorphic.
"""
F = CNF()
F.header = "Graph Isomorphism problem between graphs " +\
G1.name + " and " + G2.name + "\n" + F.header
U=enumerate_vertices(G1)
V=enumerate_vertices(G2)
var = _graph_isomorphism_var
for (u, v) in product(U,V):
F.add_variable(var(u, v))
# Defined on both side
for u in U:
F.add_clause([(True, var(u, v)) for v in V], strict=True)
for v in V:
F.add_clause([(True, var(u, v)) for u in U], strict=True)
# Injective on both sides
for u in U:
for v1, v2 in combinations(V, 2):
F.add_clause([(False, var(u, v1)),
(False, var(u, v2))], strict=True)
for v in V:
for u1, u2 in combinations(U, 2):
F.add_clause([(False, var(u1, v)),
(False, var(u2, v))], strict=True)
# Edge consistency
for u1, u2 in combinations(U, 2):
for v1, v2 in combinations(V, 2):
if G1.has_edge(u1, u2) != G2.has_edge(v1, v2):
F.add_clause([(False, var(u1, v1)),
(False, var(u2, v2))], strict=True)
F.add_clause([(False, var(u1, v2)),
(False, var(u2, v1))], strict=True)
return F
@register_cnf_generator
def GraphAutomorphism(G):
"""Graph Automorphism formula
The formula is the CNF encoding of the statement that a graph G
has a nontrivial automorphism, i.e. an automorphism different from
    the identical one.
Parameter
---------
G : a simple graph
Returns
-------
    A CNF formula which is satisfiable if and only if graph G has a
nontrivial automorphism.
"""
tmp = CNF()
header = "Graph automorphism formula for graph "+ G.name +"\n"+ tmp.header
F = GraphIsomorphism(G, G)
F.header = header
var = _graph_isomorphism_var
F.add_clause([(False, var(u, u)) for u in enumerate_vertices(G)], strict=True)
return F
@register_cnfgen_subcommand
class GAutoCmdHelper(object):
"""Command line helper for Graph Automorphism formula
"""
name='gauto'
description='graph automorphism formula'
@staticmethod
def setup_command_line(parser):
"""Setup the command line options for graph automorphism formula
Arguments:
- `parser`: parser to load with options.
"""
SimpleGraphHelper.setup_command_line(parser)
@staticmethod
def build_cnf(args):
"""Build a graph automorphism formula according to the arguments
Arguments:
- `args`: command line options
"""
G = SimpleGraphHelper.obtain_graph(args)
return GraphAutomorphism(G)
@register_cnfgen_subcommand
class GIsoCmdHelper(object):
"""Command line helper for Graph Isomorphism formula
"""
name='giso'
description='graph isomorphism formula'
@staticmethod
def setup_command_line(parser):
"""Setup the command line options for graph isomorphism formula
Arguments:
- `parser`: parser to load with options.
"""
SimpleGraphHelper.setup_command_line(parser,suffix="1",required=True)
SimpleGraphHelper.setup_command_line(parser,suffix="2",required=True)
@staticmethod
def build_cnf(args):
"""Build a graph automorphism formula according to the arguments
Arguments:
- `args`: command line options
"""
G1 = SimpleGraphHelper.obtain_graph(args,suffix="1")
G2 = SimpleGraphHelper.obtain_graph(args,suffix="2")
return GraphIsomorphism(G1,G2)
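# Illustrative usage sketch (an addition for clarity, not part of the original module).
# It assumes networkx is installed and that the returned CNF object exposes cnfgen's
# usual dimacs() serialiser:
#     import networkx as nx
#     F = GraphIsomorphism(nx.path_graph(3), nx.cycle_graph(3))
#     print(F.dimacs())   # hand to a SAT solver; satisfiable iff the two graphs are isomorphic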
| gpl-3.0 | -2,192,167,595,266,328,300 | 25.994083 | 82 | 0.61815 | false |
lmb/Supermega | supermega/tests/test_session.py | 1 | 2433 | import unittest
import hashlib
import os
import random
from StringIO import StringIO
from .. import Session, User, File, Directory
from .. import errors
USERNAME = os.environ.get('MEGA_USERNAME', None)
PASSWORD = os.environ.get('MEGA_PASSWORD', None)
def random_string(length):
return (('%0'+str(length)+'x') % random.randrange(256**(length/2)))[:length]
def calculate_hash(string):
hash = hashlib.sha256()
hash.update(string)
return hash.hexdigest()
def verify_hash(file, chunks, obj, sha256):
hash = hashlib.sha256()
for chunk in chunks:
hash.update(chunk)
obj.assertEqual(hash.hexdigest(), sha256)
requires_account = unittest.skipUnless(USERNAME and PASSWORD,
"MEGA_USERNAME or MEGA_PASSWORD missing")
class TestSession(unittest.TestCase):
def setUp(self):
self.sess = Session()
def test_public_file_download(self):
url = 'https://mega.co.nz/#!2ctGgQAI!AkJMowjRiXVcSrRLn3d-e1vl47ZxZEK0CbrHGIKFY-E'
sha256 = '9431103cb989f2913cbc503767015ca22c0ae40942932186c59ffe6d6a69830d'
self.sess.download(verify_hash, url, self, sha256)
def test_ephemeral_account(self):
sess = Session.ephemeral()
sess.root # This triggers lazy-loading the datastore
def test_key_derivation(self):
self.assertEqual(User.derive_key("password"), 'd\x039r^n\xbd\x13\xa2_\x00R\x12\x9f|\xb1')
@requires_account
def test_create_from_env(self):
s = Session.from_env()
@requires_account
def test_print_tree(self):
sess = Session(USERNAME, PASSWORD)
sess.root.print_tree()
class TestFile(unittest.TestCase):
def setUp(self):
self.sess = Session(USERNAME, PASSWORD)
self.random_filename = random_string(5)
def tearDown(self):
try:
f = self.sess.root[self.random_filename]
f.delete()
        except (KeyError, errors.ObjectNotFound):
pass
@requires_account
def test_file_upload_download(self):
length = random.randint(120, 400) * 0x400
        contents = chr(random.randint(0, 255)) * length  # chr() accepts values 0-255 only
sha256 = calculate_hash(contents)
fileobj = StringIO(contents)
uploaded_file = File.upload(self.sess.root, fileobj,
name=self.random_filename, size=length)
uploaded_file.download(verify_hash, self, sha256)
class TestDirectory(unittest.TestCase):
def setUp(self):
self.sess = Session(USERNAME, PASSWORD)
@requires_account
def test_create(self):
root = self.sess.root
d = None
try:
random_dir = random_string(5)
d = Directory.create(random_dir, root)
finally:
if d:
d.delete()
| bsd-3-clause | -1,089,188,042,587,898,900 | 24.882979 | 91 | 0.731196 | false |
HRF92/myflask | foobar/app.py | 1 | 1483 | # -*- coding: utf-8 -*-
'''The app module, containing the app factory function.'''
from flask import Flask, render_template
from foobar.settings import ProdConfig
from foobar.assets import assets
from foobar.extensions import (
bcrypt,
cache,
db,
login_manager,
migrate,
debug_toolbar,
)
from foobar import public, user
def create_app(config_object=ProdConfig):
'''An application factory, as explained here:
http://flask.pocoo.org/docs/patterns/appfactories/
:param config_object: The configuration object to use.
'''
app = Flask(__name__)
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
return app
def register_extensions(app):
assets.init_app(app)
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
return None
def register_blueprints(app):
app.register_blueprint(public.views.blueprint)
app.register_blueprint(user.views.blueprint)
return None
def register_errorhandlers(app):
def render_error(error):
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template("{0}.html".format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
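# Illustrative usage sketch (an addition for clarity, not part of the original module).
# Typical application-factory usage; DevConfig is a hypothetical config class assumed
# to live in foobar.settings alongside ProdConfig:
#     from foobar.settings import DevConfig
#     app = create_app(DevConfig)
#     app.run(debug=True)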
| bsd-3-clause | 4,648,757,970,010,366,000 | 25.482143 | 73 | 0.687795 | false |
veltzer/demos-python | src/examples/short/multi_processing/single_process.py | 1 | 1245 | #!/usr/bin/env python
import fcntl
import os
import os.path
import sys
import time
'''
This is an example of how to make sure that only a single python process
of a specific kind is running...
References:
- http://stackoverflow.com/questions/220525/ensure-a-single-instance-of-an-application-in-linux
'''
do_fork = False
def single_runner():
program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
pid_file = '/tmp/{}.pid'.format(program_name)
try:
fp = os.open(pid_file, os.O_WRONLY | os.O_CREAT)
fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
# another instance is running
print('this program is already running...', file=sys.stderr)
sys.exit(1)
# this does not work
def single_runner_simple():
program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
pid_file = '/tmp/{}.pid'.format(program_name)
# if os.path.isfile(pid_file):
# os.unlink(pid_file)
try:
os.open(pid_file, os.O_CREAT | os.O_EXCL)
except IOError as e:
print(e)
# another instance is running
print('this program is already running...', file=sys.stderr)
sys.exit(1)
single_runner()
while True:
time.sleep(3600)
| gpl-3.0 | 3,317,245,707,534,463,500 | 24.408163 | 95 | 0.648996 | false |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/config/__init__.py | 1 | 12349 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/policy-forwarding/policies/policy/rules/rule/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the match
rule.
"""
__slots__ = ("_path_helper", "_extmethods", "__sequence_id")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__sequence_id = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sequence-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"policy-forwarding",
"policies",
"policy",
"rules",
"rule",
"config",
]
def _get_sequence_id(self):
"""
Getter method for sequence_id, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/config/sequence_id (uint32)
YANG Description: Unique sequence number for the policy rule.
"""
return self.__sequence_id
def _set_sequence_id(self, v, load=False):
"""
Setter method for sequence_id, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/config/sequence_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_sequence_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sequence_id() directly.
YANG Description: Unique sequence number for the policy rule.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sequence-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sequence_id must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sequence-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__sequence_id = t
if hasattr(self, "_set"):
self._set()
def _unset_sequence_id(self):
self.__sequence_id = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sequence-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
sequence_id = __builtin__.property(_get_sequence_id, _set_sequence_id)
_pyangbind_elements = OrderedDict([("sequence_id", sequence_id)])
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/policy-forwarding/policies/policy/rules/rule/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the match
rule.
"""
__slots__ = ("_path_helper", "_extmethods", "__sequence_id")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__sequence_id = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sequence-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"policy-forwarding",
"policies",
"policy",
"rules",
"rule",
"config",
]
def _get_sequence_id(self):
"""
Getter method for sequence_id, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/config/sequence_id (uint32)
YANG Description: Unique sequence number for the policy rule.
"""
return self.__sequence_id
def _set_sequence_id(self, v, load=False):
"""
Setter method for sequence_id, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/config/sequence_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_sequence_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sequence_id() directly.
YANG Description: Unique sequence number for the policy rule.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sequence-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sequence_id must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sequence-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__sequence_id = t
if hasattr(self, "_set"):
self._set()
def _unset_sequence_id(self):
self.__sequence_id = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sequence-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
sequence_id = __builtin__.property(_get_sequence_id, _set_sequence_id)
_pyangbind_elements = OrderedDict([("sequence_id", sequence_id)])
| apache-2.0 | -7,797,545,819,435,877,000 | 37.114198 | 432 | 0.5662 | false |
tonysyu/deli | deli/layout/grid_layout.py | 1 | 4491 | """ Tick generator classes and helper functions for calculating axis
tick-related values (i.e., bounds and intervals).
"""
import numpy as np
from traits.api import (Array, HasStrictTraits, Instance, Property,
cached_property)
from .bounding_box import BoundingBox
class BaseGridLayout(HasStrictTraits):
#: The bounding box containing data added to plot.
data_bbox = Instance(BoundingBox)
#: The data limits of in the grid direction.
axial_limits = Property(Array, depends_on='data_bbox.updated')
#: The grid positions in data space.
axial_offsets = Property(Array, depends_on='axial_limits')
@cached_property
def _get_axial_offsets(self):
a_min, a_max = self.axial_limits
return np.array(auto_ticks(a_min, a_max), np.float64)
class XGridLayout(BaseGridLayout):
@cached_property
def _get_axial_limits(self):
return self.data_bbox.x_limits
class YGridLayout(BaseGridLayout):
@cached_property
def _get_axial_limits(self):
return self.data_bbox.y_limits
def auto_ticks(x_min, x_max):
""" Finds locations for axis tick marks.
    Calculates the locations for tick marks on an axis spanning *x_min* to
    *x_max*. The tick interval itself is chosen automatically by
    ``auto_interval``.
    Parameters
    ----------
    x_min, x_max : number
        The lower and upper bounds of the axis. Only tick locations that
        fall within these bounds are returned.
    Returns
    -------
    A list of tick mark locations lying within [*x_min*, *x_max*].
"""
lower = float(x_min)
upper = float(x_max)
tick_interval = auto_interval(lower, upper)
# Compute the range of ticks values:
start = np.floor(lower / tick_interval) * tick_interval
end = np.floor(upper / tick_interval) * tick_interval
if upper > end:
end += tick_interval
ticks = np.arange(start, end + (tick_interval / 2.0), tick_interval)
return [tick for tick in ticks if tick >= x_min and tick <= x_max]
def auto_interval(data_low, data_high):
""" Calculates the tick interval for a range.
The function chooses the number of tick marks, which can be between
3 and 9 marks (including end points), and chooses tick intervals at
1, 2, 2.5, 5, 10, 20, ...
Returns
-------
interval : float
tick mark interval for axis
"""
x_range = float(data_high) - float(data_low)
# Choose from between 2 and 8 tick marks. Preference given to more ticks.
# Note: reverse order and see kludge below...
    divisions = np.arange(8.0, 2.0, -1.0) # (8, 7, ..., 3)
# Calculate the intervals for the divisions:
candidate_intervals = x_range / divisions
# Get magnitudes and mantissas for each candidate:
magnitudes = 10.0 ** np.floor(np.log10(candidate_intervals))
mantissas = candidate_intervals / magnitudes
# List of "pleasing" intervals between ticks on graph.
# Only the first magnitude are listed, higher mags others are inferred:
magic_intervals = np.array((1.0, 2.0, 2.5, 5.0, 10.0))
# Calculate the absolute differences between the candidates
# (with magnitude removed) and the magic intervals:
differences = abs(magic_intervals[:, np.newaxis] - mantissas)
# Find the division and magic interval combo that produce the
# smallest differences:
# KLUDGE: 'np.argsort' doesn't preserve the order of equal values,
# so we subtract a small, index dependent amount from each difference
# to force correct ordering.
sh = np.shape(differences)
small = 2.2e-16 * np.arange(sh[1]) * np.arange(sh[0])[:, np.newaxis]
small = small[::-1, ::-1] # reverse the order
differences = differences - small
best_mantissa = np.minimum.reduce(differences, axis=0)
best_magic = np.minimum.reduce(differences, axis=-1)
magic_index = np.argsort(best_magic)[0]
mantissa_index = np.argsort(best_mantissa)[0]
# The best interval is the magic_interval multiplied by the magnitude
# of the best mantissa:
interval = magic_intervals[magic_index]
magnitude = magnitudes[mantissa_index]
result = interval * magnitude
return result
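# Illustrative usage sketch (an addition for clarity, not part of the original module).
# The tick helpers are pure functions of the axis bounds, so they can be exercised
# directly; the bounds below are arbitrary example values:
#     auto_interval(0.3, 9.7)   # -> a "nice" spacing such as 1.0 or 2.0
#     auto_ticks(0.3, 9.7)      # -> tick locations at that spacing, clipped to [0.3, 9.7]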
| bsd-3-clause | 4,950,409,187,474,593,000 | 32.266667 | 77 | 0.669784 | false |
dcneeme/droidcontroller | droidcontroller/uniscada.py | 1 | 36234 | # This Python file uses the following encoding: utf-8
# send and receive monitoring and control messages to/from the UniSCADA monitoring system
# a separate thread for UDP listening?
# neeme
import time, datetime
import sqlite3
import traceback
from socket import *
import sys
import os
import gzip
import tarfile
import requests
import logging
log = logging.getLogger(__name__)
class UDPchannel():
''' Sends away the messages, combining different key:value pairs and adding host id and time. Listens for incoming commands and setup data.
Several UDPchannel instances can be used in parallel, to talk with different servers.
Used by sqlgeneral.py
'''
def __init__(self, id = '000000000000', ip = '127.0.0.1', port = 44445, receive_timeout = 0.1, retrysend_delay = 5, loghost = '0.0.0.0', logport=514): # delays in seconds
#from droidcontroller.connstate import ConnState
from droidcontroller.statekeeper import StateKeeper
self.sk = StateKeeper(off_tout=300, on_tout=0) # conn state with up/down times.
# do hard reboot via 0xFEED when changed to down.
# what to do if never up? keep hard rebooting?
try:
from droidcontroller.gpio_led import GPIOLED
self.led = GPIOLED() # led alarm and conn
except:
log.warning('GPIOLED not imported')
self.host_id = id
self.ip = ip
self.port = port
self.loghost = loghost
self.logport = logport
self.logaddr = (self.loghost,self.logport) # tuple
self.traffic = [0,0] # UDP bytes in, out
self.UDPSock = socket(AF_INET,SOCK_DGRAM)
self.UDPSock.settimeout(receive_timeout)
self.retrysend_delay = retrysend_delay
self.inum = 0 # sent message counter
self.UDPlogSock = socket(AF_INET,SOCK_DGRAM)
self.UDPlogSock.settimeout(None) # for syslog
self.UDPlogSock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1) # broadcast allowed
print('init: created uniscada and syslog connections to '+ip+':'+str(port)+' and '+loghost+':'+str(logport))
self.table = 'buff2server' # can be anything, not accessible to other objects WHY? would be useful to know the queue length...
self.Initialize()
def Initialize(self):
''' initialize time/related variables and create buffer database with one table in memory '''
self.ts = round(time.time(),1)
#self.ts_inum = self.ts # inum increase time, is it used at all? NO!
self.ts_unsent = self.ts # last unsent chk
self.ts_udpsent=self.ts
self.ts_udpgot=self.ts
self.conn = sqlite3.connect(':memory:')
#self.cur=self.conn.cursor() # cursors to read data from tables / cursor can be local
self.makebuff() # create buffer table for unsent messages
self.setIP(self.ip)
self.setLogIP(self.loghost)
def setIP(self, invar):
''' Set the monitoring server ip address '''
self.ip = invar
self.saddr = (self.ip,self.port) # refresh needed
def setLogIP(self, invar):
''' Set the syslog monitor ip address '''
self.loghost = invar
self.logaddr = (self.loghost,self.logport) # refresh needed
def setPort(self, invar):
''' Set the monitoring server UDP port '''
self.port = invar
self.saddr = (self.ip,self.port) # refresh needed
def setID(self, invar):
''' Set the host id '''
self.host_id = invar
def setRetryDelay(self, invar):
''' Set the monitoring server UDP port '''
self.retrysend_delay = invar
def getTS(self):
''' returns timestamps for last send trial and successful receive '''
return self.ts_udpsent, self.ts_udpgot
def getID(self):
''' returns host id for this instance '''
return self.host_id
def getIP(self):
''' returns server ip for this instance '''
return self.ip
def getLogIP(self):
''' returns syslog server ip for this instance '''
return self.loghost
def get_traffic(self):
return self.traffic # tuple in, out
def set_traffic(self, bytes_in = None, bytes_out = None): # set UDP traffic counters (it is possible to update only one of them as well)
''' Restores UDP traffic counter'''
if bytes_in != None:
if not bytes_in < 0:
self.traffic[0] = bytes_in
else:
print('invalid bytes_in',bytes_in)
if bytes_out != None:
if not bytes_out < 0:
self.traffic[1] = bytes_out
else:
print('invalid bytes_out',bytes_out)
def set_inum(self,inum = 0): # set message counter
self.inum=inum
def get_inum(self): #get message counter
return self.inum
def get_ts_udpgot(self): #get ts of last ack from monitoring server
return self.ts_udpgot
def makebuff(self): # drops buffer table and creates
Cmd='drop table if exists '+self.table
sql="CREATE TABLE "+self.table+"(sta_reg,status NUMERIC,val_reg,value,ts_created NUMERIC,inum NUMERIC,ts_tried NUMERIC);" # semicolon needed for NPE for some reason!
try:
self.conn.execute(Cmd) # drop the table if it exists
self.conn.executescript(sql) # read table into database
self.conn.commit()
msg='sqlread: successfully (re)created table '+self.table
return 0
except:
msg='sqlread: '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
time.sleep(1)
return 1
def delete_buffer(self): # empty buffer
Cmd='delete from '+self.table
try:
self.conn.execute(Cmd)
self.conn.commit()
print('buffer content deleted')
except:
traceback.print_exc()
def send(self, servicetuple): # store service components to buffer for send and resend
''' Adds service components into buffer table to be sent as a string message
the components are sta_reg = '', status = 0, val_reg = '', value = ''
'''
if servicetuple == None:
log.warning('ignored servicetuple with value None')
return 2
try:
sta_reg=str(servicetuple[0])
status=int(servicetuple[1])
val_reg=str(servicetuple[2])
value=str(servicetuple[3])
self.ts = round(time.time(),1)
Cmd="INSERT into "+self.table+" values('"+sta_reg+"',"+str(status)+",'"+val_reg+"','"+value+"',"+str(self.ts)+",0,0)" # inum and ts_tried left initially empty
#print(Cmd) # debug
self.conn.execute(Cmd)
return 0
except:
msg='FAILED to write svc into buffer'
#syslog(msg) # incl syslog
print(msg)
traceback.print_exc()
return 1
def unsent(self): # delete unsent for too long messages - otherwise the udp messages will contain older key:value duplicates!
''' Counts the non-acknowledged messages and removes older than 3 times retrysend_delay '''
if self.ts - self.ts_unsent < self.retrysend_delay / 2: # no need to recheck too early
return 0
self.ts = round(time.time(),1)
self.ts_unsent = self.ts
mintscreated=0
maxtscreated=0
try:
Cmd="BEGIN IMMEDIATE TRANSACTION" # buff2server
self.conn.execute(Cmd)
Cmd="SELECT count(sta_reg),min(ts_created),max(ts_created) from "+self.table+" where ts_created+0+"+str(3*self.retrysend_delay)+"<"+str(self.ts) # yle 3x regular notif
cur = self.conn.cursor()
cur.execute(Cmd)
for rida in cur: # only one line for count if any at all
delcount=rida[0] # int
if delcount>0: # stalled services found
#print repr(rida) # debug
mintscreated=rida[1]
maxtscreated=rida[2]
print(delcount,'services lines waiting ack for',10*self.retrysend_delay,' s to be deleted')
Cmd="delete from "+self.table+" where ts_created+0+"+str(10*self.retrysend_delay)+"<"+str(self.ts) # +" limit 10" # limit lisatud 23.03.2014 aga miks?
self.conn.execute(Cmd)
Cmd="SELECT count(sta_reg),min(ts_created),max(ts_created) from "+self.table
cur.execute(Cmd)
for rida in cur: # only one line for count if any at all
delcount=rida[0] # int
if delcount>50: # delete all!
Cmd="delete from "+self.table
self.conn.execute(Cmd)
msg='deleted '+str(delcount)+' unsent messages from '+self.table+'!'
print(msg)
#syslog(msg)
self.conn.commit() # buff2server transaction end
return delcount # 0
            #time.sleep(1) # just to try
except:
msg='problem with unsent, '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
#sys.stdout.flush()
time.sleep(1)
return 1
#unsent() end
def buff2server(self): # send the buffer content
''' UDP monitoring message creation and sending (using udpsend)
based on already existing buff2server data, does the retransmits too if needed.
buff2server rows successfully send will be deleted by udpread() based on in: contained in the received message
'''
timetoretry = 0 # local
ts_created = 0 # local
svc_count = 0 # local
sendstring = ''
timetoretry=int(self.ts-self.retrysend_delay) # send again services older than that
Cmd = "BEGIN IMMEDIATE TRANSACTION" # buff2server
try:
self.conn.execute(Cmd)
except:
print('could not start transaction on self.conn, '+self.table)
traceback.print_exc()
Cmd = "SELECT * from "+self.table+" where ts_tried=0 or (ts_tried+0>1358756016 and ts_tried+0<"+str(self.ts)+"+0-"+str(timetoretry)+") AND status+0 != 3 order by ts_created asc limit 30"
try:
cur = self.conn.cursor()
cur.execute(Cmd)
for srow in cur:
#print(repr(srow)) # debug, what will be sent
if svc_count == 0: # on first row only increase the inum!
self.inum=self.inum+1 # increase the message number / WHY HERE? ACK WILL NOT DELETE THE ROWS!
if self.inum > 65535:
self.inum = 1 # avoid zero for sending
#self.ts_inum=self.ts # time to set new inum value
svc_count=svc_count+1
sta_reg=srow[0]
status=srow[1]
val_reg=srow[2]
value=srow[3]
ts_created=srow[4]
if val_reg != '':
sendstring += val_reg+":"+str(value)+"\n"
if sta_reg != '':
sendstring += sta_reg+":"+str(status)+"\n"
Cmd="update "+self.table+" set ts_tried="+str(int(self.ts))+",inum="+str(self.inum)+" where sta_reg='"+sta_reg+"' and status="+str(status)+" and ts_created="+str(ts_created)
#print "update Cmd=",Cmd # debug
self.conn.execute(Cmd)
if svc_count>0: # there is something (changed services) to be sent!
#print(svc_count,"services to send using inum",self.inum) # debug
self.udpsend(sendstring) # sending away
Cmd="SELECT count(inum) from "+self.table # unsent service count in buffer
cur.execute(Cmd) #
for srow in cur:
svc_count2=int(srow[0]) # total number of unsent messages
if svc_count2>30: # do not complain below 30
print(svc_count2,"SERVICES IN BUFFER waiting for ack from monitoring server")
except: # buff2server read unsuccessful. unlikely...
msg='problem with '+self.table+' read '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
#sys.stdout.flush()
time.sleep(1)
return 1
self.conn.commit() # buff2server transaction end
return 0
# udpmessage() end
# #################
def udpsend(self, sendstring = ''): # actual udp sending, no resend. give message as parameter. used by buff2server too.
''' Sends UDP data immediately, adding self.inum if >0. '''
if sendstring == '': # nothing to send
print('udpsend(): nothing to send!')
return 1
self.ts = round(time.time(),1)
sendstring += "id:"+str(self.host_id)+"\n" # loodame, et ts_created on enam-vahem yhine neil teenustel...
if self.inum > 0: # "in:inum" to be added
sendstring += "in:"+str(self.inum)+","+str(int(round(self.ts)))+"\n"
self.traffic[1]=self.traffic[1]+len(sendstring) # adding to the outgoing UDP byte counter
try:
self.led.commLED(0) # off, blinking shows sending and time to ack
except:
pass
try:
            sendlen=self.UDPSock.sendto(sendstring.encode('utf-8'),self.saddr) # returns the number of bytes sent
self.traffic[1]=self.traffic[1]+sendlen # traffic counter udp out
msg='==>> sent ' +str(sendlen)+' bytes to '+str(repr(self.saddr))+' '+sendstring.replace('\n',' ') # show as one line
print(msg)
#syslog(msg)
sendstring=''
self.ts_udpsent=self.ts # last successful udp send
return sendlen
except:
msg='udp send failure in udpsend() to saddr '+repr(self.saddr)+', lasting s '+str(int(self.ts - self.ts_udpsent)) # cannot send, this means problem with connectivity
#syslog(msg)
print(msg)
traceback.print_exc()
try:
self.led.alarmLED(1) # send failure
except:
pass
return None
def read_buffer(self, mode = 0): # 0 prints content, 1 is silent but returns record count, min and max ts
''' reads the content of the buffer, debugging needs mainly.
Returns the number of waiting to be deleted messages, the earliest and the latest timestamps. '''
if mode == 0: # just print the waiting messages
Cmd ="SELECT * from "+self.table
cur = self.conn.cursor()
cur.execute(Cmd)
for row in cur:
print(repr(row))
elif mode == 1: # stats
Cmd ="SELECT count(ts_created),min(ts_created),max(ts_created) from "+self.table
cur = self.conn.cursor()
cur.execute(Cmd)
for row in cur:
return row[0],row[1],row[2] # print(repr(row))
def udpread(self):
''' Checks received data for monitoring server to see if the data contains key "in",
then deletes the rows with this inum in the sql table.
If the received datagram contains more data, these key:value pairs are
returned as dictionary.
'''
data=''
data_dict={} # possible setup and commands
sendstring = ''
try: # if anything is comes into udp buffer before timepout
buf=1024
rdata,raddr = self.UDPSock.recvfrom(buf)
data=rdata.decode("utf-8") # python3 related need due to mac in hex
except:
#print('no new udp data received') # debug
#traceback.print_exc()
return None
if len(data) > 0: # something arrived
#log.info('>>> got from receiver '+str(repr(raddr))+' '+str(repr(data)))
self.traffic[0]=self.traffic[0]+len(data) # adding top the incoming UDP byte counter
log.debug('<<<< got from receiver '+str(data.replace('\n', ' ')))
if (int(raddr[1]) < 1 or int(raddr[1]) > 65536):
msg='illegal remote port '+str(raddr[1])+' in the message received from '+raddr[0]
print(msg)
#syslog(msg)
if raddr[0] != self.ip:
msg='illegal sender '+str(raddr[0])+' of message: '+data+' at '+str(int(self.ts)) # ignore the data received!
print(msg)
#syslog(msg)
data='' # data destroy
if "id:" in data: # first check based on host id existence in the received message, must exist to be valid message!
in_id=data[data.find("id:")+3:].splitlines()[0]
if in_id != self.host_id:
log.warning("invalid id "+in_id+" in server message from "+str(raddr[0])) # this is not for us!
data=''
return data # error condition, traffic counter was still increased
else:
self.ts_udpgot=self.ts # timestamp of last udp received
try:
self.led.commLED(1) # data from server, comm OK
except:
pass
self.sk.up()
lines=data.splitlines() # split message into key:value lines
for i in range(len(lines)): # looking into every member of incoming message
if ":" in lines[i]:
#print " "+lines[i]
line = lines[i].split(':')
sregister = line[0] # setup reg name
svalue = line[1] # setup reg value
log.debug('processing key:value '+sregister+':'+svalue)
if sregister != 'in' and sregister != 'id': # may be setup or command (cmd:)
msg='got setup/cmd reg:val '+sregister+':'+svalue # need to reply in order to avoid retransmits of the command(s)
log.info(msg)
data_dict.update({ sregister : svalue }) # in and id are not included in dict
#udp.syslog(msg) # cannot use udp here
#sendstring += sregister+":"+svalue+"\n" # add to the answer - better to answer with real values immediately after change
else:
if sregister == "in": # one such a key in message
inumm=eval(data[data.find("in:")+3:].splitlines()[0].split(',')[0]) # loodaks integerit
if inumm >= 0 and inumm<65536: # valid inum, response to message sent if 1...65535. datagram including "in:0" is a server initiated "fast communication" message
#print "found valid inum",inum,"in the incoming message " # temporary
msg='got ack '+str(inumm)+' in message: '+data.replace('\n',' ')
log.debug(msg)
#syslog(msg)
Cmd="BEGIN IMMEDIATE TRANSACTION" # buff2server, to delete acknowledged rows from the buffer
self.conn.execute(Cmd) # buff2server ack transactioni algus, loeme ja kustutame saadetud read
Cmd="DELETE from "+self.table+" WHERE inum='"+str(inumm)+"'" # deleting all rows where inum matches server ack
try:
self.conn.execute(Cmd) # deleted
except:
msg='problem with '+Cmd+'\n'+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
time.sleep(1)
self.conn.commit() # buff2server transaction end
#if len(sendstring) > 0:
# self.udpsend(sendstring) # send the response right away to avoid multiple retransmits
# log.info('response to server: '+str(sendstring)) # this answers to the server but does not update the setup or service table yet!
                # no reply is sent here
return data_dict # possible key:value pairs here for setup change or commands. returns {} for just ack with no cmd
else:
return None
def syslog(self, msg,logaddr=()): # sending out syslog message to self.logaddr.
msg=msg+"\n" # add newline to the end
#print('syslog send to',self.logaddr) # debug
dnsize=0
if self.logaddr == None and logaddr != ():
self.logaddr = logaddr
try: #
self.UDPlogSock.sendto(msg.encode('utf-8'),self.logaddr)
if not '255.255.' in self.logaddr[0] and not '10.0.' in self.logaddr[0] and not '192.168.' in self.logaddr[0]: # sending syslog out of local network
dnsize=len(msg) # udp out increase, payload only
except:
            # if UDP does not work, syslog cannot be sent either
print('could NOT send syslog message to '+repr(self.logaddr))
traceback.print_exc()
self.traffic[1] += dnsize # udp traffic
return 0
def comm(self): # do this regularly, blocks for the time of socket timeout!
''' Communicates with monitoring server, listens to return cmd and setup key:value and sends waiting data. '''
self.ts = round(time.time(),1) # timestamp
self.unsent() # delete old records
udpgot = self.udpread() # check for incoming udp data
# parse_udp()
self.buff2server() # send away. the ack for this is available on next comm() hopefully
return udpgot
class TCPchannel(UDPchannel): # used this parent to share self.syslog()
''' Communication via TCP (pull, push, calendar) '''
def __init__(self, id = '000000000000', supporthost = 'www.itvilla.ee', directory = '/support/pyapp/', uploader='/upload.php', base64string='cHlhcHA6QkVMYXVwb2E='):
self.supporthost = supporthost
self.uploader=uploader
self.base64string=base64string
self.traffic = [0,0] # TCP bytes in, out
self.setID(id)
self.directory=directory
self.ts_cal=time.time()
self.conn = sqlite3.connect(':memory:') # for calendar table
self.makecalendar()
def setID(self, invar):
''' Set the host id '''
self.host_id = invar
def getID(self):
'''returns server ip for this instance '''
return self.host_id
def get_traffic(self): # TCP traffic counter
return self.traffic # tuple in, out
def set_traffic(self, bytes_in = None, bytes_out = None): # set TCP traffic counters (it is possible to update only one of them as well)
''' Restores TCP traffic counter [in, out] '''
if bytes_in != None:
if not bytes_in < 0:
self.traffic[0] = bytes_in
log.debug('set bytes_in to '+str(bytes_in))
else:
log.warning('invalid bytes_in '+str(bytes_in))
if bytes_out != None:
if not bytes_out < 0:
self.traffic[1] = bytes_out
                log.debug('set bytes_out to '+str(bytes_out))
else:
print('invalid bytes_out',bytes_out)
                log.warning('invalid bytes_out '+str(bytes_out))
def get_ts_cal(self): # last time calendar was accessed
return int(round(self.ts_cal))
def push(self, filename): # send (gzipped) file to supporthost
''' push file filename to supporthost directory using uploader and base64string (for basic auth) '''
if os.path.isfile(filename):
pass
else:
msg='push: found no file '+filename
print(msg)
return 2 # no such file
if '.gz' in filename or '.tgz' in filename: # packed already
pass
else: # lets unpack too
f_in = open(filename, 'rb')
f_out = gzip.open(filename+'.gz', 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
filename = filename+'.gz' # new filename to send
dnsize=os.stat(filename)[6] # file size to be sent
msg='the file was gzipped to '+filename+' with size '+str(dnsize) # the original file is kept!
print(msg)
#udp.syslog(msg)
try:
r = requests.post('http://'+self.supporthost+self.uploader,
files={'file': open(filename, 'rb')},
headers={'Authorization': 'Basic '+self.base64string},
data={'mac': self.directory+self.host_id+'/'}
)
print('post response:',r.text) # nothing?
msg='file '+filename+' with size '+str(dnsize)+' sent to '+self.directory+self.host_id+'/'
#udp.syslog(msg)
print(msg)
self.traffic[1] += dnsize
return 0
except:
msg='the file '+filename+' was NOT sent to '+self.directory+self.host_id+'/ '+str(sys.exc_info()[1])
#udp.syslog(msg)
print(msg)
#traceback.print_exc()
return 1
def pull(self, filename, filesize, start=0):
''' Retrieves file from support server via http get, uncompressing
too if filename contains .gz or tgz and succesfully retrieved.
Parameter start=0 normally, higher with resume.
'''
oksofar=1 # success flag
filename2='' # for uncompressed from the downloaded file
filepart=filename+'.part' # temporary, to be renamed to filename when complete
filebak=filename+'.bak'
dnsize=0 # size of downloaded file
if start>filesize:
msg='pull parameters: file '+filename+' start '+str(start)+' above filesize '+str(filesize)
log.debug(msg)
#udp.syslog(msg)
return 99 # illegal parameters or file bigger than stated during download resume
req = 'http://'+self.supporthost+self.directory+self.host_id+'/'+filename
pullheaders={'Range': 'bytes=%s-' % (start)} # with requests
msg='trying '+req+' from byte '+str(start)+' using '+repr(pullheaders)
log.info(msg)
#udp.syslog(msg)
try:
response = requests.get(req, headers=pullheaders) # with python3
output = open(filepart,'wb')
output.write(response.content)
output.close()
except:
msg='pull: partial or failed download of temporary file '+filepart+' '+str(sys.exc_info()[1])
log.warning(msg)
#udp.syslog(msg)
#traceback.print_exc()
try:
dnsize=os.stat(filepart)[6] # int(float(subexec('ls -l '+filename,1).split(' ')[4]))
except:
msg='pull: got no size for file '+os.getcwd()+'/'+filepart+' '+str(sys.exc_info()[1])
print(msg)
#udp.syslog(msg)
#traceback.print_exc()
oksofar=0
if dnsize == filesize: # ok
msg='pull: file '+filename+' download OK, size '+str(dnsize)
print(msg)
#udp.syslog(msg)
try:
os.rename(filename, filebak) # keep the previous version if exists
#msg='renamed '+filename+' to '+filebak
except:
#traceback.print_exc()
msg='FAILED to rename '+filename+' to '+filebak+' '+str(sys.exc_info()[1])
print(msg)
#udp.syslog(msg)
oksofar=0
try:
os.rename(filepart, filename) #rename filepart to filename2
#msg='renamed '+filepart+' to '+filename
except:
msg='FAILED to rename '+filepart+' to '+filename+' '+str(sys.exc_info()[1])
print(msg)
#udp.syslog(msg)
oksofar=0
#traceback.print_exc()
if oksofar == 0: # trouble, exit
self.traffic[0] += dnsize
return 1
if '.gz' in filename: # lets unpack too
filename2=filename.replace('.gz','')
try:
                os.rename(filename2, filename2+'.bak') # keep the previous version if it exists
except:
#traceback.print_exc()
pass
try:
f = gzip.open(filename,'rb')
output = open(filename2,'wb')
output.write(f.read());
output.close() # file with filename2 created
msg='pull: gz file '+filename+' unzipped to '+filename2+', previous file kept as '+filebak
print(msg)
except:
                os.rename(filename2+'.bak', filename2) # restore the previous version if unzip failed
msg='pull: file '+filename+' unzipping failure, previous file '+filename2+' restored. '+str(sys.exc_info()[1])
#traceback.print_exc()
print(msg)
#udp.syslog(msg)
self.traffic[0] += dnsize
return 1
if '.tgz' in filename: # possibly contains a directory
try:
f = tarfile.open(filename,'r')
f.extractall() # extract all into the current directory
f.close()
msg='pull: tgz file '+filename+' successfully unpacked'
print(msg)
#udp.syslog(msg)
except:
msg='pull: tgz file '+filename+' unpacking failure! '+str(sys.exc_info()[1])
#traceback.print_exc()
print(msg)
#udp.syslog(msg)
self.traffic[0] += dnsize
return 1
# temporarely switching off this chmod feature, failing!!
#if '.py' in filename2 or '.sh' in filename2: # make it executable, only works with gzipped files!
# try:
# st = os.stat('filename2')
# os.chmod(filename2, st.st_mode | stat.S_IEXEC) # add +x for the owner
# msg='made the pulled file executable'
# print(msg)
# syslog(msg)
# return 0
# except:
# msg='FAILED to make pulled file executable!'
# print(msg)
## syslog(msg)
# traceback.print_exc()
# return 99
self.traffic[0] += dnsize
return 0
else:
if dnsize<filesize:
msg='pull: file '+filename+' received partially with size '+str(dnsize)
print(msg)
#udp.syslog(msg)
self.traffic[0] += dnsize
return 1 # next try will continue
else:
msg='pull: file '+filename+' received larger than unexpected, in size '+str(dnsize)
print(msg)
#udp.syslog(msg)
self.traffic[0] += dnsize
return 99
def makecalendar(self, table='calendar'): # creates buffer table in memory for calendar events
Cmd='drop table if exists '+table
sql="CREATE TABLE "+table+"(title,timestamp,value);CREATE INDEX ts_calendar on "+table+"(timestamp);" # semicolon needed for NPE for some reason!
try:
self.conn.execute(Cmd) # drop the table if it exists
self.conn.executescript(sql) # read table into database
self.conn.commit()
msg='successfully (re)created table '+table
return 0
except:
msg='sqlread: '+str(sys.exc_info()[1])
print(msg)
#udp.syslog(msg)
traceback.print_exc()
time.sleep(1)
return 1
def get_calendar(self, id, days = 3): # query to SUPPORTHOST, returning txt. started by cmd:GCAL too for testing
''' google calendar events via monitoring server '''
# example: http://www.itvilla.ee/cgi-bin/gcal.cgi?mac=000101000001&days=10
self.ts_cal=time.time() # calendar access timestamp
cur=self.conn.cursor()
req = 'http://www.itvilla.ee/cgi-bin/gcal.cgi?mac='+id+'&days='+str(days)+'&format=json'
headers={'Authorization': 'Basic YmFyaXg6Y29udHJvbGxlcg=='} # Base64$="YmFyaXg6Y29udHJvbGxlcg==" ' barix:controller
msg='starting gcal query '+req
print(msg) # debug
try:
response = requests.get(req, headers = headers)
except:
msg='gcal query '+req+' failed!'
traceback.print_exc()
print(msg)
#udp.syslog(msg)
            return 1 # if the gcal connection fails, stop here and do not destroy the old data
try:
events = eval(response.content) # string to list
except:
msg='getting calendar events failed for host id '+id
print(msg)
#udp.syslog(msg)
traceback.print_exc() # debug
            return 1 # if no valid events were received, stop as well
#print(repr(events)) # debug
Cmd = "BEGIN IMMEDIATE TRANSACTION"
try:
self.conn.execute(Cmd)
Cmd="delete from calendar"
self.conn.execute(Cmd)
for event in events:
#print('event',event) # debug
columns=str(list(event.keys())).replace('[','(').replace(']',')')
values=str(list(event.values())).replace('[','(').replace(']',')')
#columns=str(list(event.keys())).replace('{','(').replace('}',')')
#values=str(list(event.values())).replace('{','(').replace('}',')')
Cmd = "insert into calendar"+columns+" values"+values
print(Cmd) # debug
self.conn.execute(Cmd)
self.conn.commit()
msg='calendar table updated'
print(msg)
            #udp.syslog(msg) # FIXME - syslog via UDPchannel does not work. syslog() is found, but not its logaddr?
#self.syslog(msg) # common parent UDP TCP channel
return 0
except:
msg='delete + insert to calendar table failed!'
print(msg)
#udp.syslog(msg)
print('logaddr in tcp',self.logaddr)
#self.syslog(msg,logaddr=self.logaddr) # class UDPchannel is parent to TCPchannel
#UDPchannel.syslog(msg)
traceback.print_exc() # debug
            return 1 # if the insert fails, the delete is rolled back too (single transaction)
def chk_calevents(self, title = ''): # set a new setpoint if found in table calendar (sharing database connection with setup)
''' Obsolete, functionality moved to gcal.py '''
ts=time.time()
cur=self.conn.cursor()
value='' # local string value
if title == '':
return None
Cmd = "BEGIN IMMEDIATE TRANSACTION"
try:
            self.conn.execute(Cmd)
Cmd="select value from calendar where title='"+title+"' and timestamp+0<"+str(ts)+" order by timestamp asc" # find the last passed event value
cur.execute(Cmd)
for row in cur:
value=row[0] # overwrite with the last value before now
                #print(Cmd, ', value', value) # debug; there may be several rows, the last value stays effective for each title
self.conn.commit()
return value # last one for given title becomes effective. can be empty string too, then use default value for setpoint related to title
except:
traceback.print_exc()
return None
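# Illustrative sketch (not part of the original module): one way the calendar
# helpers above could be chained together. "channel" is assumed to be an
# instance of the channel class defined above; the MAC id and title are
# placeholders, not values taken from the original code.
def _example_refresh_calendar(channel, mac_id='000101000001', title='heating'):
    """(Re)create the calendar buffer, pull fresh events and read a setpoint."""
    if channel.makecalendar() != 0:                   # (re)create the in-memory table
        return None
    if channel.get_calendar(mac_id, days=3) != 0:     # fill it from the monitoring server
        return None
    return channel.chk_calevents(title=title)         # last effective value for the title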
| gpl-3.0 | 1,857,257,954,211,608,300 | 41.280047 | 194 | 0.545951 | false |
exsodus3249/kite | src/back/tests/test_maildir.py | 2 | 1429 | import kite.maildir
import unittest
import types
mockdata = {"from": {"address": "gerard", "name": ""}, "subject": "test", "contents": "empty", "to": "gerard", "date": "November 13, 2007"}
class EmptyObject(object):
"""Empty class used by mocks. Required because python doesn't let us add attributes
to an empty object()"""
pass
class MockMailItem(object):
def __init__(self):
self.fp = EmptyObject()
self.fp.read = types.MethodType(lambda self: mockdata["contents"], self.fp)
def getheaders(self, header):
hd = header.lower()
if hd in mockdata:
return (mockdata[hd], "Nulltuple")
class MockMailDir(object):
def __init__(self):
self.mi = MockMailItem()
def iteritems(self):
return [("randomId", self.mi)]
def get(self, id):
if id == "randomId":
return self.mi
# FIXME: write a better test
#class TestMailDir(unittest.TestCase):
# def test_extract_email(self):
# mi = MockMailItem()
# self.assertEqual(kite.maildir.extract_email(mi), mockdata)
#
# def test_get_emails(self):
# md = MockMailDir()
# parsedMaildir = kite.maildir.get_emails(md)
# self.assertEqual(len(parsedMaildir), 1)
#
# def test_get_email(self):
# md = MockMailDir()
# parsedEmail = kite.maildir.get_email(md, "randomId")
# self.assertEqual(parsedEmail, mockdata)
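# Illustrative sketch (not part of the original tests): a minimal enabled check
# that only exercises the mocks defined above, so it runs without assuming any
# particular behaviour of kite.maildir.
class TestMocks(unittest.TestCase):
    def test_mock_mail_item_headers_and_body(self):
        mi = MockMailItem()
        self.assertEqual(mi.getheaders("Subject")[0], "test")
        self.assertEqual(mi.fp.read(), "empty")
    def test_mock_maildir_get(self):
        md = MockMailDir()
        self.assertIs(md.get("randomId"), md.mi)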
| bsd-3-clause | -2,268,751,218,875,746,000 | 30.065217 | 139 | 0.614416 | false |
mauricioabreu/speakerfight | deck/models.py | 1 | 12968 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ValidationError
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db import models, transaction
from django.db.models import Count
from django.db.models.aggregates import Sum
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.utils import timezone, six
from django.utils.encoding import python_2_unicode_compatible
from django_extensions.db.fields import AutoSlugField
from allauth.account.signals import user_signed_up
from textwrap import dedent
from jury.models import Jury
class DeckBaseManager(models.QuerySet):
def cached_authors(self):
return super(DeckBaseManager, self).select_related('author')
def published_ones(self):
return self.cached_authors().filter(is_published=True)
def upcoming(self, published_only=True):
return self.filter(due_date__gte=timezone.now(), is_published=published_only)
def order_by_never_voted(self, user_id):
if self.model != Proposal:
raise AttributeError(
"%s object has no attribute %s" % (
self.model, 'order_by_never_voted'))
order_by_criteria = dedent("""
SELECT 1
FROM deck_vote
WHERE deck_vote.user_id = %s AND
deck_vote.proposal_id = deck_proposal.activity_ptr_id
LIMIT 1
""")
new_ordering = ['-never_voted']
if settings.DATABASES['default'].get('ENGINE') == 'django.db.backends.sqlite3':
new_ordering = ['never_voted']
new_ordering.extend(Proposal._meta.ordering)
return self.extra(
select=dict(never_voted=order_by_criteria % user_id),
order_by=new_ordering
)
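# Illustrative sketch (not part of the original app): how the custom queryset
# above is typically consumed from view code. "user" and "event" are
# placeholders for a logged-in user and an Event instance.
def _example_open_proposals_for(user, event):
    """Published proposals of an event, the ones the user never voted on first."""
    return (Proposal.objects
            .published_ones()
            .filter(event=event)
            .order_by_never_voted(user.id))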
@python_2_unicode_compatible
class DeckBaseModel(models.Model):
title = models.CharField(_('Title'), max_length=200)
slug = AutoSlugField(populate_from='title', overwrite=True,
max_length=200, unique=True, db_index=True)
description = models.TextField(
_('Description'), max_length=10000, blank=True)
created_at = models.DateTimeField(_('Created At'), auto_now_add=True)
is_published = models.BooleanField(_('Publish'), default=True)
# relations
author = models.ForeignKey(to=settings.AUTH_USER_MODEL,
related_name='%(class)ss')
# managers
objects = DeckBaseManager.as_manager()
class Meta:
abstract = True
def __str__(self):
return six.text_type(self.title)
@python_2_unicode_compatible
class Vote(models.Model):
ANGRY, SLEEPY, SAD, HAPPY, LAUGHING = range(-1, 4)
VOTE_TITLES = dict(
angry=_('Angry'), sad=_('Sad'),
sleepy=_('Sleepy'), happy=_('Happy'),
laughing=_('Laughing')
)
VOTE_RATES = ((ANGRY, 'angry'),
(SAD, 'sad'),
(SLEEPY, 'sleepy'),
(HAPPY, 'happy'),
(LAUGHING, 'laughing'))
rate = models.SmallIntegerField(_('Rate Index'), null=True, blank=True,
choices=VOTE_RATES)
# relations
proposal = models.ForeignKey(to='deck.Proposal', related_name='votes')
user = models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='votes')
class Meta:
verbose_name = _('Vote')
verbose_name_plural = _('Votes')
unique_together = (('proposal', 'user'),)
def __str__(self):
return six.text_type("{0.user}: {0.rate} in {0.proposal}".format(self))
def save(self, *args, **kwargs):
validation_message = None
user_is_in_jury = self.proposal.event.jury.users.filter(
pk=self.user.pk).exists()
if (self.user.is_superuser or user_is_in_jury):
pass
elif self.user == self.proposal.author:
validation_message = _(u'You cannot Rate your own proposals.')
elif not self.proposal.event.allow_public_voting:
validation_message = _(u"Proposal doesn't accept Public Voting.")
if validation_message:
raise ValidationError(_(validation_message))
return super(Vote, self).save(*args, **kwargs)
class Activity(DeckBaseModel):
PROPOSAL = 'proposal'
WORKSHOP = 'workshop'
OPENNING = 'openning'
COFFEEBREAK = 'coffee-break'
LUNCH = 'lunch'
LIGHTNINGTALKS = 'lightning-talks'
ENDING = 'ending'
ACTIVITY_TYPES = (
(PROPOSAL, _('Proposal')),
(WORKSHOP, _('Workshop')),
(OPENNING, _('Openning')),
(COFFEEBREAK, _('Coffee Break')),
(LUNCH, _('Lunch')),
(LIGHTNINGTALKS, _('Lightning Talks')),
(ENDING, _('Ending')),
)
start_timetable = models.TimeField(
_('Start Timetable'), null=True, blank=False)
end_timetable = models.TimeField(
_('End Timetable'), null=True, blank=False)
track_order = models.SmallIntegerField(_('Order'), null=True, blank=True)
activity_type = models.CharField(
_('Type'), choices=ACTIVITY_TYPES, default=PROPOSAL, max_length=50)
# relations
track = models.ForeignKey(to='deck.Track', related_name='activities',
null=True, blank=True)
class Meta:
ordering = ('track_order', 'start_timetable', 'pk')
verbose_name = _('Activity')
verbose_name_plural = _('Activities')
@property
def timetable(self):
if all([self.start_timetable is None, self.end_timetable is None]):
return '--:--'
return '{0} - {1}'.format(
self.start_timetable.strftime('%H:%M'),
self.end_timetable.strftime('%H:%M')
)
class Proposal(Activity):
is_approved = models.BooleanField(_('Is approved'), default=False)
more_information = models.TextField(
_('More information'), max_length=10000, null=True, blank=True)
# relations
event = models.ForeignKey(to='deck.Event', related_name='proposals')
class Meta:
ordering = ['title']
verbose_name = _('Proposal')
verbose_name_plural = _('Proposals')
def save(self, *args, **kwargs):
if not self.pk and self.event.due_date_is_passed:
raise ValidationError(
_("This Event doesn't accept Proposals anymore."))
return super(Proposal, self).save(*args, **kwargs)
@property
def get_rate(self):
rate = None
try:
rate = self.votes__rate__sum
except AttributeError:
rate = self.votes.aggregate(Sum('rate'))['rate__sum']
finally:
return rate or 0
def rate(self, user, rate):
rate_int = [r[0] for r in Vote.VOTE_RATES if rate in r][0]
with transaction.atomic():
self.votes.update_or_create(user=user, defaults={'rate': rate_int})
def user_already_voted(self, user):
if isinstance(user, AnonymousUser):
return False
return self.votes.filter(user=user).exists()
def user_can_vote(self, user):
can_vote = False
if self.author == user and not self.event.author == user:
pass
elif self.event.allow_public_voting:
can_vote = True
elif user.is_superuser:
can_vote = True
elif self.event.jury.users.filter(pk=user.pk).exists():
can_vote = True
return can_vote
def user_can_approve(self, user):
can_approve = False
if user.is_superuser:
can_approve = True
elif self.event.jury.users.filter(pk=user.pk).exists():
can_approve = True
return can_approve
def get_absolute_url(self):
return reverse('view_event', kwargs={'slug': self.event.slug}) + \
'#' + self.slug
def approve(self):
if self.is_approved:
raise ValidationError(_("This Proposal was already approved."))
self.is_approved = True
self.save()
def disapprove(self):
if not self.is_approved:
raise ValidationError(_("This Proposal was already disapproved."))
self.is_approved = False
self.save()
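# Illustrative sketch (not part of the original app): the voting flow the
# Proposal methods above are designed for. "user" and "proposal" are
# placeholders coming from a view.
def _example_cast_vote(user, proposal):
    """Rate a proposal as the given user if the model rules allow it."""
    if not proposal.user_can_vote(user):
        raise ValidationError(_(u'You are not allowed to vote on this proposal.'))
    proposal.rate(user, 'happy')   # stored as Vote.HAPPY (2) through VOTE_RATES
    return proposal.get_rate       # aggregated sum of all rates so far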
@python_2_unicode_compatible
class Track(models.Model):
title = models.CharField(_('Title'), max_length=200)
slug = AutoSlugField(populate_from='title', overwrite=True,
max_length=200, unique=True, db_index=True)
# relations
event = models.ForeignKey(to='deck.Event', related_name='tracks')
class Meta:
verbose_name = _('Track')
verbose_name_plural = _('Tracks')
def __str__(self):
return six.text_type('Track for: "%s"' % self.event.title)
@property
def proposals(self):
return Proposal.objects.filter(
pk__in=self.activities.values_list('pk', flat=True)
)
class Event(DeckBaseModel):
allow_public_voting = models.BooleanField(_('Allow Public Voting'),
default=True)
due_date = models.DateTimeField(null=False, blank=False)
slots = models.SmallIntegerField(_('Slots'), default=10)
# relations
jury = models.OneToOneField(to='jury.Jury', related_name='event',
null=True, blank=True)
anonymous_voting = models.BooleanField(
_('Anonymous Voting?'), default=False)
class Meta:
ordering = ['-due_date', '-created_at']
verbose_name = _('Event')
verbose_name_plural = _('Events')
@property
def due_date_is_passed(self):
return timezone.now() > self.due_date
@property
def due_date_is_close(self):
if self.due_date_is_passed:
return False
return timezone.now() > self.due_date - timezone.timedelta(days=7)
def get_absolute_url(self):
return reverse('view_event', kwargs={'slug': self.slug})
def user_can_see_proposals(self, user):
can_see_proposals = False
if user.is_superuser or self.author == user:
can_see_proposals = True
elif self.allow_public_voting:
can_see_proposals = True
elif (not user.is_anonymous() and
self.jury.users.filter(pk=user.pk).exists()):
can_see_proposals = True
return can_see_proposals
def get_proposers_count(self):
return self.proposals.values_list(
'author', flat=True).distinct().count()
def get_votes_count(self):
return self.proposals.values_list('votes', flat=True).count()
def get_votes_to_export(self):
return self.proposals.values(
'id', 'title', 'author__username', 'author__email'
).annotate(
Sum('votes__rate')
).annotate(Count('votes'))
def get_schedule(self):
schedule = Activity.objects.filter(track__event=self)\
.cached_authors()\
.annotate(Sum('proposal__votes__rate'))\
.extra(select=dict(track_isnull='track_id IS NULL'))\
.order_by('track_isnull', 'track_order',
'-proposal__votes__rate__sum')
return schedule
def get_not_approved_schedule(self):
return self.proposals\
.cached_authors()\
.filter(
models.Q(is_approved=False) |
models.Q(track__isnull=True))
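# Illustrative sketch (not part of the original app): exporting an event's
# votes, e.g. for a CSV download view. "event" is a placeholder for an
# Event instance.
def _example_vote_rows(event):
    """Yield one row per proposal with its aggregated rate and vote count."""
    for row in event.get_votes_to_export():
        yield (row['id'], row['title'], row['author__username'],
               row['votes__rate__sum'] or 0, row['votes__count'])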
@receiver(user_signed_up)
def send_welcome_mail(request, user, **kwargs):
if not settings.SEND_NOTIFICATIONS:
return
message = render_to_string('mailing/welcome.txt')
subject = _(u'Welcome')
recipients = [user.email]
send_mail(subject, message, settings.NO_REPLY_EMAIL, recipients)
@receiver(post_save, sender=Event)
def create_initial_jury(sender, instance, signal, created, **kwargs):
if not created:
return
jury = Jury()
jury.save()
jury.users.add(instance.author)
instance.jury = jury
instance.save()
@receiver(post_save, sender=Event)
def create_initial_track(sender, instance, signal, created, **kwargs):
if not created:
return
Track.objects.create(event=instance)
@receiver(post_delete, sender=Proposal)
def send_proposal_deleted_mail(sender, instance, **kwargs):
if not settings.SEND_NOTIFICATIONS:
return
context = {'event_title': instance.event.title,
'proposal_title': instance.title}
message = render_to_string('mailing/jury_deleted_proposal.txt', context)
subject = _(u'Proposal from %s just got deleted' % instance.event.title)
recipients = instance.event.jury.users.values_list('email', flat=True)
send_mail(subject, message, settings.NO_REPLY_EMAIL, recipients)
| mit | -5,524,374,505,757,374,000 | 32.683117 | 87 | 0.610117 | false |
thomas-bottesch/fcl | python/utils/create_pca_vectors_from_dataset.py | 1 | 2284 | from __future__ import print_function
import fcl
import os
import time
from os.path import abspath, join, dirname, isfile
from fcl import kmeans
from fcl.datasets import load_sector_dataset, load_usps_dataset
from fcl.matrix.csr_matrix import get_csr_matrix_from_object, csr_matrix_to_libsvm_string
from sklearn.decomposition import TruncatedSVD, PCA
from scipy.sparse import csr_matrix
from sklearn.datasets import dump_svmlight_file
import numpy as np
import argparse
def get_pca_projection_csrmatrix(fcl_csr_input_matrix, component_ratio):
n_components = int(fcl_csr_input_matrix.annz * component_ratio)
p = TruncatedSVD(n_components = n_components)
start = time.time()
p.fit(fcl_csr_input_matrix.to_numpy())
# convert to millis
fin = (time.time() - start) * 1000
(n_samples, n_dim) = fcl_csr_input_matrix.shape
print("Truncated SVD took %.3fs to retrieve %s components for input_matrix with n_samples %d, n_dim %d" % (fin/1000.0, str(n_components), n_samples, n_dim))
return get_csr_matrix_from_object(p.components_)
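# Illustrative sketch (not part of the original script): applying a projection
# matrix like the one produced above. "components" is assumed to be the dense
# (n_components, n_dim) array (e.g. TruncatedSVD(...).components_) and "X" a
# scipy.sparse CSR matrix of shape (n_samples, n_dim).
def project_with_components(X, components):
  return X.dot(components.T)  # reduced representation, shape (n_samples, n_components)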
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create a pca matrix from an input matrix with given component ratio.')
parser.add_argument('path_input_dataset', type=str, help='Path to the input libsvm dataset')
  parser.add_argument('path_output_dataset', type=str, help='Path to the output libsvm dataset')
  parser.add_argument('--component_ratio', default=0.1, type=float, help='Fraction of the average number of non-zero values per sample to use as the number of components.')
args = parser.parse_args()
if not isfile(args.path_input_dataset):
raise Exception("Unable to find path_input_dataset: %s" % args.path_input_dataset)
print("Loading data from %s" % args.path_input_dataset)
fcl_mtrx_input_dataset = get_csr_matrix_from_object(args.path_input_dataset)
print("Retrieving the pca projection matrix")
pca_mtrx = get_pca_projection_csrmatrix(fcl_mtrx_input_dataset, args.component_ratio)
print("Convert pca projection matrix to libsvm string")
pca_mtrx_lsvm_str = csr_matrix_to_libsvm_string(pca_mtrx)
print("Writing pca projection matrix libsvm string to file %s" % args.path_output_dataset)
with open(args.path_output_dataset, 'w') as f:
f.write(pca_mtrx_lsvm_str)
| mit | 1,100,592,844,665,426,300 | 45.612245 | 160 | 0.741243 | false |
credativUK/connector-magento | __unported__/magentoerpconnect/partner.py | 1 | 25089 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import xmlrpclib
from collections import namedtuple
from openerp.osv import fields, orm
from openerp.addons.connector.queue.job import job
from openerp.addons.connector.connector import ConnectorUnit
from openerp.addons.connector.exception import MappingError
from openerp.addons.connector.unit.backend_adapter import BackendAdapter
from openerp.addons.connector.unit.mapper import (mapping,
only_create,
ImportMapper
)
from openerp.addons.connector.exception import IDMissingInBackend
from .unit.backend_adapter import GenericAdapter
from .unit.import_synchronizer import (DelayedBatchImport,
MagentoImportSynchronizer
)
from .backend import magento
from .connector import get_environment
_logger = logging.getLogger(__name__)
class res_partner(orm.Model):
_inherit = 'res.partner'
_columns = {
'magento_bind_ids': fields.one2many(
'magento.res.partner', 'openerp_id',
string="Magento Bindings"),
'magento_address_bind_ids': fields.one2many(
'magento.address', 'openerp_id',
string="Magento Address Bindings"),
'birthday': fields.date('Birthday'),
'company': fields.char('Company'),
}
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default['magento_bind_ids'] = False
return super(res_partner, self).copy_data(cr, uid, id,
default=default,
context=context)
def _address_fields(self, cr, uid, context=None):
""" Returns the list of address fields that are synced from the parent
when the `use_parent_address` flag is set. """
fields = super(res_partner, self)._address_fields(cr, uid,
context=context)
fields.append('company')
return fields
class magento_res_partner(orm.Model):
_name = 'magento.res.partner'
_inherit = 'magento.binding'
_inherits = {'res.partner': 'openerp_id'}
_description = 'Magento Partner'
_rec_name = 'website_id'
def _get_mag_partner_from_website(self, cr, uid, ids, context=None):
mag_partner_obj = self.pool['magento.res.partner']
return mag_partner_obj.search(
cr, uid, [('website_id', 'in', ids)], context=context)
_columns = {
'openerp_id': fields.many2one('res.partner',
string='Partner',
required=True,
ondelete='cascade'),
'backend_id': fields.related(
'website_id', 'backend_id',
type='many2one',
relation='magento.backend',
string='Magento Backend',
store={
'magento.res.partner': (lambda self, cr, uid, ids, c=None: ids,
['website_id'], 10),
'magento.website': (_get_mag_partner_from_website,
['backend_id'], 20),
},
readonly=True),
'website_id': fields.many2one('magento.website',
string='Magento Website',
required=True,
ondelete='restrict'),
'group_id': fields.many2one('magento.res.partner.category',
string='Magento Group (Category)'),
'created_at': fields.datetime('Created At (on Magento)',
readonly=True),
'updated_at': fields.datetime('Updated At (on Magento)',
readonly=True),
'emailid': fields.char('E-mail address'),
'taxvat': fields.char('Magento VAT'),
'newsletter': fields.boolean('Newsletter'),
'guest_customer': fields.boolean('Guest Customer'),
'consider_as_company': fields.boolean(
'Considered as company',
help="An account imported with a 'company' in "
"the billing address is considered as a company.\n "
"The partner takes the name of the company and "
"is not merged with the billing address."),
}
_sql_constraints = [
('magento_uniq', 'unique(website_id, magento_id)',
'A partner with same ID on Magento already exists for this website.'),
]
class magento_address(orm.Model):
_name = 'magento.address'
_inherit = 'magento.binding'
_inherits = {'res.partner': 'openerp_id'}
_description = 'Magento Address'
_rec_name = 'backend_id'
def _get_mag_address_from_partner(self, cr, uid, ids, context=None):
mag_address_obj = self.pool['magento.address']
return mag_address_obj.search(
cr, uid, [('magento_partner_id', 'in', ids)], context=context)
_columns = {
'openerp_id': fields.many2one('res.partner',
string='Partner',
required=True,
ondelete='cascade'),
'created_at': fields.datetime('Created At (on Magento)',
readonly=True),
'updated_at': fields.datetime('Updated At (on Magento)',
readonly=True),
'is_default_billing': fields.boolean('Default Invoice'),
'is_default_shipping': fields.boolean('Default Shipping'),
'magento_partner_id': fields.many2one('magento.res.partner',
string='Magento Partner',
required=True,
ondelete='cascade'),
'backend_id': fields.related(
'magento_partner_id', 'backend_id',
type='many2one',
relation='magento.backend',
string='Magento Backend',
store={
'magento.address': (lambda self, cr, uid, ids, c=None: ids,
['magento_partner_id'], 10),
'magento.res.partner': (_get_mag_address_from_partner,
['backend_id', 'website_id'], 20),
},
readonly=True),
'website_id': fields.related(
'magento_partner_id', 'website_id',
type='many2one',
relation='magento.website',
string='Magento Website',
store={
'magento.address': (lambda self, cr, uid, ids, c=None: ids,
['magento_partner_id'], 10),
'magento.res.partner': (_get_mag_address_from_partner,
['website_id'], 20),
},
readonly=True),
'is_magento_order_address': fields.boolean(
'Address from a Magento Order'),
}
_sql_constraints = [
('magento_uniq', 'unique(backend_id, magento_id)',
'A partner address with same ID on Magento already exists.'),
]
@magento
class PartnerAdapter(GenericAdapter):
_model_name = 'magento.res.partner'
_magento_model = 'customer'
_admin_path = '/{model}/edit/id/{id}'
def _call(self, method, arguments):
try:
return super(PartnerAdapter, self)._call(method, arguments)
except xmlrpclib.Fault as err:
# this is the error in the Magento API
# when the customer does not exist
if err.faultCode == 102:
raise IDMissingInBackend
else:
raise
def search(self, filters=None, from_date=None, magento_website_ids=None):
""" Search records according to some criterias and returns a
list of ids
:rtype: list
"""
if filters is None:
filters = {}
if from_date is not None:
# updated_at include the created records
str_from_date = from_date.strftime('%Y/%m/%d %H:%M:%S')
filters['updated_at'] = {'from': str_from_date}
if magento_website_ids is not None:
filters['website_id'] = {'in': magento_website_ids}
# the search method is on ol_customer instead of customer
return self._call('ol_customer.search',
[filters] if filters else [{}])
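# Illustrative sketch (not part of the original module): the kind of call the
# adapter above receives from a batch import. The website id and time window
# are placeholders.
def _example_search_recent_partners(adapter):
    from datetime import datetime, timedelta
    since = datetime.now() - timedelta(days=1)
    # returns the Magento customer ids changed since yesterday on website 1
    return adapter.search(filters={}, from_date=since, magento_website_ids=[1])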
@magento
class PartnerBatchImport(DelayedBatchImport):
""" Import the Magento Partners.
For every partner in the list, a delayed job is created.
"""
_model_name = ['magento.res.partner']
def run(self, filters=None):
""" Run the synchronization """
from_date = filters.pop('from_date', None)
magento_website_ids = [filters.pop('magento_website_id')]
record_ids = self.backend_adapter.search(filters,
from_date,
magento_website_ids)
_logger.info('search for magento partners %s returned %s',
filters, record_ids)
for record_id in record_ids:
self._import_record(record_id)
@magento
class PartnerImport(MagentoImportSynchronizer):
_model_name = ['magento.res.partner']
def _import_dependencies(self):
""" Import the dependencies for the record"""
record = self.magento_record
self._import_dependency(record['group_id'],
'magento.res.partner.category')
@property
def mapper(self):
""" Return an instance of ``Mapper`` for the synchronization.
        The instantiation is delayed because some synchronizations do
        not need such a unit and the unit may not exist.
        For ``magento.res.partner``, we have a company mapper and
        a regular mapper; ensure we find the correct one here.
:rtype: :py:class:`~.PartnerImportMapper`
"""
if self._mapper is None:
get_unit = self.environment.get_connector_unit
self._mapper = get_unit(PartnerImportMapper)
return self._mapper
def _after_import(self, partner_binding_id):
""" Import the addresses """
get_unit = self.get_connector_unit_for_model
book = get_unit(PartnerAddressBook, 'magento.address')
book.import_addresses(self.magento_id, partner_binding_id)
@magento
class PartnerImportMapper(ImportMapper):
_model_name = 'magento.res.partner'
direct = [
('email', 'email'),
('dob', 'birthday'),
('created_at', 'created_at'),
('updated_at', 'updated_at'),
('email', 'emailid'),
('taxvat', 'taxvat'),
('group_id', 'group_id'),
]
@only_create
@mapping
def is_company(self, record):
# partners are companies so we can bind
# addresses on them
return {'is_company': True}
@mapping
def names(self, record):
# TODO create a glue module for base_surname
parts = [part for part in (record['firstname'],
record['middlename'],
record['lastname']) if part]
return {'name': ' '.join(parts)}
@mapping
def customer_group_id(self, record):
# import customer groups
binder = self.get_binder_for_model('magento.res.partner.category')
category_id = binder.to_openerp(record['group_id'], unwrap=True)
if category_id is None:
raise MappingError("The partner category with "
"magento id %s does not exist" %
record['group_id'])
# FIXME: should remove the previous tag (all the other tags from
# the same backend)
return {'category_id': [(4, category_id)]}
@mapping
def website_id(self, record):
binder = self.get_binder_for_model('magento.website')
website_id = binder.to_openerp(record['website_id'])
return {'website_id': website_id}
@mapping
def lang(self, record):
binder = self.get_binder_for_model('magento.storeview')
binding_id = binder.to_openerp(record['store_id'])
if binding_id:
storeview = self.session.browse('magento.storeview',
binding_id)
if storeview.lang_id:
return {'lang': storeview.lang_id.code}
@only_create
@mapping
def customer(self, record):
return {'customer': True}
@mapping
def type(self, record):
return {'type': 'default'}
@only_create
@mapping
def openerp_id(self, record):
""" Will bind the customer on a existing partner
with the same email """
sess = self.session
partner_ids = sess.search('res.partner',
[('email', '=', record['email']),
('customer', '=', True),
# FIXME once it has been changed in openerp
('is_company', '=', True)])
if partner_ids:
return {'openerp_id': partner_ids[0]}
AddressInfos = namedtuple('AddressInfos', ['magento_record',
'partner_binding_id',
'merge'])
@magento
class PartnerAddressBook(ConnectorUnit):
""" Import all addresses from the address book of a customer.
    This class is responsible for defining which addresses should
    be imported and how (merged with the partner or not...).
    Then, it delegates the import to the appropriate importer.
    This is really intricate. The data models are different between
    Magento and OpenERP and we have many use cases to cover.
    The first thing is that:
    - we do not import companies and individuals in the same manner
    - we do not know if an account is a company -> we assume that
      if we find something in the company field of the billing
      address, the whole account is a company.
Differences:
- Individuals: we merge the billing address with the partner,
so we'll end with 1 entity if the customer has 1 address
- Companies: we never merge the addresses with the partner,
but we use the company name of the billing address as name
of the partner. We also copy the address informations from
the billing address as default values.
More information on:
https://bugs.launchpad.net/openerp-connector/+bug/1193281
"""
_model_name = 'magento.address'
def import_addresses(self, magento_partner_id, partner_binding_id):
get_unit = self.get_connector_unit_for_model
addresses = self._get_address_infos(magento_partner_id,
partner_binding_id)
for address_id, infos in addresses:
importer = get_unit(MagentoImportSynchronizer)
importer.run(address_id, infos)
def _get_address_infos(self, magento_partner_id, partner_binding_id):
get_unit = self.get_connector_unit_for_model
adapter = get_unit(BackendAdapter)
mag_address_ids = adapter.search({'customer_id':
{'eq': magento_partner_id}})
if not mag_address_ids:
return
for address_id in mag_address_ids:
magento_record = adapter.read(address_id)
# defines if the billing address is merged with the partner
# or imported as a standalone contact
merge = False
if magento_record.get('is_default_billing'):
if magento_record.get('company'):
# when a company is there, we never merge the contact
# with the partner.
# Copy the billing address on the company
# and use the name of the company for the name
company_mapper = get_unit(CompanyImportMapper,
'magento.res.partner')
map_record = company_mapper.map_record(magento_record)
self.session.write('magento.res.partner',
partner_binding_id,
map_record.values())
else:
# for B2C individual customers, merge with the main
# partner
merge = True
                    # in case the billing address no longer
                    # has a company, reset the flag
self.session.write('magento.res.partner',
partner_binding_id,
{'consider_as_company': False})
address_infos = AddressInfos(magento_record=magento_record,
partner_binding_id=partner_binding_id,
merge=merge)
yield address_id, address_infos
class BaseAddressImportMapper(ImportMapper):
""" Defines the base mappings for the imports
in ``res.partner`` (state, country, ...)
"""
direct = [('postcode', 'zip'),
('city', 'city'),
('telephone', 'phone'),
('fax', 'fax'),
('company', 'company'),
]
@mapping
def state(self, record):
if not record.get('region'):
return
state_ids = self.session.search('res.country.state',
[('name', '=ilike', record['region'])])
if state_ids:
return {'state_id': state_ids[0]}
@mapping
def country(self, record):
if not record.get('country_id'):
return
country_ids = self.session.search(
'res.country',
[('code', '=', record['country_id'])])
if country_ids:
return {'country_id': country_ids[0]}
@mapping
def street(self, record):
value = record['street']
lines = [line.strip() for line in value.split('\n') if line.strip()]
if len(lines) == 1:
result = {'street': lines[0], 'street2': False}
elif len(lines) >= 2:
result = {'street': lines[0], 'street2': u' - '.join(lines[1:])}
else:
result = {}
return result
@mapping
def title(self, record):
prefix = record['prefix']
title_id = False
if prefix:
title_ids = self.session.search('res.partner.title',
[('domain', '=', 'contact'),
('shortcut', 'ilike', prefix)])
if title_ids:
title_id = title_ids[0]
else:
title_id = self.session.create('res.partner.title',
{'domain': 'contact',
'shortcut': prefix,
'name': prefix})
return {'title': title_id}
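# Illustrative sketch (not part of the original module): the shape of the values
# the street() mapping above produces for a multi-line Magento street field.
def _example_street_mapping_result():
    # a record like {'street': '10 Main Street\nBuilding B\nFloor 2'} maps to:
    return {'street': '10 Main Street', 'street2': u'Building B - Floor 2'}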
@magento
class CompanyImportMapper(BaseAddressImportMapper):
""" Special mapping used when we import a company.
A company is considered as such when the billing address
of an account has something in the 'company' field.
This is a very special mapping not used in the same way
than the other.
The billing address will exist as a contact,
but we want to *copy* the data on the company.
The input record is the billing address.
The mapper returns data which will be written on the
main partner, in other words, the company.
The ``@only_create`` decorator would not have any
effect here because the mapper is always called
for updates.
"""
_model_name = 'magento.res.partner'
direct = BaseAddressImportMapper.direct + [
('company', 'name'),
]
@mapping
def consider_as_company(self, record):
return {'consider_as_company': True}
@magento
class AddressAdapter(GenericAdapter):
_model_name = 'magento.address'
_magento_model = 'customer_address'
def search(self, filters=None):
""" Search records according to some criterias
and returns a list of ids
:rtype: list
"""
return [int(row['customer_address_id']) for row
in self._call('%s.list' % self._magento_model,
[filters] if filters else [{}])]
@magento
class AddressImport(MagentoImportSynchronizer):
_model_name = ['magento.address']
def run(self, magento_id, address_infos):
""" Run the synchronization """
self.address_infos = address_infos
return super(AddressImport, self).run(magento_id)
def _get_magento_data(self):
""" Return the raw Magento data for ``self.magento_id`` """
# we already read the data from the Partner Importer
if self.address_infos.magento_record:
return self.address_infos.magento_record
else:
return super(AddressImport, self)._get_magento_data()
def _define_partner_relationship(self, data):
""" Link address with partner or parent company. """
partner_binding_id = self.address_infos.partner_binding_id
partner_id = self.session.read('magento.res.partner',
partner_binding_id,
['openerp_id'])['openerp_id'][0]
if self.address_infos.merge:
            # it won't be imported as an independent address,
            # but will be linked with the main res.partner
data['openerp_id'] = partner_id
data['type'] = 'default'
else:
data['parent_id'] = partner_id
partner = self.session.browse('res.partner', partner_id)
data['lang'] = partner.lang
data['magento_partner_id'] = self.address_infos.partner_binding_id
return data
def _create(self, data):
data = self._define_partner_relationship(data)
return super(AddressImport, self)._create(data)
@magento
class AddressImportMapper(BaseAddressImportMapper):
_model_name = 'magento.address'
# TODO fields not mapped:
# "suffix"=>"a",
# "vat_id"=>"12334",
direct = BaseAddressImportMapper.direct + [
('created_at', 'created_at'),
('updated_at', 'updated_at'),
('is_default_billing', 'is_default_billing'),
('is_default_shipping', 'is_default_shipping'),
('company', 'company'),
]
@mapping
def names(self, record):
# TODO create a glue module for base_surname
parts = [part for part in (record['firstname'],
record.get('middlename'),
record['lastname']) if part]
return {'name': ' '.join(parts)}
@mapping
def use_parent_address(self, record):
return {'use_parent_address': False}
@mapping
def type(self, record):
if record.get('is_default_billing'):
address_type = 'invoice'
elif record.get('is_default_shipping'):
address_type = 'delivery'
else:
address_type = 'contact'
return {'type': address_type}
@job
def partner_import_batch(session, model_name, backend_id, filters=None):
""" Prepare the import of partners modified on Magento """
if filters is None:
filters = {}
assert 'magento_website_id' in filters, (
'Missing information about Magento Website')
env = get_environment(session, model_name, backend_id)
importer = env.get_connector_unit(PartnerBatchImport)
importer.run(filters=filters)
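# Illustrative sketch (not part of the original module): how the batch import
# job above is usually scheduled, assuming the connector's @job decorator
# exposes a .delay() helper as in the OpenERP connector framework. The ids are
# placeholders.
def _example_schedule_partner_import(session, backend_id, website_magento_id):
    partner_import_batch.delay(
        session, 'magento.res.partner', backend_id,
        filters={'magento_website_id': website_magento_id})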
| agpl-3.0 | -4,476,961,058,515,787,300 | 37.539171 | 79 | 0.544781 | false |
luotao1/Paddle | python/paddle/static/io.py | 1 | 30947 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import errno
import inspect
import logging
import os
import warnings
import six
import numpy as np
import paddle
from paddle.fluid import (
core,
Variable,
CompiledProgram,
default_main_program,
Program,
layers,
unique_name,
program_guard, )
from paddle.fluid.io import prepend_feed_ops, append_fetch_ops
from paddle.fluid.framework import static_only, Parameter
from paddle.fluid.executor import Executor, global_scope
from paddle.fluid.log_helper import get_logger
__all__ = [
'save_inference_model',
'load_inference_model',
'serialize_program',
'serialize_persistables',
'save_to_file',
'deserialize_program',
'deserialize_persistables',
'load_from_file',
'normalize_program',
]
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
def _check_args(caller, args, supported_args=None, deprecated_args=None):
supported_args = [] if supported_args is None else supported_args
deprecated_args = [] if deprecated_args is None else deprecated_args
for arg in args:
if arg in deprecated_args:
raise ValueError(
"argument '{}' in function '{}' is deprecated, only {} are supported.".
format(arg, caller, supported_args))
elif arg not in supported_args:
raise ValueError(
"function '{}' doesn't support argument '{}',\n only {} are supported.".
format(caller, arg, supported_args))
def _check_vars(name, var_list):
if not isinstance(var_list, list):
var_list = [var_list]
if not var_list or not all([isinstance(var, Variable) for var in var_list]):
raise ValueError(
"'{}' should be a Variable or a list of Variable.".format(name))
def _normalize_path_prefix(path_prefix):
"""
convert path_prefix to absolute path.
"""
if not isinstance(path_prefix, six.string_types):
raise ValueError("'path_prefix' should be a string.")
if path_prefix.endswith("/"):
raise ValueError("'path_prefix' should not be a directory")
path_prefix = os.path.normpath(path_prefix)
path_prefix = os.path.abspath(path_prefix)
return path_prefix
def _get_valid_program(program=None):
"""
return default main program if program is None.
"""
if program is None:
program = default_main_program()
elif isinstance(program, CompiledProgram):
program = program._program
if program is None:
raise TypeError(
"The type of input program is invalid, expected tyep is Program, but received None"
)
warnings.warn(
"The input is a CompiledProgram, this is not recommended.")
if not isinstance(program, Program):
raise TypeError(
"The type of input program is invalid, expected type is fluid.Program, but received %s"
% type(program))
return program
def _clone_var_in_block(block, var):
assert isinstance(var, Variable)
if var.desc.type() == core.VarDesc.VarType.LOD_TENSOR:
return block.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=True)
else:
return block.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
persistable=True)
def normalize_program(program, feed_vars, fetch_vars):
"""
:api_attr: Static Graph
Normalize/Optimize a program according to feed_vars and fetch_vars.
Args:
program(Program): Specify a program you want to optimize.
feed_vars(Variable | list[Variable]): Variables needed by inference.
fetch_vars(Variable | list[Variable]): Variables returned by inference.
Returns:
Program: Normalized/Optimized program.
Raises:
TypeError: If `program` is not a Program, an exception is thrown.
TypeError: If `feed_vars` is not a Variable or a list of Variable, an exception is thrown.
TypeError: If `fetch_vars` is not a Variable or a list of Variable, an exception is thrown.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
path_prefix = "./infer_model"
            # User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(predict, label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
# normalize main program.
program = default_main_program()
normalized_program = paddle.static.normalize_program(program, [image], [predict])
"""
if not isinstance(program, Program):
raise TypeError(
"program type must be `fluid.Program`, but received `%s`" %
type(program))
if not isinstance(feed_vars, list):
feed_vars = [feed_vars]
if not all(isinstance(v, Variable) for v in feed_vars):
raise TypeError(
"feed_vars type must be a Variable or a list of Variable.")
if not isinstance(fetch_vars, list):
fetch_vars = [fetch_vars]
if not all(isinstance(v, Variable) for v in fetch_vars):
raise TypeError(
"fetch_vars type must be a Variable or a list of Variable.")
    # remind users to set auc_states to 0 if an auc op is found.
for op in program.global_block().ops:
# clear device of Op
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
op._set_attr(device_attr_name, "")
if op.type == 'auc':
warnings.warn("Be sure that you have set auc states to 0 "
"before saving inference model.")
break
# fix the bug that the activation op's output as target will be pruned.
# will affect the inference performance.
# TODO(Superjomn) add an IR pass to remove 1-scale op.
with program_guard(program):
uniq_fetch_vars = []
for i, var in enumerate(fetch_vars):
var = layers.scale(
var, 1., name="save_infer_model/scale_{}".format(i))
uniq_fetch_vars.append(var)
fetch_vars = uniq_fetch_vars
# serialize program
copy_program = program.clone()
global_block = copy_program.global_block()
remove_op_idx = []
for i, op in enumerate(global_block.ops):
op.desc.set_is_target(False)
if op.type == "feed" or op.type == "fetch":
remove_op_idx.append(i)
for idx in remove_op_idx[::-1]:
global_block._remove_op(idx)
copy_program.desc.flush()
feed_var_names = [var.name for var in feed_vars]
copy_program = copy_program._prune_with_input(
feeded_var_names=feed_var_names, targets=fetch_vars)
copy_program = copy_program._inference_optimize(prune_read_op=True)
fetch_var_names = [var.name for var in fetch_vars]
prepend_feed_ops(copy_program, feed_var_names)
append_fetch_ops(copy_program, fetch_var_names)
copy_program.desc._set_version()
return copy_program
def is_persistable(var):
"""
Check whether the given variable is persistable.
Args:
var(Variable): The variable to be checked.
Returns:
bool: True if the given `var` is persistable
False if not.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
param = fluid.default_main_program().global_block().var('fc.b')
res = fluid.io.is_persistable(param)
"""
if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \
var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \
var.desc.type() == core.VarDesc.VarType.READER:
return False
return var.persistable
@static_only
def serialize_program(feed_vars, fetch_vars, **kwargs):
"""
:api_attr: Static Graph
Serialize default main program according to feed_vars and fetch_vars.
Args:
feed_vars(Variable | list[Variable]): Variables needed by inference.
fetch_vars(Variable | list[Variable]): Variables returned by inference.
        kwargs: Supported keys include 'program'. Note: kwargs is used mainly for backward compatibility.
- program(Program): specify a program if you don't want to use default main program.
Returns:
bytes: serialized program.
Raises:
ValueError: If `feed_vars` is not a Variable or a list of Variable, an exception is thrown.
ValueError: If `fetch_vars` is not a Variable or a list of Variable, an exception is thrown.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
path_prefix = "./infer_model"
            # User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(predict, label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
# serialize the default main program to bytes.
serialized_program = paddle.static.serialize_program([image], [predict])
# deserialize bytes to program
deserialized_program = paddle.static.deserialize_program(serialized_program)
"""
# verify feed_vars
_check_vars('feed_vars', feed_vars)
# verify fetch_vars
_check_vars('fetch_vars', fetch_vars)
program = _get_valid_program(kwargs.get('program', None))
program = normalize_program(program, feed_vars, fetch_vars)
return _serialize_program(program)
def _serialize_program(program):
"""
serialize given program to bytes.
"""
return program.desc.serialize_to_string()
@static_only
def serialize_persistables(feed_vars, fetch_vars, executor, **kwargs):
"""
:api_attr: Static Graph
Serialize parameters using given executor and default main program according to feed_vars and fetch_vars.
Args:
feed_vars(Variable | list[Variable]): Variables needed by inference.
fetch_vars(Variable | list[Variable]): Variables returned by inference.
        kwargs: Supported keys include 'program'. Note: kwargs is used mainly for backward compatibility.
- program(Program): specify a program if you don't want to use default main program.
Returns:
        bytes: serialized parameters.
Raises:
ValueError: If `feed_vars` is not a Variable or a list of Variable, an exception is thrown.
ValueError: If `fetch_vars` is not a Variable or a list of Variable, an exception is thrown.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
path_prefix = "./infer_model"
            # User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(predict, label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
# serialize parameters to bytes.
serialized_params = paddle.static.serialize_persistables([image], [predict], exe)
# deserialize bytes to parameters.
main_program = paddle.static.default_main_program()
deserialized_params = paddle.static.deserialize_persistables(main_program, serialized_params, exe)
"""
# verify feed_vars
_check_vars('feed_vars', feed_vars)
# verify fetch_vars
_check_vars('fetch_vars', fetch_vars)
program = _get_valid_program(kwargs.get('program', None))
program = normalize_program(program, feed_vars, fetch_vars)
return _serialize_persistables(program, executor)
def _serialize_persistables(program, executor):
"""
Serialize parameters using given program and executor.
"""
vars_ = list(filter(is_persistable, program.list_vars()))
# warn if no variable found in model
if len(vars_) == 0:
warnings.warn("no variable in your model, please ensure there are any "
"variables in your model to save")
return None
    # create a new program and clone persistable vars to it
save_program = Program()
save_block = save_program.global_block()
save_var_map = {}
for var in vars_:
if var.type != core.VarDesc.VarType.RAW:
var_copy = _clone_var_in_block(save_block, var)
save_var_map[var_copy.name] = var
# create in_vars and out_var, then append a save_combine op to save_program
in_vars = []
for name in sorted(save_var_map.keys()):
in_vars.append(save_var_map[name])
out_var_name = unique_name.generate("out_var")
out_var = save_block.create_var(
type=core.VarDesc.VarType.RAW, name=out_var_name)
out_var.desc.set_persistable(True)
save_block.append_op(
type='save_combine',
inputs={'X': in_vars},
outputs={'Y': out_var},
attrs={'file_path': '',
'save_to_memory': True})
# run save_program to save vars
# NOTE(zhiqiu): save op will add variable kLookupTablePath to save_program.desc,
# which leads to diff between save_program and its desc. Call _sync_with_cpp
# to keep consistency.
save_program._sync_with_cpp()
executor.run(save_program)
# return serialized bytes in out_var
return global_scope().find_var(out_var_name).get_bytes()
def save_to_file(path, content):
"""
Save content to given path.
Args:
path(str): Path to write content to.
content(bytes): Content to write.
Returns:
None
"""
if not isinstance(content, bytes):
raise ValueError("'content' type should be bytes.")
with open(path, "wb") as f:
f.write(content)
@static_only
def save_inference_model(path_prefix, feed_vars, fetch_vars, executor,
**kwargs):
"""
:api_attr: Static Graph
Save current model and its parameters to given path. i.e.
Given path_prefix = "/path/to/modelname", after invoking
save_inference_model(path_prefix, feed_vars, fetch_vars, executor),
you will find two files named modelname.pdmodel and modelname.pdiparams
under "/path/to", which represent your model and parameters respectively.
Args:
path_prefix(str): Directory path to save model + model name without suffix.
feed_vars(Variable | list[Variable]): Variables needed by inference.
fetch_vars(Variable | list[Variable]): Variables returned by inference.
executor(Executor): The executor that saves the inference model. You can refer
to :ref:`api_guide_executor_en` for more details.
        kwargs: Supported keys include 'program'. Note: kwargs is used mainly for backward compatibility.
- program(Program): specify a program if you don't want to use default main program.
Returns:
None
Raises:
ValueError: If `feed_vars` is not a Variable or a list of Variable, an exception is thrown.
ValueError: If `fetch_vars` is not a Variable or a list of Variable, an exception is thrown.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
path_prefix = "./infer_model"
            # User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(predict, label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
# Feed data and train process
# Save inference model. Note we don't save label and loss in this example
paddle.static.save_inference_model(path_prefix, [image], [predict], exe)
# In this example, the save_inference_mode inference will prune the default
# main program according to the network's input node (img) and output node(predict).
# The pruned inference program is going to be saved in file "./infer_model.pdmodel"
# and parameters are going to be saved in file "./infer_model.pdiparams".
"""
# check path_prefix, set model_path and params_path
path_prefix = _normalize_path_prefix(path_prefix)
try:
# mkdir may conflict if pserver and trainer are running on the same machine
dirname = os.path.dirname(path_prefix)
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
model_path = path_prefix + ".pdmodel"
params_path = path_prefix + ".pdiparams"
if os.path.isdir(model_path):
raise ValueError("'{}' is an existing directory.".format(model_path))
if os.path.isdir(params_path):
raise ValueError("'{}' is an existing directory.".format(params_path))
# verify feed_vars
_check_vars('feed_vars', feed_vars)
# verify fetch_vars
_check_vars('fetch_vars', fetch_vars)
program = _get_valid_program(kwargs.get('program', None))
program = normalize_program(program, feed_vars, fetch_vars)
# serialize and save program
program_bytes = _serialize_program(program)
save_to_file(model_path, program_bytes)
# serialize and save params
params_bytes = _serialize_persistables(program, executor)
save_to_file(params_path, params_bytes)
@static_only
def deserialize_program(data):
"""
:api_attr: Static Graph
Deserialize given data to a program.
Args:
data(bytes): serialized program.
Returns:
Program: deserialized program.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
path_prefix = "./infer_model"
            # User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(predict, label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
# serialize the default main program to bytes.
serialized_program = paddle.static.serialize_program([image], [predict])
# deserialize bytes to program
deserialized_program = paddle.static.deserialize_program(serialized_program)
"""
program = Program.parse_from_string(data)
if not core._is_program_version_supported(program._version()):
raise ValueError("Unsupported program version: %d\n" %
program._version())
return program
@static_only
def deserialize_persistables(program, data, executor):
"""
:api_attr: Static Graph
Deserialize given data to parameters according to given program and executor.
Args:
program(Program): program that contains parameter names (to deserialize).
data(bytes): serialized parameters.
executor(Executor): executor used to run load op.
Returns:
        None: The parameters are deserialized and loaded into the current scope in place.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
path_prefix = "./infer_model"
            # User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(predict, label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
# serialize parameters to bytes.
serialized_params = paddle.static.serialize_persistables([image], [predict], exe)
# deserialize bytes to parameters.
main_program = paddle.static.default_main_program()
deserialized_params = paddle.static.deserialize_persistables(main_program, serialized_params, exe)
"""
if not isinstance(program, Program):
raise TypeError(
"program type must be `fluid.Program`, but received `%s`" %
type(program))
# load params to a tmp program
load_program = Program()
load_block = load_program.global_block()
vars_ = list(filter(is_persistable, program.list_vars()))
origin_shape_map = {}
load_var_map = {}
check_vars = []
sparse_vars = []
for var in vars_:
assert isinstance(var, Variable)
if var.type == core.VarDesc.VarType.RAW:
continue
if isinstance(var, Parameter):
origin_shape_map[var.name] = tuple(var.desc.get_shape())
if var.type == core.VarDesc.VarType.SELECTED_ROWS:
sparse_vars.append(var)
continue
var_copy = _clone_var_in_block(load_block, var)
check_vars.append(var)
load_var_map[var_copy.name] = var_copy
    # append a load_combine op to load parameters
load_var_list = []
for name in sorted(load_var_map.keys()):
load_var_list.append(load_var_map[name])
load_block.append_op(
type='load_combine',
inputs={},
outputs={"Out": load_var_list},
# if load from memory, file_path is data
attrs={'file_path': data,
'model_from_memory': True})
executor.run(load_program)
# check var shape
for var in check_vars:
if not isinstance(var, Parameter):
continue
var_tmp = paddle.fluid.global_scope().find_var(var.name)
        assert var_tmp is not None, "cannot find var: " + var.name
new_shape = (np.array(var_tmp.get_tensor())).shape
assert var.name in origin_shape_map, var.name + " MUST in var list."
origin_shape = origin_shape_map.get(var.name)
if new_shape != origin_shape:
raise RuntimeError(
"Shape mismatch, program needs a parameter with shape ({}), "
"but the loaded parameter ('{}') has a shape of ({}).".format(
origin_shape, var.name, new_shape))
def load_from_file(path):
"""
Load file in binary mode.
Args:
path(str): Path of an existed file.
Returns:
bytes: Content of file.
"""
with open(path, 'rb') as f:
data = f.read()
return data
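# Illustrative sketch (not part of the original module): loading a model that
# was saved with save_inference_model by hand, using the helpers above instead
# of load_inference_model. "path_prefix" and "exe" are placeholders.
def _example_manual_load(path_prefix, exe):
    program = deserialize_program(load_from_file(path_prefix + ".pdmodel"))
    deserialize_persistables(program, load_from_file(path_prefix + ".pdiparams"), exe)
    return program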
@static_only
def load_inference_model(path_prefix, executor, **kwargs):
"""
:api_attr: Static Graph
Load inference model from a given path. By this API, you can get the model
structure(Inference Program) and model parameters.
Args:
path_prefix(str | None): One of the following:
- Directory path to save model + model name without suffix.
- Set to None when reading the model from memory.
executor(Executor): The executor to run for loading inference model.
See :ref:`api_guide_executor_en` for more details about it.
        kwargs: Supported keys include 'model_filename', 'params_filename'. Note: kwargs is used mainly for backward compatibility.
- model_filename(str): specify model_filename if you don't want to use default name.
- params_filename(str): specify params_filename if you don't want to use default name.
Returns:
list: The return of this API is a list with three elements:
(program, feed_target_names, fetch_targets). The `program` is a
``Program`` (refer to :ref:`api_guide_Program_en`), which is used for inference.
The `feed_target_names` is a list of ``str``, which contains names of variables
that need to feed data in the inference program. The `fetch_targets` is a list of
``Variable`` (refer to :ref:`api_guide_Program_en`). It contains variables from which
we can get inference results.
Raises:
ValueError: If `path_prefix.pdmodel` or `path_prefix.pdiparams` doesn't exist.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.enable_static()
# Build the model
startup_prog = paddle.static.default_startup_program()
main_prog = paddle.static.default_main_program()
with paddle.static.program_guard(main_prog, startup_prog):
image = paddle.static.data(name="img", shape=[64, 784])
w = paddle.create_parameter(shape=[784, 200], dtype='float32')
b = paddle.create_parameter(shape=[200], dtype='float32')
hidden_w = paddle.matmul(x=image, y=w)
hidden_b = paddle.add(hidden_w, b)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
# Save the inference model
path_prefix = "./infer_model"
paddle.static.save_inference_model(path_prefix, [image], [hidden_b], exe)
[inference_program, feed_target_names, fetch_targets] = (
paddle.static.load_inference_model(path_prefix, exe))
tensor_img = np.array(np.random.random((64, 784)), dtype=np.float32)
results = exe.run(inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
            # In this example, the inference program was saved in file
            # "./infer_model.pdmodel" and parameters were saved in file
            # "./infer_model.pdiparams".
# By the inference program, feed_target_names and
# fetch_targets, we can use an executor to run the inference
# program to get the inference result.
"""
# check kwargs
supported_args = ('model_filename', 'params_filename')
deprecated_args = ('pserver_endpoints', )
caller = inspect.currentframe().f_code.co_name
_check_args(caller, kwargs, supported_args, deprecated_args)
# load from memory
if path_prefix is None:
_logger.warning("Load inference model from memory is deprecated.")
model_filename = kwargs.get('model_filename', None)
params_filename = kwargs.get('params_filename', None)
if params_filename is None:
raise ValueError(
"params_filename cannot be None when path_prefix is None.")
load_dirname = ''
program_bytes = model_filename
params_filename = params_filename
# load from file
else:
# check and norm path_prefix
path_prefix = _normalize_path_prefix(path_prefix)
# set model_path and params_path in new way,
# path_prefix represents a file path without suffix in this case.
if not kwargs:
model_path = path_prefix + ".pdmodel"
params_path = path_prefix + ".pdiparams"
# set model_path and params_path in old way for compatible,
# path_prefix represents a directory path.
else:
model_filename = kwargs.get('model_filename', None)
params_filename = kwargs.get('params_filename', None)
# set model_path
if model_filename is None:
model_path = os.path.join(path_prefix, "__model__")
else:
model_path = os.path.join(path_prefix,
model_filename + ".pdmodel")
if not os.path.exists(model_path):
model_path = os.path.join(path_prefix, model_filename)
# set params_path
if params_filename is None:
params_path = os.path.join(path_prefix, "")
else:
params_path = os.path.join(path_prefix,
params_filename + ".pdiparams")
if not os.path.exists(params_path):
params_path = os.path.join(path_prefix, params_filename)
_logger.warning("The old way to load inference model is deprecated."
" model path: {}, params path: {}".format(
model_path, params_path))
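        # Illustrative resolution (example values only): path_prefix="./infer_model"
        # with no kwargs maps to "./infer_model.pdmodel" / "./infer_model.pdiparams";
        # when model_filename/params_filename are given, the files are looked up
        # inside the path_prefix directory instead (old-style layout handled above).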
program_bytes = load_from_file(model_path)
load_dirname = os.path.dirname(params_path)
params_filename = os.path.basename(params_path)
# deserialize bytes to program
program = deserialize_program(program_bytes)
# load params data
params_path = os.path.join(load_dirname, params_filename)
params_bytes = load_from_file(params_path)
# deserialize bytes to params
deserialize_persistables(program, params_bytes, executor)
feed_target_names = program.desc.get_feed_target_names()
fetch_target_names = program.desc.get_fetch_target_names()
fetch_targets = [
program.global_block().var(name) for name in fetch_target_names
]
return [program, feed_target_names, fetch_targets]
| apache-2.0 | -2,991,404,676,168,279,000 | 36.602673 | 144 | 0.626587 | false |
Erotemic/hotspotter | hsgui/_frontend/EditPrefSkel.py | 1 | 2626 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/joncrall/code/hotspotter/hsgui/_frontend/EditPrefSkel.ui'
#
# Created: Mon Feb 10 13:40:41 2014
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_editPrefSkel(object):
def setupUi(self, editPrefSkel):
editPrefSkel.setObjectName(_fromUtf8("editPrefSkel"))
editPrefSkel.resize(668, 530)
self.verticalLayout = QtGui.QVBoxLayout(editPrefSkel)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.prefTreeView = QtGui.QTreeView(editPrefSkel)
self.prefTreeView.setObjectName(_fromUtf8("prefTreeView"))
self.verticalLayout.addWidget(self.prefTreeView)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.redrawBUT = QtGui.QPushButton(editPrefSkel)
self.redrawBUT.setObjectName(_fromUtf8("redrawBUT"))
self.horizontalLayout.addWidget(self.redrawBUT)
self.unloadFeaturesAndModelsBUT = QtGui.QPushButton(editPrefSkel)
self.unloadFeaturesAndModelsBUT.setObjectName(_fromUtf8("unloadFeaturesAndModelsBUT"))
self.horizontalLayout.addWidget(self.unloadFeaturesAndModelsBUT)
self.defaultPrefsBUT = QtGui.QPushButton(editPrefSkel)
self.defaultPrefsBUT.setObjectName(_fromUtf8("defaultPrefsBUT"))
self.horizontalLayout.addWidget(self.defaultPrefsBUT)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(editPrefSkel)
QtCore.QMetaObject.connectSlotsByName(editPrefSkel)
def retranslateUi(self, editPrefSkel):
editPrefSkel.setWindowTitle(QtGui.QApplication.translate("editPrefSkel", "Edit Preferences", None, QtGui.QApplication.UnicodeUTF8))
self.redrawBUT.setText(QtGui.QApplication.translate("editPrefSkel", "Redraw", None, QtGui.QApplication.UnicodeUTF8))
self.unloadFeaturesAndModelsBUT.setText(QtGui.QApplication.translate("editPrefSkel", "Unload Features and Models", None, QtGui.QApplication.UnicodeUTF8))
self.defaultPrefsBUT.setText(QtGui.QApplication.translate("editPrefSkel", "Defaults", None, QtGui.QApplication.UnicodeUTF8))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
editPrefSkel = QtGui.QWidget()
ui = Ui_editPrefSkel()
ui.setupUi(editPrefSkel)
editPrefSkel.show()
sys.exit(app.exec_())
| apache-2.0 | -1,449,345,158,768,102,100 | 45.070175 | 161 | 0.739147 | false |
thomas-maurice/docker-minecraft-webapp | webapp/lib/mcrcon.py | 1 | 1703 | import socket
import select
import struct
class MCRconException(Exception):
pass
class MCRcon:
socket = None
def connect(self, host, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
def disconnect(self):
self.socket.close()
self.socket = None
def send(self, out_type, out_data):
if self.socket is None:
raise MCRconException("Must connect before sending data")
# Send a request packet
out_payload = struct.pack('<ii', 0, out_type) + out_data.encode('utf8') + b'\x00\x00'
out_length = struct.pack('<i', len(out_payload))
        self.socket.sendall(out_length + out_payload)
# Read response packets
in_data = ""
while True:
# Read a packet
in_length, = struct.unpack('<i', self.socket.recv(4))
in_payload = self.socket.recv(in_length)
in_id, in_type = struct.unpack('<ii', in_payload[:8])
in_data_partial, in_padding = in_payload[8:-2], in_payload[-2:]
# Sanity checks
if in_padding != b'\x00\x00':
raise MCRconException("Incorrect padding")
if in_id == -1:
raise MCRconException("Login failed")
# Record the response
in_data += in_data_partial.decode('utf8')
# If there's nothing more to receive, return the response
if len(select.select([self.socket], [], [], 0)[0]) == 0:
return in_data
def command(self, command):
return self.send(2, command)
def login(self, password):
return self.send(3, password)
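# Minimal usage sketch (not part of the original module); host, port and
# password below are placeholders for a server with RCON enabled.
if __name__ == "__main__":
    rcon = MCRcon()
    rcon.connect("127.0.0.1", 25575)   # 25575 is the usual Minecraft RCON port
    rcon.login("secret")               # packet type 3 = login
    print(rcon.command("list"))        # packet type 2 = command
    rcon.disconnect()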
| gpl-3.0 | 6,767,356,456,409,357,000 | 31.132075 | 93 | 0.571932 | false |
danidee10/Votr | api/api.py | 1 | 3600 | from os import getenv
from models import db, Users, Polls, Topics, Options, UserPolls
from flask import Blueprint, request, jsonify, session
from datetime import datetime
from config import SQLALCHEMY_DATABASE_URI
if getenv('APP_MODE') == 'PRODUCTION':
from production_settings import SQLALCHEMY_DATABASE_URI
api = Blueprint('api', 'api', url_prefix='/api')
@api.route('/polls', methods=['GET', 'POST'])
# retrieves/adds polls from/to the database
def api_polls():
if request.method == 'POST':
# get the poll and save it in the database
poll = request.get_json()
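        # Illustrative payload expected here (field names match the code below,
        # values are examples only):
        #   {"title": "Favourite language?", "options": ["Python", "Go"],
        #    "close_date": 1497731580}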
# simple validation to check if all values are properly set
for key, value in poll.items():
if not value:
return jsonify({'message': 'value for {} is empty'.format(key)})
title = poll['title']
options_query = lambda option: Options.query.filter(Options.name.like(option))
options = [Polls(option=Options(name=option))
if options_query(option).count() == 0
else Polls(option=options_query(option).first()) for option in poll['options']
]
eta = datetime.utcfromtimestamp(poll['close_date'])
new_topic = Topics(title=title, options=options, close_date=eta)
db.session.add(new_topic)
db.session.commit()
# run the task
from tasks import close_poll
close_poll.apply_async((new_topic.id, SQLALCHEMY_DATABASE_URI), eta=eta)
        return jsonify({'message': 'Poll was created successfully'})
else:
        # it's a GET request, return dict representations of the polls
polls = Topics.query.filter_by(status=True).join(Polls).order_by(Topics.id.desc()).all()
all_polls = {'Polls': [poll.to_json() for poll in polls]}
return jsonify(all_polls)
@api.route('/polls/options')
def api_polls_options():
all_options = [option.to_json() for option in Options.query.all()]
return jsonify(all_options)
@api.route('/poll/vote', methods=['PATCH'])
def api_poll_vote():
poll = request.get_json()
poll_title, option = (poll['poll_title'], poll['option'])
join_tables = Polls.query.join(Topics).join(Options)
# Get topic and username from the database
topic = Topics.query.filter_by(title=poll_title, status=True).first()
user = Users.query.filter_by(username=session['user']).first()
# if poll was closed in the background before user voted
if not topic:
return jsonify({'message': 'Sorry! this poll has been closed'})
# filter options
option = join_tables.filter(Topics.title.like(poll_title), Topics.status == True).filter(Options.name.like(option)).first()
# check if the user has voted on this poll
poll_count = UserPolls.query.filter_by(topic_id=topic.id).filter_by(user_id=user.id).count()
if poll_count > 0:
return jsonify({'message': 'Sorry! multiple votes are not allowed'})
if option:
# record user and poll
user_poll = UserPolls(topic_id=topic.id, user_id=user.id)
db.session.add(user_poll)
# increment vote_count by 1 if the option was found
option.vote_count += 1
db.session.commit()
return jsonify({'message': 'Thank you for voting'})
return jsonify({'message': 'option or poll was not found please try again'})
@api.route('/poll/<poll_name>')
def api_poll(poll_name):
poll = Topics.query.filter(Topics.title.like(poll_name)).first()
return jsonify({'Polls': [poll.to_json()]}) if poll else jsonify({'message': 'poll not found'})
| gpl-3.0 | -3,323,841,830,138,290,700 | 32.962264 | 127 | 0.648889 | false |
jamesmunns/wate_backend | prototyping/create_schema.py | 1 | 1046 | import psycopg2
import getpass
username = getpass.getuser()
password = getpass.getpass("Database password for {}: ".format(username))
database = "wate"
def create_user_table(cursor):
user_schema = """
CREATE TABLE users (
id serial PRIMARY KEY,
name text NOT NULL,
username text NOT NULL,
email citext UNIQUE NOT NULL,
joindate date NOT NULL,
passhash character (60),
use_metric_units boolean,
emails_disabled boolean
);
"""
cursor.execute(user_schema)
def create_weight_table(cursor):
weight_schema = """
CREATE TABLE weights (
user_id integer REFERENCES users(id) NOT NULL,
weight_lbs numeric CHECK (weight_lbs > 0) NOT NULL,
measure_date date NOT NULL,
measure_time time);
"""
cursor.execute(weight_schema)
with psycopg2.connect(dbname=database, user=username, password=password) as conn:
with conn.cursor() as cur:
create_user_table(cur)
create_weight_table(cur)
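# Note (assumption about the target database): the users.email column uses the
# citext type, which is provided by the citext extension and must be enabled
# once per database before create_user_table() runs, e.g.:
#   CREATE EXTENSION IF NOT EXISTS citext;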
| mit | -8,867,129,375,044,253,000 | 27.27027 | 81 | 0.632887 | false |
Wilo/barcampMilagro2015 | pushfeed/pushfeed/pipelines.py | 1 | 1083 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import logging
from scrapy.conf import settings
from scrapy.exceptions import DropItem
import rethinkdb as r
#class PushfeedPipeline(object):
# def process_item(self, item, spider):
# return item
class RethinkdbPipeline(object):
"""docstring for RethinkdbPipeline"""
def __init__(self):
r.connect(settings['RETHINKDB_SERVER'], settings['RETHINKDB_PORT']).repl()
self.db = r.db(settings['RETHINKDB_DB']).table(settings['RETHINKDB_TABLE'])
def process_item(self, item, spider):
        # drop the item if any scraped field has an empty value
        for field in item:
            if not item.get(field):
                raise DropItem("Missing value for field: {}".format(field))
data = dict(title=item['title'][0], description=item['description'][0],
date=item['date'][0], link=item['link'][0], img=item['img'][0])
self.db.insert(data).run()
logging.log(logging.INFO,"Feed added to rethinkdb database!")
return item
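# Illustrative settings.py entries this pipeline relies on (keys taken from the
# settings lookups above; the values shown are examples only):
#   RETHINKDB_SERVER = 'localhost'
#   RETHINKDB_PORT = 28015
#   RETHINKDB_DB = 'pushfeed'
#   RETHINKDB_TABLE = 'feeds'
#   ITEM_PIPELINES = {'pushfeed.pipelines.RethinkdbPipeline': 300}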
| mit | 595,242,613,530,834,000 | 31.818182 | 83 | 0.649123 | false |
ask/kamqp | kamqp/client_0_8/__init__.py | 1 | 1253 | """AMQP Client implementing the 0-8 spec."""
# Copyright (C) 2007-2008 Barry Pederson <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
# Pull in the public items from the various sub-modules
#
from .basic_message import Message
from .channel import Channel
from .connection import Connection
from .exceptions import (AMQPError, AMQPConnectionError,
AMQPChannelError, AMQPInternalError)
__all__ = ["Connection", "Channel", "Message", "AMQPError",
"AMQPConnectionError", "AMQPChannelError",
"AMQPInternalError"]
| lgpl-2.1 | -5,888,403,056,893,450,000 | 42.206897 | 75 | 0.740623 | false |
hmpf/nav | python/nav/web/portadmin/urls.py | 1 | 1470 | #
# Copyright (C) 2011, 2013-2015 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""PortAdmin Django URL config"""
from django.conf.urls import url
from nav.web.portadmin import views
urlpatterns = [
url(r'^$',
views.index,
name='portadmin-index'),
url(r'^ip=(?P<ip>[\d\.]+)',
views.search_by_ip,
name='portadmin-ip'),
url(r'^sysname=(?P<sysname>\S+)',
views.search_by_sysname,
name='portadmin-sysname'),
url(r'^interfaceid=(?P<interfaceid>\d+)',
views.search_by_interfaceid,
name='portadmin-interface'),
url(r'^save_interfaceinfo',
views.save_interfaceinfo),
url(r'^restart_interface',
views.restart_interface),
url(r'^write_mem',
views.write_mem),
url(r'^trunk/(?P<interfaceid>\d+)',
views.render_trunk_edit,
name="portadmin-render-trunk-edit"),
]
| gpl-3.0 | 2,910,476,749,803,089,400 | 32.409091 | 79 | 0.667347 | false |
bmcage/centrifuge-1d | centrifuge1d/modules/direct_consolidation_saturated/options.py | 1 | 7263 | from __future__ import division, print_function
import sys
from ..shared.functions import lagrangian_derivative_coefs
from numpy import linspace, power, empty, array, log
from ..shared.consolidation import (create_CON, CON_SLURRY, CON_GOMPERTZ,
CON_FREEFORM, CON_SLURRY_CC, CON_SLURRY_KWA,
CON_WEIBULL)
def dtype_deps(cfg):
dtype = cfg.get_value('dtype')
result = []
if dtype == 1: pass
elif dtype in [2,3]: result = ['k_dx']
return result
PARENTAL_MODULES = ['base']
CONFIG_OPTIONS = ['inner_points', 'dtype',
('con_type', CON_SLURRY),
('con_max_refine', 0),
(lambda cfg: cfg.get_value('con_type') == CON_SLURRY,
['a', 'b', 'c', 'd']),
(lambda cfg: cfg.get_value('con_type') == CON_SLURRY_CC,
['a', 'cc', 'c', 'd']),
(lambda cfg: cfg.get_value('con_type') in [CON_GOMPERTZ,],
['a', 'b', 'c', 'd', 'cc']),
(lambda cfg: cfg.get_value('con_type') in [CON_WEIBULL,CON_SLURRY_KWA],
['b', 'e', 'f', 'c', 'd']),
(lambda cfg: cfg.get_value('con_type') in [CON_FREEFORM],
[('ei', None), ('si', None), ('ki', None), ('eiadd', None)]),
'porosity',
'estimate_zp0',
('L_atol', 1e-8),
dtype_deps,
# dependent
(lambda cfg: cfg.get_value('fl1') > 0.0,
['fp1'], [('fp1', -1.0)]),
(lambda cfg: cfg.get_value('fl2') > 0.0,
['fp2'], [('fp2', -1.0)]),
#
'rb_type',
# dependent
(lambda cfg: cfg.get_value('rb_type') == 2,
['h_last']),
(lambda cfg: cfg.get_value('rb_type') == 3,
['dip_height']),
'h_last',
'l0',
'wl0',
'density_s', #density sample in g/(cm^3)
('excess_load', [0]),
('excess_load_t',[0]),
('numerfact_e0', 0.999),
('e0_overshoot_factor', 0.),
]
INTERNAL_OPTIONS = ['m', 'y', 'y12', 'dy', 'alpha', 'ldc1', 'ldc2', 'ldc3',
'k_dx', 'wm0', 'CON',
'first_idx', 'last_idx', 'wl_idx', 'L_idx',
'mass_in_idx', 'mass_out_idx',
'z_size', 'gamma_w', 'gamma_s', 'e0']
EXCLUDE_FROM_MODEL = ['dtype']
PROVIDE_OPTIONS = []
OPTIONS_ITERABLE_LISTS = ['porosity']
def load_func(x, atimes, aloads, duration_change=10):
#print (x, atimes, aloads,aloads[x>=atimes])
x_load = aloads[x>=atimes][-1]
#10 sec later
x_offset_load = aloads[x+duration_change>=atimes][-1]
if (x_load == x_offset_load):
return x_load
else:
#load will change, change smootly to the change
t_new_load = atimes[x+duration_change>=atimes][-1]
val= (x - (t_new_load-duration_change))/duration_change * (x_offset_load-x_load) + x_load
return val
def create_excess_load(times, loads, duration_change=10):
if (len(times) != len(loads)):
print ("ERROR: excess loads and excess load times don't have same array sizes!")
sys.exit(0)
if (len(times) == 0 or (len(times) == 1 and times[0] == 0 and loads[0] == 0)):
#no loads
return lambda x: 0.
else:
atimes = array(times)
aloads = array(loads)
return lambda x: load_func(x, atimes, aloads, duration_change)
#return lambda x: aloads[x>=atimes][-1]
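# Worked example (illustrative numbers): with f = create_excess_load([0, 100], [0., 5.])
# an extra load of 5 starts at t=100 s and is ramped in linearly over the 10 s
# duration_change window, so f(50) == 0.0, f(95) == 2.5 and f(200) == 5.0.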
def adjust_cfg(cfg):
#specific weight water in g/(s cm^2)
cfg.set_value('gamma_w', cfg.get_value('density')*cfg.get_value('g'))
#specific weight sample in g/(s cm^2)
cfg.set_value('gamma_s', cfg.get_value('density_s')*cfg.get_value('g'))
# Discretization
inner_points = cfg.get_value('inner_points')
discretization_type = cfg.get_value('dtype')
if discretization_type == 1: # linear discretization
y = linspace(0, 1, inner_points + 2)
elif discretization_type in [2,3]: # L= a+ka+(k^2)a+...+(k^inner_points)a
# L=1 (as we use transformed interval <0,1>)
# L = a*[(1-k^(inner_points +1))/(1-k)]
k = cfg.get_value('k_dx')
a=(1-k)/(1-power(k, inner_points+1))
y= empty([inner_points+2, ])
y[0] = 0.0; y[-1] = 1.0
for i in range(1, inner_points+1):
y[i] = y[i-1] + a
a = a*k
if discretization_type == 3:
# invert it
tmp = y[::-1]
y[:] = 1. - tmp[:]
else:
print('Unsupported discretization type:', discretization_type)
exit(1)
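    # Worked example (illustrative): inner_points=3 and k_dx=2 give
    # a = (1-2)/(1-2**4) = 1/15, so y = [0, 1/15, 3/15, 7/15, 1] and the cell
    # widths 1/15, 2/15, 4/15, 8/15 grow by the factor k away from y=0
    # (dtype=3 mirrors the grid so the refinement sits at y=1 instead).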
#porosity and void ratio
por = cfg.get_value('porosity')
if not (0<por<1):
print ('Porosity must be a value between 0 and 1. Given:', por)
exit(1)
e0 = por/(1-por)
cfg.set_value('e0', e0)
print ('Consolidation: Calculated initial void ratio is', cfg.get_value('e0'))
ksguess = cfg.get_value('ks')
ks = ksguess
if cfg.get_value('con_type') in [CON_SLURRY, CON_GOMPERTZ]:
ks = (1+e0)*(cfg.get_value('c')+cfg.get_value('d')*e0)
cfg.set_value('ks', ks)
elif cfg.get_value('con_type') in [CON_SLURRY_CC, CON_SLURRY_KWA, CON_WEIBULL]:
ks = log(e0/cfg.get_value('c')) / cfg.get_value('d')
else:
print ("ERROR: cannot calculate the start ks as consolidation type is not known!")
sys.exit(0)
    print ('Consolidation: Your guessed ks', ksguess, 'has been replaced by the calculated value', ks, 'cm/s')
    try:
        back = raw_input("Continue? [Y/n] ")
    except NameError:  # Python 3: raw_input was renamed to input
        back = input("Continue? [Y/n] ")
if back.strip().lower() in ['n', "no"]:
sys.exit(0)
# Determine consolidation curve model used, all data is now available
cfg.set_value('CON', create_CON(cfg))
cfg.set_value('excess_load_f', create_excess_load(
cfg.get_value('excess_load_t'),
cfg.get_value('excess_load'),
duration_change=10))
cfg.set_value('y', y)
cfg.set_value('y12', (y[1:]+y[:-1])/2.)
dy = y[1:]-y[:-1]
alpha = empty([len(dy)+1, ])
alpha[0] = 0.
alpha[1:] = dy
cfg.set_value('dy', dy)
cfg.set_value('alpha', alpha)
ldc1, ldc2, ldc3 = lagrangian_derivative_coefs(dy)
cfg.set_value('ldc1', ldc1)
cfg.set_value('ldc2', ldc2)
cfg.set_value('ldc3', ldc3)
inner_points = cfg.get_value('inner_points')
cfg.set_value('sc_max_refine', 0)
cfg.set_value('first_idx', 0)
cfg.set_value('last_idx', inner_points+1)
cfg.set_value('mass_in_idx', inner_points+2)
cfg.set_value('wl_idx', inner_points+3)
cfg.set_value('L_idx', inner_points+4)
cfg.set_value('mass_out_idx', inner_points+5)
# total length of 'z' array (discretization points + s1,s2,mass_in,...)
cfg.set_value('z_size', inner_points+6)
def check_cfg(cfg):
    if cfg.get_value('wl0') is None and cfg.get_value('ww0') is None:
print("One of 'wl0' or 'ww0' parameters must be specified.")
return False
return True
| gpl-2.0 | 4,322,076,158,088,722,400 | 37.226316 | 101 | 0.514801 | false |