repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
stringlengths 5–92 | stringlengths 4–232 | stringclasses 19 values | stringlengths 4–7 | stringlengths 721–1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51–99.9 | int64 15–997 | float64 0.25–0.97 | bool 1 class
---|---|---|---|---|---|---|---|---|---|---
wiredrive/wtframework | wtframework/wtf/testobjects/tests/test_watched_test_case.py | 1 | 5126

##########################################################################
# This file is part of WTFramework.
#
# WTFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WTFramework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WTFramework. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from mockito import mock
from wtframework.wtf.testobjects.testcase import WatchedTestCase
import unittest2
__wtf_test_logger_var = []
class TestWatchedTestCaseWatchers(WatchedTestCase):

    def __init__(self, *args, **kwargs):
        super(TestWatchedTestCaseWatchers, self).__init__(*args, **kwargs)
        self.watcher = LoggerTestWatcher()
        self._register_watcher(self.watcher)

    def setUp(self):
        self.watcher.log.append("setUp")
        return

    def tearDown(self):
        self.watcher.log.append("tearDown")
        return

    def test_aaa_something(self):
        "Dummy test to set things up."
        self.watcher.log.append("test_aaa")
        return

    def test_bbb_something(self):
        "2nd dummy test to set things up."
        self.watcher.log.append("test_bbb")
        return

    def test_zzz_test_our_real_event_sequence(self):
        "Check the dummy test's sequence of events."
        # Keep in mind we're running a test after another test.
        expected = ['before_setup',  # generated by test_aaa
                    'setUp',
                    'before_test',
                    'test_aaa',
                    'on_test_pass',
                    'after_test',
                    'tearDown',
                    'after_teardown',
                    'before_setup',  # generated by test_bbb
                    'setUp',
                    'before_test',
                    'test_bbb',
                    'on_test_pass',
                    'after_test',
                    'tearDown',
                    'after_teardown',
                    'before_setup',  # generated by test_zzz
                    'setUp',
                    'before_test']
        print self.get_log()
        self.assertEqual(expected,
                         self.get_log())


class TestWatchedTestCase(unittest2.TestCase):

    def test_passed_test_case_runs_setup_and_cleanup(self):
        mockresult = mock(unittest2.TestResult)
        tc = TestCaseStub(methodName="runTest")
        tc.run(mockresult)
        self.assertTrue(tc.setupRan)
        self.assertTrue(tc.tearDownRan)

    def test_failed_setup_does_not_run_test_and_runs_cleanup(self):
        mockresult = mock(unittest2.TestResult)
        tc = TestCaseStub(methodName="runTest")
        tc.failSetup = True
        tc.run(mockresult)
        self.assertTrue(tc.tearDownRan)
        self.assertFalse(tc.testRan)

    def test_failed_test_does_not_complete_and_runs_cleanup(self):
        mockresult = mock(unittest2.TestResult)
        tc = TestCaseStub(methodName="runTest")
        tc.failTest = True
        tc.run(mockresult)
        self.assertTrue(tc.tearDownRan)
        self.assertTrue(tc.testRan)
        self.assertFalse(tc.testPassed)


class TestCaseStub(WatchedTestCase):

    setupRan = False
    testRan = False
    testPassed = False
    tearDownRan = False
    failSetup = False
    failTest = False

    def setUp(self):
        self.setupRan = True
        if self.failSetup:
            raise RuntimeError("test error")

    def tearDown(self):
        self.tearDownRan = True

    def runTest(self):
        self.testRan = True
        if self.failTest:
            raise RuntimeError("Failed test")
        self.testPassed = True


class LoggerTestWatcher(object):
    "This test watcher just logs actions to a list to verify order of events."

    log = []

    def before_setup(self, test_case, test_result):
        print "LoggerTestWatcher before_setup"
        self.log.append("before_setup")

    def before_test(self, test_case, test_result):
        self.log.append("before_test")

    def after_test(self, test_case, test_result):
        self.log.append("after_test")

    def after_teardown(self, test_case, test_result):
        self.log.append("after_teardown")

    def on_test_failure(self, test_case, test_result, exception):
        self.log.append("on_test_failure")

    def on_test_error(self, test_case, test_result, exception):
        self.log.append("on_test_error")

    def on_test_pass(self, test_case, test_result):
        self.log.append("on_test_pass")

    def get_log(self):
        return self.log


if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest2.main()
| gpl-3.0 | -9,188,379,825,369,977,000 | 29.331361 | 78 | 0.588958 | false |
ljmanso/AGM2 | tools/agmdsr/src/genericworker.py | 1 | 3256

#
# Copyright (C) 2017 by YOUR NAME HERE
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
import sys, Ice, os
from PySide import *
ROBOCOMP = ''
try:
    ROBOCOMP = os.environ['ROBOCOMP']
except KeyError:
    print '$ROBOCOMP environment variable not set, using the default value /opt/robocomp'
    ROBOCOMP = '/opt/robocomp'

preStr = "-I/opt/robocomp/interfaces/ -I"+ROBOCOMP+"/interfaces/ --all /opt/robocomp/interfaces/"
Ice.loadSlice(preStr+"CommonBehavior.ice")
import RoboCompCommonBehavior

additionalPathStr = ''
icePaths = []
try:
    SLICE_PATH = os.environ['SLICE_PATH'].split(':')
    for p in SLICE_PATH:
        icePaths.append(p)
        additionalPathStr += ' -I' + p + ' '
    icePaths.append('/opt/robocomp/interfaces')
except:
    print 'SLICE_PATH environment variable was not exported. Using only the default paths'
    pass

ice_AGM2 = False
for p in icePaths:
    print 'Trying', p, 'to load AGM2.ice'
    if os.path.isfile(p+'/AGM2.ice'):
        print 'Using', p, 'to load AGM2.ice'
        preStr = "-I/opt/robocomp/interfaces/ -I"+ROBOCOMP+"/interfaces/ " + additionalPathStr + " --all "+p+'/'
        wholeStr = preStr+"AGM2.ice"
        Ice.loadSlice(wholeStr)
        ice_AGM2 = True
        break
if not ice_AGM2:
    print 'Couldn\'t load AGM2'
    sys.exit(-1)

from RoboCompAGM2 import *
from agmdsrserviceI import *

import rospy
from std_msgs.msg import *
try:
    from RoboCompAGM2ROS.msg import *
except:
    print "couldn't load msg"
from RoboCompAGM2ROS.srv import *


# class for rosPublisher
class PublisherAGMDSRTopic():

    def __init__(self):
        self.pub_structuralChange = rospy.Publisher("structuralChange", World, queue_size=1000)
        self.pub_edgesUpdated = rospy.Publisher("edgesUpdated", EdgeSequence, queue_size=1000)
        self.pub_symbolsUpdated = rospy.Publisher("symbolsUpdated", NodeSequence, queue_size=1000)

    def structuralChange(self, w):
        self.pub_structuralChange.publish(w)

    def edgesUpdated(self, modification):
        self.pub_edgesUpdated.publish(modification)

    def symbolsUpdated(self, modification):
        self.pub_symbolsUpdated.publish(modification)


class GenericWorker(QtCore.QObject):

    kill = QtCore.Signal()

    def __init__(self, mprx):
        super(GenericWorker, self).__init__()
        self.agmdsrtopic_proxy = mprx["AGMDSRTopicPub"]
        self.agmdsrtopic_rosproxy = PublisherAGMDSRTopic()
        self.mutex = QtCore.QMutex(QtCore.QMutex.Recursive)
        self.Period = 30
        self.timer = QtCore.QTimer(self)

    @QtCore.Slot()
    def killYourSelf(self):
        rDebug("Killing myself")
        self.kill.emit()

    # \brief Change compute period
    # @param per Period in ms
    @QtCore.Slot(int)
    def setPeriod(self, p):
        print "Period changed", p
        self.Period = p
        self.timer.start(self.Period)
| gpl-3.0 | 1,309,409,873,425,347,300 | 28.333333 | 106 | 0.727273 | false |
raphaelvalentin/Utils | ngspice/syntax/nport.py | 1 | 5197

from ngspice.syntax import *
from functions.science import flatten
from rawdata import touchstone
from interpolate import interp1d

__all__ = ['Nport']


class VCVSx(Netlist):
    __name__ = "vcvs"
    __type__ = "instance"
    __indent__ = ""

    def __init__(self, name='E1', nodes=('in', 'out', 'sensp', 'sensm'), gain=complex(0, 0), freq=1e9):
        self.name = name
        self.nodes = nodes
        self.gain = gain
        self.freq = freq
        n1, n2, n3 = newnode(), newnode(), newnode()
        self.e1, self.e2, self.l1 = newname('e'), newname('e'), newname('l')
        self.append( VCVS(name=self.e1, nodes=(nodes[0], n1, nodes[2], nodes[3]), gain=gain.real) )
        self.append( VCVS(name=self.e2, nodes=(n1, nodes[1], n2, '0'), gain=abs(gain)) )
        self.append( VCCS(name=newname('g'), nodes=('0', n2, nodes[2], nodes[3]), gain=1.0) )
        self.append( Inductor(name=self.l1, nodes=(n2, '0'), l=gain.imag/(2.0*pi*freq)/abs(gain)) )
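        # Note (added): the complex gain is synthesized from real elements --
        # e1 contributes the real part directly, while the unity-gain VCCS
        # drives l1 (whose reactance equals imag(gain)/abs(gain) at `freq`)
        # so that e2, scaled by abs(gain), adds the quadrature (imaginary)
        # component.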
    def alter(self, gain, freq):
        netlist = Netlist()
        netlist.append( Alter(self.e1, gain=gain.real) )
        netlist.append( Alter(self.e2, gain=abs(gain)) )
        netlist.append( Alter(self.l1, gain.imag/(2.0*pi*freq)/abs(gain)) )
        return netlist


class OnePort(Netlist):
    __name__ = "oneport"
    __type__ = "instance"

    def __init__(self, name='oneport1', nodes=('1', '0'), gain=complex(0, 0), freq=1e9):
        self.name = name
        self.nodes = nodes
        n1, n2 = newnode(), newnode()
        self.append( Resistor(name=newname('r'), nodes=(nodes[0], n1), r=-50) )
        self.append( Resistor(name=newname('r'), nodes=(n1, n2), r=100) )
        self.append( VCVS(nodes=(n2, nodes[1], n1, nodes[1]), gain=gain, freq=freq) )

    def alter(self, gain):
        return self[2].alter(gain)


class Nport(Netlist):
    # http://analog-innovation.com/CreateS-ParameterSubcircuitsforSpice.pdf
    __name__ = "nport"
    __type__ = "instance"

    def __init__(self, name='nport1', nodes=('1', '0', '2', '0'), file="", freq=None):
        self.name = name
        self.nodes = nodes
        self.file = file
        self.data = touchstone.snp(self.file).read()
        x = []
        for i in xrange(len(nodes)/2):
            row = []
            for j in xrange(len(nodes)/2):
                if freq:
                    freqs = self.data['freq']
                    sij = self.data['s%d%d'%(i+1,j+1)]
                    xsij = interp1d(freqs, sij)(freq)
                    row.append( xsij )
                else:
                    row.append( self.data['s%d%d'%(i+1,j+1)][0] )
                    freq = self.data['freq'][0]
            x.append(row)
        self._ivcvs = []
        n1 = [newnode() for i in xrange(len(nodes)/2)]
        for i in xrange(len(nodes)/2):
            self.append( Resistor(name=newname('r'), nodes=(nodes[i*2], n1[i]), r=-50) )
            n2 = [newnode() for _i in xrange(len(nodes)/2)] + [nodes[1]]
            self.append( Resistor(name=newname('r'), nodes=(n1[i], n2[0]), r=100) )
            for j in xrange(len(nodes)/2):
                self.append( VCVSx(nodes=(n2[j], n2[j+1], n1[j], nodes[-1]), gain=x[i][j], freq=freq) )
                self._ivcvs.append( len(self)-1 )

    def alter(self, freq):
        x = []
        for i in xrange(len(self.nodes)/2):
            row = []
            for j in xrange(len(self.nodes)/2):
                freqs = self.data['freq']
                sij = self.data['s%d%d'%(i+1,j+1)]
                xsij = interp1d(freqs, sij)(freq)
                row.append( xsij )
            x.append(row)
        y = list(flatten(x))
        netlist = Netlist()
        for i, k in enumerate(self._ivcvs):
            netlist.append( self[k].alter(y[i], freq) )
        return netlist


class Nport1(Netlist):
    # http://analog-innovation.com/CreateS-ParameterSubcircuitsforSpice.pdf
    __name__ = "nport"
    __type__ = "instance"

    def __init__(self, name='nport1', nodes=('1', '0', '2', '0'), file="", freq=None):
        self.name = name
        self.nodes = nodes
        self.file = file
        self.data = touchstone.snp(self.file).read()
        x = []
        if freq:
            for i in xrange(len(nodes)/2):
                row = []
                for j in xrange(len(nodes)/2):
                    freqs = self.data['freq']
                    sij = self.data['s%d%d'%(i+1,j+1)]
                    xsij = interp1d(freqs, sij)(freq)
                    row.append( xsij )
                x.append(row)
        else:
            for i in xrange(len(nodes)/2):
                row = []
                for j in xrange(len(nodes)/2):
                    row.append( self.data['s%d%d'%(i+1,j+1)][0] )
                    freq = self.data['freq'][0]
                x.append(row)
        self._ivcvs = []
        n1 = [newnode() for i in xrange(len(nodes)/2)]
        for i in xrange(len(nodes)/2):
            self.append( Resistor(name=newname('r'), nodes=(nodes[i*2], n1[i]), r=-50) )
            n2 = [newnode() for _i in xrange(len(nodes)/2)] + [nodes[1]]
            self.append( Resistor(name=newname('r'), nodes=(n1[i], n2[0]), r=100) )
            for j in xrange(len(nodes)/2):
                self.append( VCVSx(nodes=(n2[j], n2[j+1], n1[j], nodes[-1]), gain=x[i][j], freq=freq) )
                self._ivcvs.append( len(self)-1 )

    def alter(self, freq):
        y = []
        n = len(self.nodes)/2
        freqs = self.data['freq']
        for i in xrange(n):
            for j in xrange(n):
                sij = self.data['s%d%d'%(i+1,j+1)]
                xsij = interp1d(freqs, sij)(freq)
                y.append( xsij )
        netlist = Netlist()
        for k, yi in zip(self._ivcvs, y):
            netlist.extend( self[k].alter(yi, freq) )
        return netlist
| gpl-2.0 | 2,556,812,261,765,248,500 | 34.59589 | 103 | 0.557629 | false |
kylef/pyppp | pyppp/django/forms.py | 1 | 3810

from django import forms
from django.conf import settings
from django.http import HttpResponseRedirect
from django.views.decorators.cache import never_cache
from django.contrib.auth import authenticate, REDIRECT_FIELD_NAME
from django.contrib.formtools.wizard import FormWizard
from pyppp.django import login
from pyppp.django.models import UserPPP
class UserFormBase(forms.Form):

    def __init__(self, *args, **kwargs):
        self.user_cache = None
        super(UserFormBase, self).__init__(*args, **kwargs)

    def get_user_id(self):
        if self.user_cache:
            return self.user_cache.id
        return None

    def get_user(self):
        return self.user_cache


class AuthenticationForm(UserFormBase):
    username = forms.CharField(max_length=30)
    password = forms.CharField(widget=forms.PasswordInput)

    def clean(self):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        if username and password:
            self.user_cache = authenticate(username=username, password=password)
            if self.user_cache is None:
                raise forms.ValidationError('Please enter a correct username and password. Note that both fields are case-sensitive.')
            elif not self.user_cache.is_active:
                raise forms.ValidationError('This account is inactive')
        return self.cleaned_data


class PasscodeForm(UserFormBase):
    username = forms.CharField(max_length=30)
    passcode = forms.CharField(max_length=4)
    card = forms.CharField(max_length=8)
    code = forms.CharField(max_length=8)

    def __init__(self, *args, **kwargs):
        super(PasscodeForm, self).__init__(*args, **kwargs)
        self.fields['username'].widget.attrs['readonly'] = True
        self.fields['card'].widget.attrs['readonly'] = True
        self.fields['code'].widget.attrs['readonly'] = True

    def clean(self):
        if self.user_cache is not None:
            return self.cleaned_data
        username = self.cleaned_data.get('username')
        passcode = self.cleaned_data.get('passcode')
        if username and passcode:
            self.user_cache = authenticate(username=username, passcode=passcode)
            if self.user_cache is None:
                raise forms.ValidationError('Incorrect passcode.')
        return self.cleaned_data


class LoginWizard(FormWizard):

    def parse_params(self, request, *args, **kwargs):
        current_step = self.determine_step(request, *args, **kwargs)
        if request.method == 'POST' and current_step == 0:
            request.session.set_test_cookie()
            form = self.get_form(current_step, request.POST)
            if form.is_valid():
                ppp, created = UserPPP.objects.get_or_create(user=form.user_cache)
                passcode_info = ppp.get_current_sequence_info()
                self.initial[(current_step + 1)] = {
                    'username': form.cleaned_data.get('username'),
                    'card': passcode_info['card'],
                    'code': '%s%s' % (passcode_info['row'], passcode_info['column'])
                }

    def get_template(self, step):
        return 'pyppp/form.html'

    def done(self, request, form_list):
        if not request.session.test_cookie_worked():
            print "Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in."
        redirect_to = request.REQUEST.get(REDIRECT_FIELD_NAME, '')
        if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
            redirect_to = settings.LOGIN_REDIRECT_URL
        login(request, form_list[1].get_user())
        return HttpResponseRedirect(redirect_to)
| bsd-2-clause | 4,494,156,654,408,472,000 | 38.6875 | 134 | 0.623622 | false |
google/nerfactor | nerfactor/models/base.py | 1 | 5484

# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf

import losses
from networks import base as basenet
from util import logging as logutil


logger = logutil.Logger(loggee="models/base")


class Model(tf.keras.Model):
    """Uses only the parent's trackability and nothing else.
    """

    def __init__(self, config, debug=False):
        super().__init__()
        self.config = config
        self.debug = debug
        if debug:
            logger.warn("Model in debug mode; behavior may be different")
        self.net = {'main': basenet.Network()}  # NOTE: insert trainable networks
        # of your model into this dictionary, values of which will be registered
        # as trainable
        self.trainable_registered = False  # NOTE: before training, call
        # register_trainable() to register trainable parameters (which lie in
        # self.net)
        # Initialize loss functions and parse weights
        self.wloss = self._init_loss()  # NOTE: list of weight and
        # (initialized) loss function pairs

    def _init_loss(self):
        wloss = []
        loss_str = self.config.get('DEFAULT', 'loss')
        for x in loss_str.split(','):
            loss_name, weight = self._parse_loss_and_weight(x)
            if loss_name == 'lpips':
                loss = losses.LPIPS(per_ch=False)
            elif loss_name == 'elpips':
                bs = self.config.getint('DEFAULT', 'bs')
                loss = losses.ELPIPS(bs)
            elif loss_name == 'l1':
                loss = losses.L1()
            elif loss_name == 'l2':
                loss = losses.L2()
            elif loss_name == 'ssim':
                loss = losses.SSIM(1 - 0)
            else:
                raise NotImplementedError(loss_name)
            wloss.append((weight, loss))
        return wloss

    @staticmethod
    def _parse_loss_and_weight(weight_loss_str):
        """Handles strings like '1e+2lpips' or 'l1,10barron'.
        """
        # Start from the back because looking for the longest string that
        # can be converted to a float
        for i in range(len(weight_loss_str), -1, -1):
            try:
                weight = float(weight_loss_str[:i])
            except ValueError:
                continue
            loss_name = weight_loss_str[i:]
            return loss_name, weight
        # Weight not specified
        return weight_loss_str, 1.

    def register_trainable(self):
        """Trackable objects (such as Keras sequentials and layers) must be
        directly under `self` to be registered to `trainable_variables`, so
        this function simply adds aliases directly under `self` to all nets'
        trainable variables.
        """
        registered = []
        pref = 'net_'
        for net_name, net in self.net.items():
            attr_name = pref + net_name
            assert attr_name.isidentifier(), (
                "Prepending '{pref}' to your network name '{net}' doesn't "
                "make a valid identifier; change your network name").format(
                    pref=pref, net=net_name)
            for layer_i, layer in enumerate(net.layers):
                if layer.trainable:
                    attr_name_full = attr_name + '_layer%d' % layer_i
                    assert not hasattr(self, attr_name_full), (
                        "Can't register `{}` because it is already an "
                        "attribute").format(attr_name_full)
                    setattr(self, attr_name_full, layer)
                    registered.append(attr_name_full)
        logger.info("Trainable layers registered:\n\t%s", registered)
        self.trainable_registered = True

    @staticmethod
    def _validate_mode(mode):
        allowed_modes = ('train', 'vali', 'test')
        if mode not in allowed_modes:
            raise ValueError(mode)

    def call(self, batch, mode='train'):
        """
        Returns:
            tuple:
                - **pred**
                - **gt**
                - **loss_kwargs** (*dict*) -- Keyword arguments for loss
                  computation.
                - **to_vis** (*dict*) -- Tensors to visualize.
        """
        raise NotImplementedError

    def compute_loss(self, pred, gt, **kwargs):
        """
        Returns:
            tf.Tensor: Loss.
        """
        raise NotImplementedError

    def vis_batch(self, data_dict, outdir, mode='train', dump_raw_to=None):
        raise NotImplementedError

    def compile_batch_vis(self, batch_vis_dirs, outpref, mode='train'):
        """Compiles batch visualizations into a consolidated view.

        Returns:
            str: Convenient link to your consolidated view, which will be
            logged into TensorBoard. So you should add proper file extension
            (and maybe also file viewer prefix), returning something like
            ``'http://your.file.viewer/' + outpref + '.html'``.
        """
        raise NotImplementedError
| apache-2.0 | -6,663,748,743,042,805,000 | 37.34965 | 80 | 0.578957 | false |
dials/dials | command_line/apply_mask.py | 1 | 3188

import pickle
from dxtbx.format.image import ImageBool
from iotbx.phil import parse
import dials.util
help_message = """

This program augments an experiments JSON file with one or more masks specified by the
user. Its only function is to input the mask file paths to the experiments JSON file,
but means that the user does not have to edit the experiments file by hand.

Crucially, the mask files must be provided in the same order as their corresponding
imagesets (sequences) appear in the experiments JSON file.

Examples::

  dials.apply_mask models.expt input.mask=pixels.mask

  dials.apply_mask models.expt input.mask=pixels1.mask input.mask=pixels2.mask

"""

phil_scope = parse(
    """
    input {
        mask = None
            .multiple = True
            .type = str
            .help = "The mask filenames, one mask per imageset"
    }

    output {
        experiments = masked.expt
            .type = str
            .help = "Name of output experiments file"
    }
    """,
    process_includes=True,
)


class Script:
    """A class to encapsulate the script."""

    def __init__(self):
        """Initialise the script."""
        from dials.util.options import OptionParser

        # Create the parser
        usage = "dials.apply_mask models.expt input.mask=pixels.mask"
        self.parser = OptionParser(
            usage=usage, epilog=help_message, phil=phil_scope, read_experiments=True
        )

    def run(self, args=None):
        """Run the script."""
        from dials.util import Sorry
        from dials.util.options import flatten_experiments

        # Parse the command line arguments
        params, options = self.parser.parse_args(args, show_diff_phil=True)
        experiments = flatten_experiments(params.input.experiments)

        # Check that an experiment list and at least one mask file have been provided
        if not (experiments and params.input.mask):
            self.parser.print_help()
            return

        # Check number of experiments
        n_expts = len(experiments)
        n_masks = len(params.input.mask)
        if n_expts != n_masks:
            raise Sorry(
                "The number of masks provided must match the number of imagesets "
                "(sequences).\n"
                "You have provided an experiment list containing {} imageset(s).\n"
                "You have provided {} mask file(s).".format(n_expts, n_masks)
            )

        # Get the imageset
        imagesets = experiments.imagesets()
        for i, imageset in enumerate(imagesets):
            # Set the lookup
            with open(params.input.mask[i], "rb") as f:
                mask = pickle.load(f, encoding="bytes")
            imageset.external_lookup.mask.filename = params.input.mask[i]
            imageset.external_lookup.mask.data = ImageBool(mask)

        # Dump the experiments
        print(f"Writing experiments to {params.output.experiments}")
        experiments.as_file(filename=params.output.experiments)


@dials.util.show_mail_handle_errors()
def run(args=None):
    script = Script()
    script.run(args)


if __name__ == "__main__":
    run()
| bsd-3-clause | 306,330,743,678,722,560 | 29.951456 | 86 | 0.620452 | false |
WGBH/django-textplusstuff | textplusstuff/datastructures.py | 1 | 5466

from __future__ import unicode_literals
import json
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from .parser import (
MarkdownFlavoredTextNode,
ModelStuffNode,
TextPlusStuffLexer,
TextPlusStuffParser
)
class TextPlusStuff(object):

    def __init__(self, raw_text, field=None):
        raw_text = raw_text or ""
        if not isinstance(raw_text, str):
            raise UnicodeError(
                (
                    "TextPlusStuff can only be initialized with either "
                    "unicode or UTF-8 strings."
                )
            )
        else:
            raw_text_processed = force_text(raw_text, errors='replace')
        self.raw_text = raw_text_processed
        # Initialize lexer
        lexer = TextPlusStuffLexer(raw_val=raw_text_processed)
        # Use the lexer to create tokens
        tokens = lexer.tokenize()
        # Pass tokens to parser and parse
        self.nodelist = TextPlusStuffParser(tokens=tokens).parse()

    def render(self, render_markdown_as, **kwargs):
        """
        Renders a TextPlusStuffField

        `render_markdown_as`: The format that markdown-flavored text should
        be transformed in. Options: `html`, `markdown`, `plain_text`
        """
        final_output = ""
        include_content_nodes = kwargs.pop('include_content_nodes', True)
        extra_context = kwargs.pop('extra_context', None)
        for node in self.nodelist:
            if isinstance(node, MarkdownFlavoredTextNode):
                final_output += node.render(render_as=render_markdown_as)
            elif isinstance(node, ModelStuffNode):
                if include_content_nodes is False:
                    pass
                else:
                    final_output += node.render(extra_context=extra_context)
        return final_output

    def as_html(self, **kwargs):
        """
        Renders a TextPlusStuffField as HTML.

        Optional keyword arguments:
        * `include_content_nodes`: Boolean signifying whether or not to render
          content nodes (i.e. ModelStuff tokens).
          Defaults to `True`.
        """
        return mark_safe(
            self.render(
                'html',
                include_content_nodes=kwargs.pop(
                    'include_content_nodes', True
                ),
                extra_context=kwargs.pop('extra_context', None)
            )
        )
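    # Illustrative usage (added; not part of the original module) -- given a
    # hypothetical field value:
    #   TextPlusStuff('Hello **world**').as_html()
    # returns the markdown rendered as HTML, with any ModelStuff tokens
    # expanded in place (exact markup depends on the parser and renderers).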
    def as_json(self, **kwargs):
        """
        Renders a TextPlusStuffField as a JSON object.

        * `render_markdown_as`: The format that markdown-flavored text should
          be transformed in. Options: `html` (default), `markdown`, `plain_text`.
        """
        final_output_as_html = ""
        final_output_as_markdown = ""
        include_content_nodes = kwargs.pop('include_content_nodes', True)
        extra_context = kwargs.pop('extra_context', None)
        convert_to_json_string = kwargs.pop('convert_to_json_string', False)
        model_stuff_node_counter = 0
        model_stuff_node_context_list = []
        for node in self.nodelist:
            if isinstance(node, MarkdownFlavoredTextNode):
                final_output_as_html += node.render(render_as='html')
                final_output_as_markdown += node.render(render_as='markdown')
            elif isinstance(node, ModelStuffNode):
                if include_content_nodes is True:
                    final_output_as_markdown += "{{{{ NODE__{index} }}}}"\
                        .format(
                            index=model_stuff_node_counter
                        )
                    final_output_as_html += (
                        '<span data-textplusstuff-contentnode-arrayindex='
                        '"{index}"></span>'
                    ).format(index=model_stuff_node_counter)
                    model_stuff_node_context_list.append({
                        'model': '{}:{}'.format(
                            node.node_mapping.get('content_type__app_label'),
                            node.node_mapping.get('content_type__model')
                        ),
                        'rendition': node.get_rendition().short_name,
                        'context': node.get_node_context(
                            extra_context=extra_context
                        )
                    })
                    model_stuff_node_counter += 1
        dict_to_return = {
            'text_as_markdown': final_output_as_markdown,
            'text_as_html': final_output_as_html,
            'content_nodes': model_stuff_node_context_list
        }
        to_return = dict_to_return
        if convert_to_json_string is True:
            to_return = json.dumps(dict_to_return)
        return to_return

    def as_plaintext(self, **kwargs):
        """
        Renders a TextPlusStuffField as plain text (all markdown
        formatting removed).

        Content nodes (i.e. ModelStuff tokens) will not be rendered.
        """
        return self.render(
            'plain_text',
            include_content_nodes=False
        )

    def as_markdown(self, **kwargs):
        """
        Renders a TextPlusStuffField as markdown.

        Content nodes (i.e. ModelStuff tokens) will not be rendered.
        """
        return self.render(
            'markdown',
            include_content_nodes=False
        )


__all__ = ('TextPlusStuff',)
| mit | 2,719,036,316,729,344,000 | 35.932432 | 79 | 0.5397 | false |
horazont/aiosasl | tests/test_stringprep.py | 1 | 5198

########################################################################
# File name: test_stringprep.py
# This file is part of: aiosasl
#
# LICENSE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
import unittest

from aiosasl.stringprep import (
    saslprep,
    check_bidi,
    trace,
)


class Testcheck_bidi(unittest.TestCase):
    # some test cases which are not covered by the other tests

    def test_empty_string(self):
        check_bidi("")

    def test_L_RAL_violation(self):
        with self.assertRaises(ValueError):
            check_bidi("\u05be\u0041")


class TestSASLprep(unittest.TestCase):

    def test_map_to_nothing_rfcx(self):
        self.assertEqual(
            "IX",
            saslprep("I\u00ADX"),
            "SASLprep requirement: map SOFT HYPHEN to nothing")

    def test_map_to_space(self):
        self.assertEqual(
            "I X",
            saslprep("I\u00A0X"),
            "SASLprep requirement: map NO-BREAK SPACE to space")

    def test_identity_rfcx(self):
        self.assertEqual(
            "user",
            saslprep("user"),
            "SASLprep requirement: identity transform")

    def test_case_preservation_rfcx(self):
        self.assertEqual(
            "USER",
            saslprep("USER"),
            "SASLprep requirement: preserve case")

    def test_nfkc_rfcx(self):
        self.assertEqual(
            "a",
            saslprep("\u00AA"),
            "SASLprep requirement: NFKC")
        self.assertEqual(
            "IX",
            saslprep("\u2168"),
            "SASLprep requirement: NFKC")

    def test_prohibited_character_rfcx(self):
        with self.assertRaises(
                ValueError,
                msg="SASLprep requirement: prohibited character (C.2.1)"):
            saslprep("\u0007")

        with self.assertRaises(
                ValueError,
                msg="SASLprep requirement: prohibited character (C.8)"):
            saslprep("\u200E")

    def test_bidirectional_check_rfcx(self):
        with self.assertRaises(
                ValueError,
                msg="SASLprep requirement: bidirectional check"):
            saslprep("\u0627\u0031")

    def test_unassigned(self):
        with self.assertRaises(
                ValueError,
                msg="SASLprep requirement: unassigned"):
            saslprep("\u0221", allow_unassigned=False)

        with self.assertRaises(
                ValueError,
                msg="enforce no unassigned by default"):
            saslprep("\u0221")

        self.assertEqual(
            "\u0221",
            saslprep("\u0221", allow_unassigned=True))


class Testtrace(unittest.TestCase):

    def test_identity_rfcx(self):
        self.assertEqual(
            "user",
            trace("user"),
            "trace requirement: identity transform")

    def test_case_preservation_rfcx(self):
        self.assertEqual(
            "USER",
            trace("USER"),
            "trace requirement: preserve case")

    def test_prohibited_character_rfcx(self):
        with self.assertRaises(
                ValueError,
                msg="trace requirement: prohibited character (C.2.1)"):
            trace("\u0007")

        with self.assertRaises(
                ValueError,
                msg="trace requirement: prohibited character (C.2.2)"):
            trace("\u070F")

        with self.assertRaises(
                ValueError,
                msg="trace requirement: prohibited character (C.3)"):
            trace("\uE000")

        with self.assertRaises(
                ValueError,
                msg="trace requirement: prohibited character (C.4)"):
            trace("\uFDEF")

        with self.assertRaises(
                ValueError,
                msg="trace requirement: prohibited character (C.5)"):
            trace("\uD800")

        with self.assertRaises(
                ValueError,
                msg="trace requirement: prohibited character (C.6)"):
            trace("\uFFF9")

        with self.assertRaises(
                ValueError,
                msg="trace requirement: prohibited character (C.8)"):
            trace("\u0340")

        with self.assertRaises(
                ValueError,
                msg="trace requirement: prohibited character (C.9)"):
            trace("\U000E0001")

    def test_bidirectional_check_rfcx(self):
        with self.assertRaises(
                ValueError,
                msg="trace requirement: bidirectional check"):
            trace("\u0627\u0031")
| lgpl-3.0 | 873,962,870,824,638,300 | 30.125749 | 74 | 0.558869 | false |
prophile/jacquard | jacquard/directory/base.py | 1 | 1451

"""Base class for directory implementations."""
import abc
import collections
UserEntry = collections.namedtuple("UserEntry", "id join_date tags")
UserEntry.__doc__ = """
Description of attributes of a single user.
Internally this is a `collections.namedtuple`.
"""
UserEntry.id.__doc__ = """String user ID."""
UserEntry.join_date.__doc__ = """Date at which the user is considered to have joined."""
UserEntry.tags.__doc__ = """Container of tags which apply to this user, defined by the directory."""
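# Illustrative only (added; not in the original module): constructing an
# entry with hypothetical values.
#   >>> import datetime
#   >>> entry = UserEntry(id='u1', join_date=datetime.date(2017, 1, 1), tags=('beta',))
#   >>> entry.id
#   'u1'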
class Directory(metaclass=abc.ABCMeta):
    """User directory."""

    @abc.abstractmethod
    def __init__(self, **kwargs):
        """
        Standard constructor.

        Keyword arguments are taken from the `directory` section of config
        files, and appear as strings. Useful for specifying connection URLs
        etc.
        """
        raise NotImplementedError

    @classmethod
    def from_configuration(cls, config, options):
        """
        Construct from a Config object, and a dictionary of options.

        By default this does not use the `config` and just defers to
        `__init__` passing the options as kwargs.
        """
        return cls(**options)

    @abc.abstractmethod
    def lookup(self, user_id):
        """
        Look up user by ID.

        For missing users this must return None, otherwise it must return a
        corresponding `UserEntry`.
        """
        raise NotImplementedError
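
# A minimal sketch (added; not part of the original module) of a concrete
# directory backed by an in-memory dict. The class and its data are
# hypothetical; real implementations would wrap a database or HTTP service.
class DictDirectory(Directory):
    """Directory backed by a plain {user_id: UserEntry} mapping."""

    def __init__(self, **kwargs):
        # Config options arrive as strings; this sketch ignores them.
        self.users = {}

    def lookup(self, user_id):
        # Return None for missing users, per the `Directory.lookup` contract.
        return self.users.get(user_id)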
| mit | -3,654,069,613,207,161,300 | 27.45098 | 100 | 0.645762 | false |
UfSoft/trac-google-search | tracext/google/search/__init__.py | 1 | 3529

# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8 et
# ==============================================================================
# Copyright © 2008 UfSoft.org - Pedro Algarvio <[email protected]>
#
# Please view LICENSE for additional licensing information.
# ==============================================================================
__version__ = '0.1.3'
__author__ = 'Pedro Algarvio'
__email__ = '[email protected]'
__packagename__ = 'TracGoogleSearch'
__license__ = 'BSD'
__url__ = 'http://google.ufsoft.org'
__summary__ = 'Google Adsense Search Plugin for Trac'
import pkg_resources

from trac.config import Option, BoolOption, IntOption
from trac.core import Component, implements
from trac.env import IEnvironmentSetupParticipant
from trac.web.chrome import ITemplateProvider

# ==============================================================================
# Google Search Config
# ==============================================================================
class GoogleSearchConfig(Component):

    google_search_active = BoolOption(
        'google.search', 'google_search_active', True,
        """Enable Google Adsense search."""
    )
    search_form_id = Option('google.search', 'search_form_id', 'search',
        """The form ID where the adsense for search code should be placed.
        The default is "search" which is trac's mini search form. Content will
        be replaced."""
    )
    search_form_text_input_width = IntOption(
        'google.search', 'search_form_text_input_width', 31,
        """
        Initial width (number of characters) of the search string text input.
        """
    )
    search_form_forid = Option('google.search', 'search_form_forid', '',
        """This is the value of the hidden input with the name "cof" that
        Google gives on their code, usually something like "FORID:n" where n
        is an integer value. This cannot be empty."""
    )
    search_form_client_id = Option('google.search', 'search_form_client_id', '',
        """This is the value of the hidden input with the name "cx" that
        Google gives on their code, usually something like
        "partner-pub-0000000000000000:0aaa0aaa00a" (this is just an example).
        This cannot be empty."""
    )
    search_iframe_initial_width = IntOption(
        'google.search', 'search_iframe_initial_width', 800,
        """
        Initial width of the IFRAME that Google returns. It will then increase
        the available width of the div by the ID "content".
        This value should not be too big because resizing only occurs
        correctly if the initial size is smaller than the available width.
        """
    )


# ==============================================================================
# Google Search Resources
# ==============================================================================
class GoogleSearchResources(Component):
    implements(ITemplateProvider)

    # ITemplateProvider methods
    def get_htdocs_dirs(self):
        """Return the absolute path of a directory containing additional
        static resources (such as images, style sheets, etc).
        """
        yield 'googlesearch', pkg_resources.resource_filename(__name__,
                                                              'htdocs')

    def get_templates_dirs(self):
        """Return the absolute path of the directory containing the provided
        Genshi templates.
        """
        yield pkg_resources.resource_filename(__name__, 'templates')
| bsd-3-clause | 4,980,436,316,728,324,000 | 42.02439 | 80 | 0.55924 | false |
skymill/automated-ebs-snapshots | automated_ebs_snapshots/command_line_options.py | 1 | 3302

""" Command line options """
import argparse
import sys
import os.path
from ConfigParser import SafeConfigParser

from automated_ebs_snapshots.valid_intervals import VALID_INTERVALS

settings = SafeConfigParser()
settings.read('{}/settings.conf'.format(
    os.path.dirname(os.path.realpath(__file__))))

parser = argparse.ArgumentParser(
    description='Automatic AWS EBS snapshot handling')
aws_config_ag = parser.add_argument_group(
    title='AWS configuration options')
aws_config_ag.add_argument(
    '--access-key-id',
    help='AWS access key')
aws_config_ag.add_argument(
    '--secret-access-key',
    help='AWS secret access key')
aws_config_ag.add_argument(
    '--region',
    default='us-east-1',
    help='AWS region. Default: us-east-1')
general_ag = parser.add_argument_group(
    title='General')
general_ag.add_argument(
    '-c', '--config',
    help='Configuration file to read')
general_ag.add_argument(
    '-i', '--interval',
    default='daily',
    help=(
        'Volume snapshotting interval. Default: daily. '
        'Valid values are: {}'.format(', '.join(VALID_INTERVALS))))
general_ag.add_argument(
    '-r', '--retention',
    default=0,
    type=int,
    help=(
        'Number of snapshots to keep. 0 == keep all. '
        'Default: 0. '
        'WARNING: This setting will delete older snapshots!'))
general_ag.add_argument(
    '--version',
    action='count',
    help='Print the Automated EBS Snapshots version and exit')
general_ag.add_argument(
    '--log-file',
    help='Path to file to send logs to')
general_ag.add_argument(
    '--daemon',
    help=(
        'Run Automatic EBS Snapshots in daemon mode. Valid modes are '
        '[start|stop|restart|foreground]'))
admin_actions_ag = parser.add_argument_group(
    title='Administrative actions')
admin_actions_ag.add_argument(
    '--list',
    action='count',
    help='List volumes that we are watching')
admin_actions_ag.add_argument(
    '--unwatch',
    metavar='VOLUME_ID',
    help=(
        'Remove an EBS volume from the watch list. '
        'Usage: --unwatch vol-12345678'))
admin_actions_ag.add_argument(
    '--watch',
    metavar='VOLUME_ID',
    help=(
        'Add a new EBS volume to the watch list. '
        'Usage: --watch vol-12345678'))
admin_actions_ag.add_argument(
    '--snapshots',
    metavar='VOLUME',
    help='List all snapshots of this EBS volume')
admin_actions_ag.add_argument(
    '--unwatch-file',
    metavar='FILE_NAME',
    help=(
        'Remove all EBS volumes in the config file from the watch list. '
        'Usage: --unwatch-file volumes.conf'))
admin_actions_ag.add_argument(
    '--watch-file',
    metavar='FILE_NAME',
    help=(
        'Add all EBS volumes in the config file to the watch list. '
        'Usage: --watch-file volumes.conf'))
actions_ag = parser.add_argument_group(
    title='Actions')
actions_ag.add_argument(
    '--run',
    action='count',
    help='Run the watcher to ensure snapshots')
actions_ag.add_argument(
    '--force-run',
    action='count',
    help='Similar to --run, but always take a snapshot and purge '
         'snapshots that should be removed.')
args = parser.parse_args()

if args.version:
    print('Automated EBS Snapshots version {}'.format(
        settings.get('general', 'version')))
    sys.exit(0)
| apache-2.0 | 5,670,893,979,281,945,000 | 28.482143 | 73 | 0.648092 | false |
pombredanne/dask | dask/tests/test_async.py | 1 | 5236

from __future__ import absolute_import, division, print_function
from operator import add
from copy import deepcopy

import dask
import pytest
from dask.async import *


fib_dask = {'f0': 0, 'f1': 1, 'f2': 1, 'f3': 2, 'f4': 3, 'f5': 5, 'f6': 8}


def test_start_state():
    dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
    result = start_state_from_dask(dsk)
    expected = {'cache': {'x': 1, 'y': 2},
                'dependencies': {'w': set(['y', 'z']),
                                 'x': set([]),
                                 'y': set([]),
                                 'z': set(['x'])},
                'dependents': {'w': set([]),
                               'x': set(['z']),
                               'y': set(['w']),
                               'z': set(['w'])},
                'finished': set([]),
                'released': set([]),
                'running': set([]),
                'ready': ['z'],
                'waiting': {'w': set(['z'])},
                'waiting_data': {'x': set(['z']),
                                 'y': set(['w']),
                                 'z': set(['w'])}}
    assert result == expected


def test_start_state_looks_at_cache():
    dsk = {'b': (inc, 'a')}
    cache = {'a': 1}
    result = start_state_from_dask(dsk, cache)
    assert result['dependencies']['b'] == set(['a'])
    assert result['ready'] == ['b']


def test_start_state_with_redirects():
    dsk = {'x': 1, 'y': 'x', 'z': (inc, 'y')}
    result = start_state_from_dask(dsk)
    assert result['cache'] == {'x': 1}


def test_start_state_with_independent_but_runnable_tasks():
    assert start_state_from_dask({'x': (inc, 1)})['ready'] == ['x']


def test_finish_task():
    dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
    sortkey = order(dsk).get
    state = start_state_from_dask(dsk)
    state['ready'].remove('z')
    state['running'] = set(['z', 'other-task'])
    task = 'z'
    result = 2

    oldstate = deepcopy(state)
    state['cache']['z'] = result
    finish_task(dsk, task, state, set(), sortkey)

    assert state == {
        'cache': {'y': 2, 'z': 2},
        'dependencies': {'w': set(['y', 'z']),
                         'x': set([]),
                         'y': set([]),
                         'z': set(['x'])},
        'finished': set(['z']),
        'released': set(['x']),
        'running': set(['other-task']),
        'dependents': {'w': set([]),
                       'x': set(['z']),
                       'y': set(['w']),
                       'z': set(['w'])},
        'ready': ['w'],
        'waiting': {},
        'waiting_data': {'y': set(['w']),
                         'z': set(['w'])}}


def test_get():
    dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
    assert get_sync(dsk, 'w') == 4
    assert get_sync(dsk, ['w', 'z']) == (4, 2)


def test_nested_get():
    dsk = {'x': 1, 'y': 2, 'a': (add, 'x', 'y'), 'b': (sum, ['x', 'y'])}
    assert get_sync(dsk, ['a', 'b']) == (3, 3)


def test_cache_options():
    try:
        from chest import Chest
    except ImportError:
        return
    cache = Chest()

    def inc2(x):
        assert 'y' in cache
        return x + 1

    with dask.set_options(cache=cache):
        get_sync({'x': (inc2, 'y'), 'y': 1}, 'x')


def test_sort_key():
    L = ['x', ('x', 1), ('z', 0), ('x', 0)]
    assert sorted(L, key=sortkey) == ['x', ('x', 0), ('x', 1), ('z', 0)]


def test_callback():
    f = lambda x: x + 1
    dsk = {'a': (f, 1)}
    from dask.threaded import get

    def start_callback(key, d, state):
        assert key == 'a' or key is None
        assert d == dsk
        assert isinstance(state, dict)

    def end_callback(key, value, d, state, worker_id):
        assert key == 'a' or key is None
        assert value == 2 or value is None
        assert d == dsk
        assert isinstance(state, dict)

    get(dsk, 'a', start_callback=start_callback, end_callback=end_callback)


def test_order_of_startstate():
    dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b'),
           'x': 1, 'y': (inc, 'x')}
    result = start_state_from_dask(dsk)
    assert result['ready'] == ['y', 'b']

    dsk = {'x': 1, 'y': (inc, 'x'), 'z': (inc, 'y'),
           'a': 1, 'b': (inc, 'a')}
    result = start_state_from_dask(dsk)
    assert result['ready'] == ['b', 'y']


def test_nonstandard_exceptions_propagate():
    class MyException(Exception):
        def __init__(self, a, b):
            self.a = a
            self.b = b

        def __str__(self):
            return "My Exception!"

    def f():
        raise MyException(1, 2)

    from dask.threaded import get
    try:
        get({'x': (f,)}, 'x')
        assert False
    except MyException as e:
        assert "My Exception!" in str(e)
        assert "Traceback" in str(e)
        assert 'a' in dir(e)
        assert 'traceback' in dir(e)
        assert e.exception.a == 1 and e.exception.b == 2
        assert e.a == 1 and e.b == 2


def test_remote_exception():
    e = TypeError("hello")
    a = remote_exception(e, 'traceback')
    b = remote_exception(e, 'traceback')

    assert type(a) == type(b)
    assert isinstance(a, TypeError)
    assert 'hello' in str(a)
    assert 'traceback' in str(a)
| bsd-3-clause | 5,824,641,980,801,946,000 | 27.456522 | 75 | 0.442704 | false |
nagordon/mechpy | mechpy/composites.py | 1 | 71681

# coding: utf-8
'''
Module for composite material analysis

Hyer-Stress Analysis of Fiber-Reinforced Composite Materials
Herakovich-Mechanics of Fibrous Composites
Daniel-Engineering Mechanics of Composite Materials
Kollar-Mechanics of Composite Structures

NASA-Basic Mechanics of Laminated Composites
https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19950009349.pdf

TODO:
* transverse shear stress reddy pg 136 or daniel pg 139
* include line loads (Qx,Qy) for combined loading
* calculate capability of panel based on margin
'''
#==============================================================================
# Import Modules
#==============================================================================
from __future__ import print_function, division

__author__ = 'Neal Gordon <[email protected]>'
__date__ = '2016-12-02'
__version__ = 0.1

from copy import copy
from numpy import pi, zeros, ones, linspace, arange, array, sin, cos, sqrt, pi
from numpy.linalg import solve, inv

#from scipy import linalg
import numpy as np
#np.set_printoptions(suppress=False, precision=2)  # suppress scientific notation
np.set_printoptions(precision=3, linewidth=200)  #, threshold=np.inf)

import scipy
from scipy.spatial import ConvexHull
#np.set_printoptions(formatter={'float': lambda x: "{:.2f}".format(x)})

import pandas as pd

import sympy as sp
from sympy import Function, dsolve, Eq, Derivative, symbols, pprint
from sympy.plotting import plot3d
#from sympy import cos, sin
#sp.init_printing(use_latex='mathjax')
#sp.init_printing(wrap_line=False, pretty_print=True)

import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (8, 5)
mpl.rcParams['font.size'] = 12
mpl.rcParams['legend.fontsize'] = 14

import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, figure, xlim, ylim, title, legend, \
    grid, show, xlabel, ylabel, tight_layout
from mpl_toolkits.mplot3d import axes3d

# if using ipython console, turn off inline plotting
#mpl.use('Qt5Agg')

# inline plotting
from IPython import get_ipython
#get_ipython().magic('matplotlib inline')

### disable inline plotting
try:
    get_ipython().magic('matplotlib')
except:
    pass

from IPython.display import display

import os

plt.close('all')


#==============================================================================
# Functions
#==============================================================================
def import_matprops(mymaterial=['T300_5208', 'AL_7075']):
    '''
    import material properties
    '''
    matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0)

    if mymaterial == [] or mymaterial == '':
        print(matprops.columns.tolist())

    mat = matprops[mymaterial]
    #mat.applymap(lambda x: np.float(x))
    mat = mat.applymap(lambda x: pd.to_numeric(x, errors='ignore'))
    return mat


def Sf(E1, E2, nu12, G12):
    '''transversely isotropic compliance matrix. pg 58 herakovich'''
    nu21 = E2*nu12/E1
    S = array([[1/E1,     -nu21/E2, 0],
               [-nu12/E1, 1/E2,     0],
               [0,        0,        1/G12]])
    return S


def S6f(E1, E2, E3, nu12, nu13, nu23, G12, G13, G23):
    '''
    daniel pg 74
    transversely isotropic compliance matrix.
    For transversely isotropic
    E2=E3, nu12=nu13, G12=G13, G23=E2/(2(1+nu23))
    '''
    S6 = array( [[    1/E1, -nu12/E1, -nu12/E1,     0,     0,     0],
                 [-nu12/E1,     1/E2, -nu23/E2,     0,     0,     0],
                 [-nu12/E1, -nu23/E2,     1/E2,     0,     0,     0],
                 [       0,        0,        0, 1/G23,     0,     0],
                 [       0,        0,        0,     0, 1/G13,     0],
                 [       0,        0,        0,     0,     0, 1/G12]])
    return S6


def C6f(E1, E2, E3, nu12, nu13, nu23, G12, G13, G23):
    '''
    daniel pg 74
    transversely isotropic stiffness matrix.
    '''
    C6 = inv(S6f(E1, E2, E3, nu12, nu13, nu23, G12, G13, G23))
    return C6


def Qf(E1, E2, nu12, G12):
    '''transversely isotropic reduced stiffness matrix. pg 58 herakovich
    G12 = E1/(2*(1+nu12)) if isotropic'''
    nu21 = E2*nu12/E1
    Q = array([[E1/(1-nu12*nu21),      E2*nu12/(1-nu12*nu21), 0],
               [E2*nu12/(1-nu12*nu21), E2/(1-nu12*nu21),      0],
               [0,                     0,                     G12]])
    return Q
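
# Example (added; illustrative psi values for a carbon/epoxy ply):
#   Q = Qf(E1=20.0e6, E2=1.3e6, nu12=0.3, G12=1.0e6)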
def T61(th):
    '''Stress
    th=ply angle in degrees
    Voigt notation for stress transform. sigma1 = T1 @ sigmax
    reddy pg 91'''
    n = sin(th*pi/180)
    m = cos(th*pi/180)
    T1 = array( [[m**2, n**2, 0, 0, 0,  2*m*n],
                 [n**2, m**2, 0, 0, 0, -2*m*n],
                 [0,    0,    1, 0, 0,  0],
                 [0,    0,    0, m, -n, 0],
                 [0,    0,    0, n, m,  0],
                 [-m*n, m*n,  0, 0, 0,  (m**2-n**2)]])
    return T1


def T62(th):
    '''Strain
    Voigt notation for strain transform. epsilon1 = T2 @ epsilonx
    th=ply angle in degrees
    reddy pg 91
    '''
    n = sin(th*pi/180)
    m = cos(th*pi/180)
    T2 = array( [[m**2,   n**2,  0, 0, 0,  m*n],
                 [n**2,   m**2,  0, 0, 0, -m*n],
                 [0,      0,     1, 0, 0,  0],
                 [0,      0,     0, m, -n, 0],
                 [0,      0,     0, n, m,  0],
                 [-2*m*n, 2*m*n, 0, 0, 0,  (m**2-n**2)]])
    return T2


def T1(th):
    '''Stress Transform for Plane Stress
    th=ply angle in degrees
    Voigt notation for stress transform. sigma1 = T1 @ sigmax
    recall T1(th)**-1 == T1(-th)'''
    n = sin(th*pi/180)
    m = cos(th*pi/180)
    T1 = array( [[m**2, n**2,  2*m*n],
                 [n**2, m**2, -2*m*n],
                 [-m*n, m*n,  (m**2-n**2)]])
    return T1


def T2(th):
    '''Strain Transform for Plane Stress
    th=ply angle in degrees
    Voigt notation for strain transform. epsilon1 = T2 @ epsilonx'''
    n = sin(th*pi/180)
    m = cos(th*pi/180)
    T2 = array( [[m**2,   n**2,   m*n],
                 [n**2,   m**2,  -m*n],
                 [-2*m*n, 2*m*n, (m**2-n**2)]])
    return T2
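
# Quick check (added): ply-axis stress from global stress, e.g.
#   s_12 = T1(45) @ array([[100], [10], [5]])
# and back with T1(-45), since inv(T1(th)) == T1(-th).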
def T1s(th):
    '''Symbolic Stress Transform for Plane Stress
    th=ply angle in degrees
    Voigt notation for stress transform. sigma1 = T1 @ sigmax
    recall T1(th)**-1 == T1(-th)'''
    n = sp.sin(th*sp.pi/180)
    m = sp.cos(th*sp.pi/180)
    T1 = sp.Matrix( [[m**2, n**2,  2*m*n],
                     [n**2, m**2, -2*m*n],
                     [-m*n, m*n,  (m**2-n**2)]])
    return T1


def T2s(th):
    '''Symbolic Strain Transform for Plane Stress
    th=ply angle in degrees
    Voigt notation for strain transform. epsilon1 = T2 @ epsilonx'''
    n = sp.sin(th*sp.pi/180)
    m = sp.cos(th*sp.pi/180)
    T2 = sp.Matrix( [[m**2,   n**2,   m*n],
                     [n**2,   m**2,  -m*n],
                     [-2*m*n, 2*m*n, (m**2-n**2)]])
    return T2


def failure_envelope():
    # failure envelopes

    # max stress criteria
    # 1 direction in first row
    # 2 direction in second row

    # failure strength in compression
    #Fc = matrix([[-1250.0, -600.0],
    #             [-200.0,  -120.0]])  # ksi
    #
    ## failure strength in tension
    #Ft = matrix([[1500, 1000],
    #             [50,   30]])  # ksi
    #
    ## failure strength in shear
    #Fs = matrix([100, 70])  # Shear

    Fc1 = [-1250, -600]  # Compression 1 direction
    Fc2 = [-200, -120]   # Compression 2 direction
    Ft1 = [1500, 1000]   # Tension 1 direction
    Ft2 = [50, 30]       # Tension 2 direction
    Fs = [100, 70]       # Shear

    # F1 = Ft(1);
    # F2 = Ft(1);
    # F6 = Fs(1);

    for c in range(2):  # mattype
        factor = 1.25
        # right
        plot( [Ft1[c], Ft1[c]], [Fc2[c], Ft2[c]])

        # left
        plot( [Fc1[c], Fc1[c]], [Fc2[c], Ft2[c]])

        # top
        plot( [Fc1[c], Ft1[c]], [Ft2[c], Ft2[c]])

        # bottom
        plot( [Fc1[c], Ft1[c]], [Fc2[c], Fc2[c]])

        # center horizontal
        plot( [Fc1[c], Ft1[c]], [0, 0])

        # center vertical
        plot( [0, 0], [Fc2[c], Ft2[c]])

    #xlim([min(Fc1), max(Ft1)]*factor)
    #ylim([min(Fc2), max(Ft2)]*factor)
    xlabel('$\sigma_1,ksi$')
    ylabel('$\sigma_2,ksi$')
    title('failure envelope with Max-Stress Criteria')
def material_plots(materials=['Carbon_cloth_AGP3705H']):
    '''
    plotting composite properties
    Sf(E1,E2,nu12,G12)
    '''
    # plt.rcParams['figure.figsize'] = (10, 8)
    # plt.rcParams['font.size'] = 14
    # plt.rcParams['legend.fontsize'] = 14

    plyangle = arange(-45, 45.1, 0.1)
    h = 1  # lamina thickness
    layupname = '[0]'
    mat = import_matprops(materials)
    Ex = mat[materials[0]].E1
    Ey = mat[materials[0]].E2
    nuxy = mat[materials[0]].nu12
    Gxy = mat[materials[0]].G12

    # layupname = '[0, 45, 45, 0]'
    # Ex= 2890983.38
    # Ey= 2844063.06
    # nuxy= 0.27
    # Gxy= 1129326.25
    # h = 0.0600

    plt.close('all')

    S = Sf(Ex, Ey, nuxy, Gxy)
    C = inv(S)

    C11 = [(inv(T1(th)) @ C @ T2(th))[0,0] for th in plyangle]
    C22 = [(inv(T1(th)) @ C @ T2(th))[1,1] for th in plyangle]
    C33 = [(inv(T1(th)) @ C @ T2(th))[2,2] for th in plyangle]
    C12 = [(inv(T1(th)) @ C @ T2(th))[0,1] for th in plyangle]

    Exbar = zeros(len(plyangle))
    Eybar = zeros(len(plyangle))
    Gxybar = zeros(len(plyangle))

    Q = Qf(Ex, Ey, nuxy, Gxy)

    Qbar = zeros((len(plyangle), 3, 3))
    for i, th in enumerate(plyangle):
        Qbar[i] = solve(T1(th), Q) @ T2(th)
    #Qbar = [solve(T1(th), Q) @ T2(th) for th in plyangle]

    Qbar11 = Qbar[:,0,0]
    Qbar22 = Qbar[:,1,1]
    Qbar66 = Qbar[:,2,2]
    Qbar12 = Qbar[:,0,1]
    Qbar16 = Qbar[:,0,2]
    Qbar26 = Qbar[:,1,2]

    Aij = Qbar*h

    # laminate Stiffness
    #     | Exbar    Eybar    Gxybar   |
    # A = | vxybar   vyxbar   etasxbar |
    #     | etaxsbar etaysbar etasybar |

    # laminate Compliance
    aij = zeros((len(plyangle), 3, 3))
    for i, _Aij in enumerate(Aij):
        aij[i] = inv(_Aij)

    # material properties for whole laminate (Daniel, pg183)
    Exbar = [1/(h*_aij[0,0]) for _aij in aij]
    Eybar = [1/(h*_aij[1,1]) for _aij in aij]
    Gxybar = [1/(h*_aij[2,2]) for _aij in aij]

    # Global Stress
    s_xy = array([[100],
                  [10],
                  [5]])

    # local ply stress
    s_12 = np.zeros((3, len(plyangle)))
    for i, th in enumerate(plyangle):
        #s_12[:,i] = np.transpose(T1(th) @ s_xy)[0]  # local stresses
        s_12[:,[i]] = T1(th) @ s_xy

    # Plotting
    figure()  #, figsize=(10,8))
    plot(plyangle, C11, plyangle, C22, plyangle, C33, plyangle, C12)
    legend(['$\overline{C}_{11}$', '$\overline{C}_{22}$', '$\overline{C}_{44}$', '$\overline{C}_{66}$'])
    title('Transversely Isotropic Stiffness properties of carbon fiber T300_5208')
    xlabel("$\Theta$")
    ylabel('$\overline{C}_{ii}$, ksi')
    grid()

    figure()  #, figsize=(10,8))
    plot(plyangle, Exbar, label=r"Modulus: $E_x$")
    plot(plyangle, Eybar, label=r"Modulus: $E_y$")
    plot(plyangle, Gxybar, label=r"Modulus: $G_{xy}$")
    title("Constitutive Properties in various angles")
    xlabel("$\Theta$")
    ylabel("modulus, psi")
    legend()
    grid()

    figure()  #, figsize=(10,8))
    plot(plyangle, s_12[0,:], label='$\sigma_{11},ksi$')
    plot(plyangle, s_12[1,:], label='$\sigma_{22},ksi$')
    plot(plyangle, s_12[2,:], label='$\sigma_{12},ksi$')
    legend(loc='lower left')
    xlabel("$\Theta$")
    ylabel("Stress, ksi")
    grid()

    # plot Qbar terms as a function of ply angle
    figure()  #, figsize=(10,8))
    plot(plyangle, Qbar11, label="Qbar11")
    plot(plyangle, Qbar22, label="Qbar22")
    plot(plyangle, Qbar66, label="Qbar66")
    legend(loc='lower left')
    xlabel("$\Theta$")
    ylabel('Q')
    grid()

    # plot Qbar coupling terms as a function of ply angle
    figure()  #, figsize=(10,8))
    plot(plyangle, Qbar12, label="Qbar12")
    plot(plyangle, Qbar16, label="Qbar16")
    plot(plyangle, Qbar26, label="Qbar26")
    legend(loc='lower left')
    xlabel("$\Theta$")
    ylabel('Q')
    grid()

    titlename = 'Laminate Properties varying angle for {} {}'.format(materials[0], layupname)
    #df = pd.DataFrame({'plyangle':plyangle, 'Exbar':Exbar, 'Eybar':Eybar, 'Gxybar':Gxybar})
    #print(df)
    #df.to_csv(titlename+'.csv')

    plt.figure(figsize=(9, 6))
    plot(plyangle, Exbar, label=r"Modulus: $E_x$")
    plot(plyangle, Eybar, label=r"Modulus: $E_y$")
    plot(plyangle, Gxybar, label=r"Modulus: $G_{xy}$")
    title(titlename)
    xlabel("$\Theta$")
    ylabel("modulus, psi")
    legend(loc='best')
    grid()
    #plt.savefig(titlename+'.png')
    show()
def laminate_gen(lamthk=1.5, symang=[45, 0, 90], plyratio=2.0, matrixlayers=False, balancedsymmetric=True):
    '''
    ## function created to quickly create laminates based on given parameters
    lamthk=1.5             # total thickness of laminate
    symang = [45,0,90,30]  # symmetric ply angles
    plyratio=2.0           # lamina/matrix ratio
    matrixlayers=False     # add matrix layers between lamina plys
    nonsym=False           # symmetric
    mat = material type, as in different plies, matrix layer, uni tapes, etc
    # ply ratio can be used to vary the ratio of thickness between a matrix ply
    and lamina ply. if the same thickness is desired, plyratio = 1,
    if lamina is 2x as thick as matrix plyratio = 2
    '''
    if matrixlayers:
        nply = (len(symang)*2+1)*2
        nm = nply - len(symang)*2
        nf = len(symang)*2
        tm = lamthk / (plyratio*nf + nm)
        tf = tm*plyratio
        plyangle = zeros(nply//2)
        mat = 2*ones(nply//2)  # orthotropic fiber and matrix = 1, isotropic matrix = 2
        mat[1:-1:2] = 1        # [2 if x%2 else 1 for x in range(nply//2)]
        plyangle[1:-1:2] = symang[:]  # make a copy
        thk = tm*ones(nply//2)
        thk[2:2:-1] = tf
        lamang = list(symang) + list(symang[::-1])
        plyangle = list(plyangle) + list(plyangle[::-1])
        mat = list(mat) + list(mat[::-1])
        thk = list(thk) + list(thk[::-1])
    else:  # no matrix layers, ignore ratio
        if balancedsymmetric:
            nply = len(symang)*2
            mat = list(3*np.ones(nply))
            thk = list(lamthk/nply*np.ones(nply))
            lamang = list(symang) + list(symang[::-1])
            plyangle = list(symang) + list(symang[::-1])
        else:
            nply = len(symang)
            mat = [1]*nply
            thk = list(lamthk/nply*np.ones(nply))
            lamang = symang[:]
            plyangle = symang[:]
    return thk, plyangle, mat, lamang
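
# Example (added): a balanced symmetric [45/0/90]s laminate, 1.5 thick total:
#   thk, plyangle, mat, lamang = laminate_gen(lamthk=1.5, symang=[45, 0, 90])
#   # plyangle -> [45, 0, 90, 90, 0, 45], thk -> six plies of 0.25 each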
def make_quasi(n0=4, n45=4):
    #n0 = 4
    #n45 = 13
    #
    #ply0 = [0]*n0
    #ply45 = [45]*n45
    #plyangle = []
    #from itertools import zip_longest
    #for x, y in zip_longest(ply0, ply45):
    #    if len(plyangle) < min(len(ply0), len(ply45))*2:
    #        plyangle.append(x)
    #        plyangle.append(y)
    #    else:
    #        plyangle.append(x)
    #        plyangle.reverse()
    #        plyangle.append(y)
    #plyangle = [x for x in plyangle if x is not None]
    #plyangle

    ntot = n45 + n0
    plyangle = [45]*int(n45)
    for p in [0]*int(n0):
        plyangle.append(p)
    plyangle.reverse()
    return plyangle
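
# Example (added): make_quasi(4, 4) -> [0, 0, 0, 0, 45, 45, 45, 45]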
#@xw.func
def laminate_calcs(NM,ek,q0,plyangle,plymatindex,materials,platedim, zoffset,SF,plots,prints):
'''
code to compute composite properties, applied mechanical and thermal loads
and stress and strain
inputs
NM # force/moments lbs/in
ek # strain, curvature in/in
q0 = pressure
plyangle # angle for each ply
plymatindex # material for each ply
materials # list materials used,
general outline for computing elastic properties of composites
1) Determine engineering properties of unidirectional laminate. E1, E2, nu12, G12
2) Calculate ply stiffnesses Q11, Q22, Q12, Q66 in the principal/local coordinate system
3) Determine Fiber orientation of each ply
4) Calculate the transformed stiffness Qxy in the global coordinate system
5) Determine the through-thicknesses of each ply
6) Determine the laminate stiffness Matrix (ABD)
7) Calculate the laminate compliance matrix by inverting the ABD matrix
8) Calculate the laminate engineering properties
# Stress Strain Relationship for a laminate, with Q=reduced stiffness matrix
|sx | |Qbar11 Qbar12 Qbar16| |ex +z*kx |
|sy |=|Qbar12 Qbar22 Qbar26|=|ey +z*ky |
|sxy| |Qbar16 Qbar26 Qbar66| |exy+z*kxy|
# Herakovich pg 84
Qbar = inv(T1) @ Q @ T2 == solve(T1, Q) @ T2
transformation reminders - see Herakovich for details
sig1 = T1*sigx
sigx = inv(T1)*sig1
eps1 = T2*epsx
    epsx = inv(T2)*eps1
sigx = inv(T1)*Q*T2*epsx
Qbar = inv(T1)*Q*T2
Sbar = inv(T2)*inv(Q)*T2
Notes, core transverse direction is G13, ribbon direction is G23
a_width = 50 # plate width (inches or meters)
b_length = 50 # laminate length, inches or meters
'''
#==========================================================================
# Initialize python settings
#==========================================================================
#get_ipython().magic('matplotlib')
plt.close('all')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 13
#plt.rcParams['legend.fontsize'] = 14
#==========================================================================
# Define composite properties
#==========================================================================
assert(len(plyangle)==len(plymatindex))
a_width, b_length = platedim
# either apply strains or loads , lb/in
Nx_, Ny_, Nxy_, Mx_, My_, Mxy_ = NM
NMbarapp = array([[Nx_],[Ny_],[Nxy_],[Mx_],[My_],[Mxy_]])
ex_, ey_, exy_, kx_, ky_, kxy_ = ek
epsilonbarapp = array([[ex_],[ey_],[exy_],[kx_],[ky_],[kxy_]])
Ti = 0 # initial temperature (C)
Tf = 0 # final temperature (C)
#SF = 1.0 # safety factor
#==========================================================================
# Import Material Properties
#==========================================================================
mat = import_matprops(materials)
#mat = import_matprops(['E-Glass Epoxy cloth','rohacell2lb']) # Herakovich
alphaf = lambda mat: array([[mat.alpha1], [mat.alpha2], [0]])
''' to get ply material info, use as follows
alpha = alphaf(mat[materials[plymatindex[i]]])
mat[materials[1]].E2
'''
laminatethk = array([mat[materials[i]].plythk for i in plymatindex ])
nply = len(laminatethk) # number of plies
H = np.sum(laminatethk) # plate thickness
# area = a_width*H
z = zeros(nply+1)
zmid = zeros(nply)
z[0] = -H/2
for i in range(nply):
z[i+1] = z[i] + laminatethk[i]
zmid[i] = z[i] + laminatethk[i]/2
#==========================================================================
# ABD Matrix Compute
#==========================================================================
    # Reduced stiffness matrix for a plane stress ply in principal coordinates
    # calculating Q from the compliance matrix may cause cancellation errors
A = zeros((3,3)); B = zeros((3,3)); D = zeros((3,3))
for i in range(nply): # = nply
Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 )
Qbar = solve(T1(plyangle[i]), Q) @ T2(plyangle[i]) # inv(T1(plyangle[i])) @ Q @ T2(plyangle[i])
A += Qbar*(z[i+1]-z[i])
# coupling stiffness
B += (1/2)*Qbar*(z[i+1]**2-z[i]**2)
# bending or flexural laminate stiffness relating moments to curvatures
D += (1/3)*Qbar*(z[i+1]**3-z[i]**3)
#Cbar6 = T61 @ C6 @ np.transpose(T61)
# laminate stiffness matrix
ABD = zeros((6,6))
ABD[0:3,0:3] = A
ABD[0:3,3:6] = B + zoffset*A
ABD[3:6,0:3] = B + zoffset*A
ABD[3:6,3:6] = D + 2*zoffset*B + zoffset**2*A
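    # In partitioned form (classical lamination theory):
    #   [N]   [A  B] [eps0 ]
    #   [M] = [B  D] [kappa]
    # zoffset shifts the reference plane away from the laminate midplane.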
    # laminate compliance matrix
abcd = inv(ABD)
a = abcd[0:3,0:3]
#==========================================================================
# Laminate Properties
#==========================================================================
# effective laminate shear coupling coefficients
etasxbar = a[0,2]/a[2,2]
etasybar = a[1,2]/a[2,2]
etaxsbar = a[2,0]/a[0,0]
etaysbar = a[2,1]/a[1,1]
# laminate engineer properties
Exbar = 1 / (H*a[0,0])
Eybar = 1 / (H*a[1,1])
Gxybar = 1 / (H*a[2,2])
nuxybar = -a[0,1]/a[0,0]
nuyxbar = -a[0,1]/a[1,1]
# TODO: validate results, does not appear to be correct
    # strain centers, pg 72, NASA - Basic Mechanics of Laminated Composites
# added divide by zero epsilon
z_eps0_x = -B[0,0] / (D[0,0] + 1e-16)
z_eps0_y = -B[0,1] / (D[0,1] + 1e-16)
z_eps0_xy = -B[0,2] / (D[0,2] + 1e-16)
z_sc = -B[2,2] / (D[2,2] +1e-16) # shear center
# --------------------- Double Check ---------------------
# # Laminate compliance matrix
# LamComp = array([ [1/Exbar, -nuyxbar/Eybar, etasxbar/Gxybar],
# [-nuxybar/Exbar, 1/Eybar , etasybar/Gxybar],
# [etaxsbar/Exbar, etaysbar/Eybar, 1/Gxybar]] )
# # Daniel pg 183
# # combines applied loads and applied strains
# strain_laminate = LamComp @ Nxyzapplied[:3]/H + strainxyzapplied[:3]
# Nxyz = A @ strain_laminate
# stress_laminate = Nxyz/H
# --------------------------------------------------------
    #==========================================================================
    # Pressure load: displacement and moments
    #==========================================================================
D11,D12,D22,D66 = D[0,0], D[0,1], D[1,1], D[2,2]
B11 = B[0,0]
A11, A12 = A[0,0], A[0,1]
# reddy pg 247 Navier displacement solution for a simply supported plate
s = b_length/a_width
x = a_width/2
y = b_length/2
# 5.2.8, reddy, or hyer 13.123
terms = 5
w0 = 0
for m in range(1,terms,2):
for n in range(1,terms,2):
dmn = pi**4/b_length**4 * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4)
alpha = m*pi/a_width
beta = n*pi/b_length
# for uniformly distributed loads, m,n = 1,3,5,...
Qmn = 16*q0/(pi**2*m*n)
Wmn = Qmn/dmn
w0 += Wmn * sin(alpha*x) * sin(beta*y)
w0_simplesupport = w0
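    # Series summed above (Reddy 5.2.8), odd m,n only:
    #   w0(x,y) = sum_m sum_n (Qmn/dmn) * sin(m*pi*x/a_width) * sin(n*pi*y/b_length)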
# 5.2.12a, reddy
# mid span moments
Mxq=Myq=Mxyq=0
for m in range(1,terms,2):
for n in range(1,terms,2):
dmn = pi**4/b_length**4 * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4)
alpha = m*pi/a_width
beta = n*pi/b_length
# for uniformly distributed loads, m,n = 1,3,5,...
Qmn = 16*q0/(pi**2*m*n)
Wmn = Qmn/dmn
Mxq += (D11*alpha**2 + D12*beta**2 ) * Wmn * sin(m*pi*x/a_width) * sin(n*pi*y/b_length)
Myq += (D12*alpha**2 + D22*beta**2 ) * Wmn * sin(m*pi*x/a_width) * sin(n*pi*y/b_length)
Mxyq += alpha*beta*D66 * Wmn * cos(m*pi*x/a_width) * cos(n*pi*y/b_length)
Mxyq = -2*Mxyq
NMq = [[0],[0],[0],[Mxq],[Myq],[Mxyq]]
# hyer, x-pin-pin, y-free-free plate reaction forces, pg 619
# Forces and Moments across the width of the plate
A11R = A11*(1-B11**2/(A11*D11))
D11R = D11*(1-B11**2/(A11*D11))
Nxq0 = lambda x: B11/D11 * q0 * a_width**2 /12
Nyq0 = lambda x: B11 * A12*q0 * a_width**2 / (D11*A11R*12) * (6*(x/a_width)**2-1/2)
Nxyq0 = lambda x: 0
Mxq0 = lambda x: q0 * a_width**2/8 * (1-4*(x/a_width)**2)
Myq0 = lambda x: D12 * q0 * a_width**2 / (D11R*8) * ((1-2*B11**2/(3*A11*D11))-(4*(x/a_width)**2))
Mxyq0 = lambda x: 0
# clamped plate 5.4.11, reddy
#w0_clamped = ( 49 * q0*a_width**4 * (x/a_width - (x/a_width)**2 )**2 * (y/b_length - (y/b_length)**2)**2) / (8 * (7*D11+4*(D12 + 2*D66)*s**2 + 7*D22*s**4) )
# reddy, 5.4.12
w0_clamped = 0.00342 * (q0*a_width**4) / (D11+0.5714*(D12+2*D66)*s**2+D22*s**4)
# reddy, 5.4.15
#w0_clamped = 0.00348 * (q0*a_width**4) / (D11*b_length**4+0.6047*(D12+2*D66)*s**2+D22*s**4)
# reddy 5.4.15, for isotropic D11=D
w0_clamped_isotropic = 0.00134*q0*a_width**4/D11
#==========================================================================
# Applied Loads and pressure loads
#==========================================================================
NMbarapptotal = NMbarapp + NMq + ABD @ epsilonbarapp
#==========================================================================
# Thermal Loads
#==========================================================================
'''
if the material is isotropic and unconstrained, then no thermal stresses
will be experienced. If there are constraints, then the material will experience
thermally induced stresses. As with orthotropic materials, various directions will have
different stresses, and when stacked in various orientations, stresses can be
unintuitive and complicated. Global Thermal strains are subtracted from applied strains
# 1) determine the free unrestrained thermal strains in each layer, alphabar
'''
dT = Tf-Ti
Nhatth= zeros((3,1)) # unit thermal force in global CS
Mhatth = zeros((3,1)) # unit thermal moment in global CS
alphabar = zeros((3,nply)) # global ply CTE
for i in range(nply): # = nply
Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 )
alpha = alphaf(mat[materials[plymatindex[i]]])
Qbar = inv(T1(plyangle[i])) @ Q @ T2(plyangle[i])
alphabar[:,[i]] = solve(T2(plyangle[i]), alpha)
#alphabar[:,[i]] = inv(T2(plyangle[i])) @ alpha # Convert to global CS
Nhatth += Qbar @ (alphabar[:,[i]])*(z[i+1] - z[i]) # Hyer method for calculating thermal unit loads
Mhatth += 0.5*Qbar@(alphabar[:,[i]])*(z[i+1]**2-z[i]**2)
NMhatth = np.vstack((Nhatth,Mhatth))
NMbarth = NMhatth*dT # resultant thermal loads
# Laminate CTE
epsilonhatth = abcd@NMhatth # laminate CTE
# applied loads and thermal loads
epsilonbarapp = abcd @ NMbarapptotal
epsilonbarth = abcd @ NMbarth # resultant thermal strains
epsilonbartotal = epsilonbarapp + epsilonbarth
    # Composite response from applied mechanical loads and strains. Average
# properties only. Used to compare results from tensile test.
#epsilon_laminate = abcd@NMbarapptotal
#sigma_laminate = ABD@epsilon_laminate/H
epsilon_laminate = epsilonbartotal[:]
sigma_laminate = ABD@epsilonbartotal/H
alpha_laminate = a@Nhatth
# determine thermal load and applied loads or strains Hyer pg 435,452
    Nx = NMbarapptotal[0,0]*a_width # total force along x, as would be applied in a tensile test
    Ny = NMbarapptotal[1,0]*b_length # total force along y
#==========================================================================
# Thermal and mechanical local and global stresses at the ply interface
#==========================================================================
# Declare variables for plotting
epsilon_app = zeros((3,2*nply))
sigma_app = zeros((3,2*nply))
epsilonbar_app = zeros((3,2*nply))
sigmabar_app = zeros((3,2*nply))
epsilon_th = zeros((3,2*nply))
sigma_th = zeros((3,2*nply))
epsilonbar_th = zeros((3,2*nply))
sigmabar_th = zeros((3,2*nply))
epsilon = zeros((3,2*nply))
epsilonbar = zeros((3,2*nply))
sigma = zeros((3,2*nply))
sigmabar = zeros((3,2*nply))
for i,k in enumerate(range(0,2*nply,2)):
        # stress is calculated at the top and bottom of each ply
Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 )
Qbar = inv(T1(plyangle[i])) @ Q @ T2(plyangle[i])
### transverse shear, herakovich pg 254
#Q44 = mat[materials[plymatindex[i]]].G23
#Q55 = mat[materials[plymatindex[i]]].G13
#Qbar44 = Q44*cos(plyangle[i])**2+Q55*sin(plyangle[i])**2
#Qbar55 = Q55*cos(plyangle[i])**2 + Q44*sin(plyangle[i])**2
#Qbar45 = (Q55-Q44)*cos(plyangle[i])*sin(plyangle[i])
#epsilontransverse = array([[gammayz],[gammaxz]])
#sigmatransverse = array([[Qbar44, Qbar45],[Qbar45, Qbar55]]) @ epsilontransverse
# Global stresses and strains, applied load only
epsbarapp1 = epsilonbarapp[0:3] + z[i]*epsilonbarapp[3:7]
epsbarapp2 = epsilonbarapp[0:3] + z[i+1]*epsilonbarapp[3:7]
sigbarapp1 = Qbar @ epsbarapp1
sigbarapp2 = Qbar @ epsbarapp2
        # Local stresses and strains, applied load only
epsapp1 = T2(plyangle[i]) @ epsbarapp1
epsapp2 = T2(plyangle[i]) @ epsbarapp2
sigapp1 = Q @ epsapp1
sigapp2 = Q @ epsapp2
# Interface Stresses and Strains
epsilon_app[:,k:k+2] = np.column_stack((epsapp1,epsapp2))
epsilonbar_app[:,k:k+2] = np.column_stack((epsbarapp1,epsbarapp2))
sigma_app[:,k:k+2] = np.column_stack((sigapp1,sigapp2))
sigmabar_app[:,k:k+2] = np.column_stack((sigbarapp1,sigbarapp2))
# Global stress and strains, thermal loading only
epsbarth1 = epsilonbarth[0:3] + z[i]*epsilonbarth[3:7] - dT*alphabar[:,[i]]
epsbarth2 = epsilonbarth[0:3] + z[i+1]*epsilonbarth[3:7] - dT*alphabar[:,[i]]
sigbarth1 = Qbar @ epsbarth1
sigbarth2 = Qbar @ epsbarth2
# Local stress and strains, thermal loading only
epsth1 = T2(plyangle[i]) @ epsbarth1
epsth2 = T2(plyangle[i]) @ epsbarth2
sigth1 = Q @ epsth1
sigth2 = Q @ epsth2
# Interface Stresses and Strains
epsilon_th[:,k:k+2] = np.column_stack((epsth1,epsth2))
epsilonbar_th[:,k:k+2] = np.column_stack((epsbarth1+dT*alphabar[:,[i]],epsbarth2+dT*alphabar[:,[i]])) # remove the local thermal loads for plotting. only use local thermal strains for calculating stress
sigma_th[:,k:k+2] = np.column_stack((sigth1,sigth2))
sigmabar_th[:,k:k+2] = np.column_stack((sigbarth1,sigbarth2))
# TOTAL global stresses and strains, applied and thermal
epsbar1 = epsbarapp1 + epsbarth1
epsbar2 = epsbarapp2 + epsbarth2
sigbar1 = Qbar @ epsbar1
sigbar2 = Qbar @ epsbar2
# TOTAL local stresses and strains , applied and thermal
eps1 = T2(plyangle[i]) @ epsbar1
eps2 = T2(plyangle[i]) @ epsbar2
sig1 = Q @ eps1
sig2 = Q @ eps2
# Interface Stresses and Strains
epsilon[:,k:k+2] = np.column_stack((eps1,eps2))
epsilonbar[:,k:k+2] = np.column_stack((epsbar1+dT*alphabar[:,[i]],epsbar2+dT*alphabar[:,[i]])) # remove the local thermal loads for plotting. only use local thermal strains for calculating stress
sigma[:,k:k+2] = np.column_stack((sig1,sig2))
sigmabar[:,k:k+2] = np.column_stack((sigbar1,sigbar2))
#==========================================================================
# Strength Failure Calculations
#==========================================================================
# Strength Ratio
STRENGTHRATIO_MAXSTRESS = zeros((3,2*nply))
# Failure Index
FAILUREINDEX_MAXSTRESS = zeros((3,2*nply))
STRENGTHRATIO_TSAIWU = zeros((nply))
for i,k in enumerate(range(0,2*nply,2)):
# stress
s1 = sigma[0,k]
s2 = sigma[1,k]
s12 = np.abs(sigma[2,k])
# strength
F1 = mat[materials[plymatindex[i]]].F1t if s1 > 0 else mat[materials[plymatindex[i]]].F1c
F2 = mat[materials[plymatindex[i]]].F2t if s2 > 0 else mat[materials[plymatindex[i]]].F2c
F12 = mat[materials[plymatindex[i]]].F12
        # Max stress failure index: fail if FI > 1 (FI = 1/SR)
FAILUREINDEX_MAXSTRESS[0,k:k+2] = s1 / F1
FAILUREINDEX_MAXSTRESS[1,k:k+2] = s2 / F2
FAILUREINDEX_MAXSTRESS[2,k:k+2] = s12 / F12
        # Tsai-Wu: failure occurs when the index exceeds 1
F1t = mat[materials[plymatindex[i]]].F1t
F1c = mat[materials[plymatindex[i]]].F1c
F2t = mat[materials[plymatindex[i]]].F2t
F2c = mat[materials[plymatindex[i]]].F2c
F12 = mat[materials[plymatindex[i]]].F12
# inhomogeneous Tsai-Wu criterion # from Daniel
# http://www2.mae.ufl.edu/haftka/composites/mcdaniel-nonhomogenous.pdf
f1 = 1/F1t + 1/F1c
f2 = 1/F2t + 1/F2c
f11 = -1/(F1t*F1c)
f22 = -1/(F2t*F2c)
f66 = 1/F12**2
f12 = -0.5*sqrt(f11*f22)
#TW = f1*s1 + f2*s2 + f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2
# polynomial to solve. Added a machine epsilon to avoid divide by zero errors
lam1 = f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2 + 1e-16
lam2 = f1*s1 + f2*s2 + 1e-16
lam3 = -1
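        # Scaling the stresses by the strength ratio R and setting the
        # Tsai-Wu criterion equal to 1 gives the quadratic
        #   (f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2)*R**2
        #       + (f1*s1 + f2*s2)*R - 1 = 0
        # i.e. lam1*R**2 + lam2*R + lam3 = 0, solved for R below.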
# smallest positive root
roots = array([(-lam2+sqrt(lam2**2-4*lam1*lam3)) / (2*lam1) ,
(-lam2-sqrt(lam2**2-4*lam1*lam3)) / (2*lam1)] )
STRENGTHRATIO_TSAIWU[i] = roots[roots>=0].min() # strength ratio
# f1 = 1/F1t - 1/F1c
# f2 = 1/F2t - 1/F2c
# f11 = 1/(F1t*F1c)
# f22 = 1/(F2t*F2c)
# f66 = 1/F12**2
# STRENGTHRATIO_TSAIWU[i] = 2 / (f1*s2 + f2*s2 + sqrt((f1*s1+f2*s2)**2+4*(f11*s1**2+f22*s2**2+f66*s12**2)))
### Apply safety factors
FAILUREINDEX_MAXSTRESS = FAILUREINDEX_MAXSTRESS * SF
STRENGTHRATIO_TSAIWU = STRENGTHRATIO_TSAIWU / SF
###
MARGINSAFETY_TSAIWU = STRENGTHRATIO_TSAIWU-1 # margin of safety
# strength ratio for max stress, if < 1, then fail, SR = 1/FI
STRENGTHRATIO_MAXSTRESS = 1/(FAILUREINDEX_MAXSTRESS+1e-16)
# margin of safety based on max stress criteria
MARGINSAFETY_MAXSTRESS = STRENGTHRATIO_MAXSTRESS-1
# minimum margin of safety for Max stress failure
MARGINSAFETY_MAXSTRESS_min = MARGINSAFETY_MAXSTRESS.min().min()
FAILUREINDEX_MAXSTRESS_max = FAILUREINDEX_MAXSTRESS.max().max()
# minimum margin of safety of both Tsai-Wu and Max Stress
#MARGINSAFETY_MAXSTRESS_min = np.minimum(MARGINSAFETY_MAXSTRESS.min().min(), MARGINSAFETY_TSAIWU.min() )
# find critial values for all failure criteria
#MARGINSAFETY_MAXSTRESS = MARGINSAFETY_MAXSTRESS[~np.isinf(MARGINSAFETY_MAXSTRESS)] # remove inf
#MARGINSAFETY_TSAIWU = MARGINSAFETY_TSAIWU[~np.isinf(MARGINSAFETY_TSAIWU)] # remove inf
#==========================================================================
# Buckling Failure Calculations
#==========================================================================
''' Buckling of Clamped plates under shear load, reddy, 5.6.17'''
k11 = 537.181*D11/a_width**4 + 324.829*(D12+2*D66)/(a_width**2*b_length**2) + 537.181*D22/b_length**4
k12 = 23.107/(a_width*b_length)
k22 = 3791.532*D11/a_width**4 + 4227.255*(D12+2*D66)/(a_width**2*b_length**2) + 3791.532*D22/b_length**4
Nxycrit0 = 1/k12*np.sqrt(k11*k22)
FI_clamped_shear_buckling = (abs(Nxy_)*SF) / Nxycrit0 # failure if > 1
MS_clamped_shear_buckling = 1/(FI_clamped_shear_buckling+1e-16)-1
    '''Kassapoglou pg 126,137
simply supported plate buckling, assumes Nx>0 is compression
Nxcrit0 is the axial load that causes buckling
Nxycrit0 is the shear load that cause buckling
Nxcrit is the axial load part of a combined load that causes buckling
Nxycrit is the shear load part of a combined load that causes buckling
'''
# no buckling issues if Nx is positive
    # buckling calculations assume Nx compression is positive.
Nx__ = abs(Nx_) if Nx_ < 0 else np.float64(0)
Nxy__ = np.float64(0) if Nxy_ == 0 else abs(Nxy_) # assume shear in 1 direction although both directions are ok
# Nxy=0
Nxcrit0 = pi**2/a_width**2 * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 + D22*a_width**4/b_length**4)
# Nx=0
Nxycrit0 = 9*pi**4*b_length / (32*a_width**3) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 + D22*a_width**4/b_length**4)
FI_Nxy0_buckling, FI_Nx0_buckling, FI_Nx_buckling, FI_Nxy_buckling = 0,0,0,0
if Nx__ == 0 or Nxy__ == 0:
FI_Nxy0_buckling = (Nxy__*SF)/Nxycrit0
FI_Nx0_buckling = (Nx__*SF)/Nxcrit0
else:
# interaction term
k = Nxy__ / Nx__
Nxcrit = min( abs((pi**2/a_width**2) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 +D22*a_width**4/b_length**4 ) / (2-8192*a_width**2*k**2/(81*b_length**2*pi**4)) * (5 + sqrt(9 + 65536*a_width**2*k**2/(81*pi**4*b_length**2)))) ,
abs((pi**2/a_width**2) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 +D22*a_width**4/b_length**4 ) / (2-8192*a_width**2*k**2/(81*b_length**2*pi**4)) * (5 - sqrt(9 + 65536*a_width**2*k**2/(81*pi**4*b_length**2)))) )
Nxycrit = Nxycrit0*sqrt(1-Nxcrit/Nxcrit0)
# interactive calc
FI_Nx_buckling = (Nx__ *SF)/Nxcrit
FI_Nxy_buckling = (Nxy__*SF)/Nxycrit
FI_combinedload_simplesupport_buckle = max([FI_Nxy0_buckling,
FI_Nx0_buckling,
FI_Nx_buckling,
FI_Nxy_buckling] )
MS_min_buckling = 1/(FI_combinedload_simplesupport_buckle+1e-16)-1
#==========================================================================
# Facesheet Wrinkling
#==========================================================================
#==========================================================================
    # principal laminate stresses
#==========================================================================
sigma_principal_laminate = np.linalg.eig(array([[sigma_laminate[0,0],sigma_laminate[2,0],0],
[sigma_laminate[2,0],sigma_laminate[1,0],0],
[0,0,0]]))[0]
tauxy_p = sigma_laminate[2,0]
sigmax_p = sigma_laminate[0,0]
sigmay_p = sigma_laminate[1,0]
thetap = 0.5 * np.arctan( 2*tauxy_p / ((sigmax_p-sigmay_p+1e-16))) * 180/np.pi
#==========================================================================
# Printing Results
#==========================================================================
if prints:
print('--------------- laminate1 Stress analysis of fibers----------')
print('(z-) plyangles (z+)'); print(plyangle)
print('(z-) plymatindex (z+)'); print(plymatindex)
print('ply layers') ; print(z)
        print('laminate thickness, H = {:.4f}'.format(H))
#print('x- zero strain laminate center, z_eps0_x = {:.4f}'.format(z_eps0_x))
#print('y- zero strain laminate center, z_eps0_y = {:.4f}'.format(z_eps0_y))
#print('xy-zero strain laminate center, z_eps0_xy = {:.4f}'.format(z_eps0_xy))
#print('shear center laminate center, z_sc = {:.4f}'.format(z_sc))
print('Applied Loads'); print(NM)
print('ABD=');print(ABD)
print('Ex= {:.2f}'.format(Exbar) )
print('Ey= {:.2f}'.format(Eybar) )
print('nuxy= {:.2f}'.format(nuxybar) )
print('Gxy= {:.2f}'.format(Gxybar) )
print('epsilon_laminate') ; print(epsilon_laminate)
print('sigma_laminate') ; print(sigma_laminate)
print('sigma_principal_laminate') ; print(sigma_principal_laminate)
print('principal_angle = {:.2f} deg'.format(thetap))
print('NMbarapp') ; print(NMbarapp)
print('sigma') ; print(sigma)
        print('\nMax Stress Margin of Safety (failure if < 0), minimum = {:.4f}'.format( MARGINSAFETY_MAXSTRESS_min ) )
print(MARGINSAFETY_MAXSTRESS)
        print('\nTsai-Wu Margin of Safety (failure if < 0), minimum = {:.4f}'.format(MARGINSAFETY_TSAIWU.min()))
print(MARGINSAFETY_TSAIWU)
print('\nmaximum failure index = {:.4f}'.format( FAILUREINDEX_MAXSTRESS_max ))
print(FAILUREINDEX_MAXSTRESS)
print('\nBuckling MS for Nxy only for clamped edges = {:.4f}\n'.format(MS_clamped_shear_buckling))
# print('---- Individual Buckling Failure Index (fail>1) combined loads and simple support -----')
# print('FI_Nxy0 = {:.2f}'.format(FI_Nxy0_buckling) )
# print('FI_Nx0 = {:.2f}'.format(FI_Nx0_buckling) )
# print('---- Interactive Buckling Failure Index (fail>1) combined loads and simple support -----')
# print('FI_Nx = {:.2f}'.format(FI_Nx_buckling) )
# print('FI_Nxy = {:.2f}'.format(FI_Nxy_buckling) )
# print('---- Buckling Failure Index (fail>1) combined loads and simple support -----')
# print(FI_combinedload_simplesupport_buckle)
print('buckling combined loads and simple support MS = {:.4f}\n'.format((MS_min_buckling)))
print('Mx_midspan = {:.2f}'.format(Mxq) )
print('My_midspan = {:.2f}'.format(Myq) )
print('Mxy_midspan = {:.2f}'.format(Mxyq) )
print('w0_simplesupport = {:.6f}'.format(w0_simplesupport) )
print('w0_clamped = {:.6f}'.format(w0_clamped) )
print('w0_clamped_isotropic= {:.6f}'.format(w0_clamped_isotropic) )
#display(sp.Matrix(sigmabar))
#==========================================================================
# Plotting
#==========================================================================
if plots:
windowwidth = 800
windowheight = 450
zplot = zeros(2*nply)
for i,k in enumerate(range(0,2*nply,2)): # = nply
zplot[k:k+2] = z[i:i+2]
#legendlab = ['total','thermal','applied','laminate']
# global stresses and strains
mylw = 1.5 #linewidth
# Global Stresses and Strains
f1, ((ax1,ax2,ax3), (ax4,ax5,ax6)) = plt.subplots(2,3, sharex='row', sharey=True)
f1.canvas.set_window_title('Global Stress and Strain of %s laminate' % (plyangle))
        stresslabel = [r'$\sigma_x$',r'$\sigma_y$',r'$\tau_{xy}$']
        strainlabel = [r'$\epsilon_x$',r'$\epsilon_y$',r'$\gamma_{xy}$']
for i,ax in enumerate([ax1,ax2,ax3]):
## the top axes
ax.set_ylabel('thickness,z')
ax.set_xlabel(strainlabel[i])
ax.set_title(' Ply Strain '+strainlabel[i])
ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2))
ax.plot(epsilonbar[i,:], zplot, color='blue', lw=mylw, label='total')
ax.plot(epsilonbar_th[i,:], zplot, color='red', lw=mylw, alpha=0.75, linestyle='--', label='thermal')
ax.plot(epsilonbar_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
ax.plot([epsilon_laminate[i], epsilon_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
ax.grid(True)
#ax.set_xticks(linspace( min(ax.get_xticks()) , max(ax.get_xticks()) ,6))
for i,ax in enumerate([ax4,ax5,ax6]):
ax.set_ylabel('thickness,z')
ax.set_xlabel(stresslabel[i])
ax.set_title(' Ply Stress '+stresslabel[i])
ax.ticklabel_format(axis='x', style='sci', scilimits=(-3,3)) # scilimits=(-2,2))
ax.plot(sigmabar[i,:], zplot, color='blue', lw=mylw, label='total')
ax.plot(sigmabar_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal')
ax.plot(sigmabar_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
ax.plot([sigma_laminate[i], sigma_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
ax.grid(True)
leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3)
tight_layout()
try:
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(25,50,windowwidth,windowheight)
except:
pass
f1.show()
#plt.savefig('global-stresses-strains.png')
### Local Stresses and Strains
f2, ((ax1,ax2,ax3), (ax4,ax5,ax6)) = plt.subplots(2,3, sharex='row', sharey=True)
f2.canvas.set_window_title('Local Stress and Strain of %s laminate' % (plyangle))
        stresslabel = [r'$\sigma_1$',r'$\sigma_2$',r'$\tau_{12}$']
        strainlabel = [r'$\epsilon_1$',r'$\epsilon_2$',r'$\gamma_{12}$']
strengthplot = [ [ [F1t,F1t],[zplot.min(), zplot.max()], [F1c, F1c],[zplot.min(), zplot.max()] ] ,
[ [F2t,F2t],[zplot.min(), zplot.max()], [F2c, F2c],[zplot.min(), zplot.max()] ] ,
[ [F12,F12],[zplot.min(), zplot.max()], [-F12,-F12],[zplot.min(), zplot.max()] ] ]
for i,ax in enumerate([ax1,ax2,ax3]):
## the top axes
ax.set_ylabel('thickness,z')
ax.set_xlabel(strainlabel[i])
ax.set_title(' Ply Strain '+strainlabel[i])
ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2))
ax.plot(epsilon[i,:], zplot, color='blue', lw=mylw, label='total')
ax.plot(epsilon_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal')
ax.plot(epsilon_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
ax.plot([epsilon_laminate[i], epsilon_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
ax.grid(True)
for i,ax in enumerate([ax4,ax5,ax6]):
ax.set_ylabel('thickness,z')
ax.set_xlabel(stresslabel[i])
ax.set_title(' Ply Stress '+stresslabel[i])
ax.ticklabel_format(axis='x', style='sci', scilimits=(-3,3)) # scilimits=(-2,2))
ax.plot(sigma[i,:], zplot, color='blue', lw=mylw, label='total')
ax.plot(sigma_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal')
ax.plot(sigma_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
ax.plot([sigma_laminate[i], sigma_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
### plots strengths
#ax.plot(strengthplot[i][0],strengthplot[i][1], color='yellow', lw=mylw)
ax.grid(True)
leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3)
tight_layout()
try:
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(windowwidth+50,50,windowwidth,windowheight)
except:
pass
f2.show()
#plt.savefig('local-stresses-strains.png')
### Failure
f3, ((ax1,ax2,ax3)) = plt.subplots(1,3, sharex=True, sharey=True)
f3.canvas.set_window_title('Failure Index(failure if > 1), %s laminate' % (plyangle))
        stresslabel = [r'$\sigma_1/F_1$',r'$\sigma_2/F_2$',r'$\tau_{12}/F_{12}$']
for i,ax in enumerate([ax1,ax2,ax3]):
## the top axes
ax.set_ylabel('thickness,z')
ax.set_xlabel(stresslabel[i])
#ax.set_title(' Ply Strain at $\epsilon=%f$' % (epsxapp*100))
ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2))
ax.plot(FAILUREINDEX_MAXSTRESS[i,:], zplot, color='blue', lw=mylw, label='total')
ax.grid(True)
ax.set_title('Failure Index, fail if > 1')
#leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3)
tight_layout()
try:
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(25,windowheight+100,windowwidth,windowheight)
except:
pass
        f3.show()
        #plt.savefig('failure-index.png')
### warpage
res = 100
Xplt,Yplt = np.meshgrid(np.linspace(-a_width/2,a_width/2,res), np.linspace(-b_length/2,b_length/2,res))
epsx = epsilon_laminate[0,0]
epsy = epsilon_laminate[1,0]
epsxy = epsilon_laminate[2,0]
kapx = epsilon_laminate[3,0]
kapy = epsilon_laminate[4,0]
kapxy = epsilon_laminate[5,0]
        ### displacement
w = -0.5*(kapx*Xplt**2 + kapy*Yplt**2 + kapxy*Xplt*Yplt)
u = epsx*Xplt # pg 451 hyer
fig = plt.figure('plate-warpage')
ax = fig.gca(projection='3d')
ax.plot_surface(Xplt, Yplt, w+zmid[0], cmap=mpl.cm.jet, alpha=0.3)
###ax.auto_scale_xyz([-(a_width/2)*1.1, (a_width/2)*1.1], [(b_length/2)*1.1, (b_length/2)*1.1], [-1e10, 1e10])
ax.set_xlabel('plate width,y-direction,in')
ax.set_ylabel('plate length,x-direction, in')
ax.set_zlabel('warpage,in')
#ax.set_zlim(-0.01, 0.04)
#mngr = plt.get_current_fig_manager() ; mngr.window.setGeometry(450,550,600, 450)
try:
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(windowwidth+50,windowheight+100,windowwidth,windowheight)
except:
pass
plt.show()
#plt.savefig('plate-warpage')
return MARGINSAFETY_MAXSTRESS_min, FAILUREINDEX_MAXSTRESS_max
def plate():
    '''
    composite plate mechanics
    TODO - results need vetting
    '''
#==========================================================================
# Initialize
#==========================================================================
get_ipython().magic('matplotlib')
plt.close('all')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 13
#plt.rcParams['legend.fontsize'] = 14
#==========================================================================
# Import Material Properties
#==========================================================================
plythk = 0.0025
plyangle = array([0,90,-45,45,0]) * np.pi/180 # angle for each ply
nply = len(plyangle) # number of plies
laminatethk = np.zeros(nply) + plythk
H = sum(laminatethk) # plate thickness
# Create z dimensions of laminate
z_ = np.linspace(-H/2, H/2, nply+1)
a = 20 # plate width;
b = 10 # plate height
q0_ = 5.7 # plate load;
# Transversly isotropic material properties
E1 = 150e9
E2 = 12.1e9
nu12 = 0.248
G12 = 4.4e9
nu23 = 0.458
G23 = E2 / (2*(1+nu23))
# Failure Strengths
F1t = 1500e6
F1c = -1250e6
F2t = 50e6
F2c = -200e6
F12t = 100e6
F12c = -100e6
Strength = np.array([[F1t, F1c],
[F2t, F2c],
[F12t, F12c]])
th = sp.symbols('th')
    # Stiffness matrix in material coordinates; Sij6 (the 6x6 compliance
    # matrix) is assumed to be defined earlier in this module
    Cij6 = inv(Sij6)
# reduced stiffness in structural
Cij = sp.Matrix([[Cij6[0,0], Cij6[0,1], 0],
[Cij6[0,1], Cij6[1,1], 0],
[0, 0, Cij6[5,5] ]] )
Tij = sp.Matrix([[cos(th)**2, sin(th)**2, 2*sin(th)*cos(th)],
[sin(th)**2, cos(th)**2, -2*sin(th)*cos(th)],
[-cos(th)*sin(th), sin(th)*cos(th), (cos(th)**2-sin(th)**2)]])
## Cylindrical Bending of a laminated plate
# displacement in w (z direction)
from sympy.abc import x
f = Function('f')
eq = dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x), hint = '1st_homogeneous_coeff_best', simplify=False)
pprint(eq)
#==============================================================================
th,x,y,z,q0,C1,C2,C3,C4,C5,C6,C7,A11,B11,D11,A16,B16 = symbols('th x y z q0 C1 C2 C3 C4 C5 C6 C7 A11 B11 D11 A16 B16')
wfun = Function('wfun')
ufun = Function('ufun')
## EQ 4.4.1a
eq1 = A11*ufun(x).diff(x,2) - B11*wfun(x).diff(x,3)
#eq1 = A11*diff(ufun,x,2) - B11*diff(wfun,x,3); # C5 C1
## EQ 4.4.1b
#eq2 = A16*diff(ufun,x,2) - B16*diff(wfun,x,3); # C5 C1
eq2 = A16*ufun(x).diff(x,2) - B16*wfun(x).diff(x,3)
## EQ 4.4.1c
#eq3 = B11*diff(ufun,x,3) - D11*diff(wfun,x,4) + q0;
eq3 = B11*ufun(x).diff(x,3) - D11*wfun(x).diff(x,4) + q0
    ################## python conversion ended here ################################
# solve eq1 eq2 and eq3 to get the w and u functions
# displacement in w (z direction) from eq1,eq2,eq3
wfun = A11*q0*x**4 / (4*(6*B11**2-6*A11*D11)) + C1 + C2*x + C3*x**2 + C4*x**3 # C1 C2 C3 C4
# displacement in u (x direction) from eq1,eq2,eq3
ufun = B11*q0*x**3 / (6*(B11**2-A11*D11)) + C7 + x*C6 + 3*B11*x**2*C5/A11 # C5 C6 C7
# Cij6.evalf(subs={th:plyangle[i]}) * (z_[i+1]**3-z_[i]**3)
# cond1 -> w(0)=0 at x(0), roller
C1sol = sp.solve(wfun.subs(x,0), C1)[0] # = 0
# cond2 -> angle at dw/dx at x(0) is 0, cantilever
C2sol = sp.solve(wfun.diff(x).subs(x,0),C2)[0] # = 0
# cond3 -> w(z) = 0 at x(a), roller
C4sol1 = sp.solve(wfun.subs({x:a,C1:C1sol,C2:C2sol}),C4)[0] # C3
# cond4 u = 0 at x = 0
C7sol = sp.solve(ufun.subs(x,0),C7)[0] #=0
# u=0 at x = a
C5sol1 = sp.solve(ufun.subs({x:a, C7:C7sol}),C5)[0] #C6
# cond 5 EQ 4.4.14a Myy = 0 @ x(a) (Mxx , B11 D11) (Myy, B12 D12) roller no moment
C6sol1 = sp.solve( ( ((B11*ufun.diff(x)+0.5*wfun.diff(x)**2 ) - D11*wfun.diff(x,2)).subs({x:a, C1:C1sol, C2:C2sol, C4:C4sol1, C5:C5sol1, C7:C7sol})), C6)[0] # C6 C3
# EQ 4.4.13a, Nxx = 0 @ x(0) roller has no Nxx
C6sol2 = sp.solve( ((A11* ufun.diff(x) + 0.5*wfun.diff(x)**2)-B11*wfun.diff(x,2)).subs({x:a, C1:C1sol, C2:C2sol, C4:C4sol1, C5:C5sol1, C7:C7sol}),C6)[0] # C6 C3
C3sol = sp.solve(C6sol1 - C6sol2,C3)[0]
C4sol = C4sol1.subs(C3,C3sol)
C6sol = sp.simplify(C6sol2.subs(C3,C3sol))
C5sol = sp.simplify(C5sol1.subs(C6,C6sol))
    # substitute integration constants with actual values (a trailing _ denotes a number)
    # Aij, Bij, Dij are assumed to hold the laminate A, B, D matrices computed earlier
C1_ = copy(C1sol)
C2_ = copy(C2sol)
C7_ = copy(C7sol)
C3_ = C3sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
C4_ = C4sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
C5_ = C5sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
C6_ = C6sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
    # function w(x) vertical displacement w along z with actual values
wsol = wfun.subs({q0:q0_, C1:C1_, C2:C2_, C3:C3_, C4:C4_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
    # function u(x) horizontal displacement u along x with actual values
usol = ufun.subs({q0:q0_, C5:C5_, C6:C6_, C7:C7_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
# 3d plots
plot3d(wsol,(x,0,a), (y,0,b))
plt.xlabel('x')
plt.ylabel('y')
plt.title('Cylindrical Bending -Displacement of a plate With CLPT')
## Strain calculation
# eq 3.3.8 (pg 116 reddy (pdf = 138))
    # von Karman axial strain: u' + 0.5*(w')**2 - z*w''
    epstotal = array([[usol.diff(x) + 0.5*wsol.diff(x)**2 - z*wsol.diff(x,2)],[0],[0]])
epsx = epstotal[0,0]
## Calculating and plotting Stress in each layer
res = 8 # accuracy of finding max and min stress
xplot = linspace(0,a,res)
yplot = linspace(0,b,res)
G0 = sp.symbols('G0')
Globalminstress = np.zeros((3, nply))
Globalmaxstress = np.zeros((3, nply))
for kstress in range(3): # stress state s_x, s_y, s_xz
plt.figure(kstress+1)
for klay in range(nply): # loop through all layers
thplot = plyangle[klay]
zplot = linspace(z_[klay],z_[klay+1],res)
stressplot = np.zeros((len(zplot),len(xplot)))
## Calc Stresses
if kstress == 2:
# Shear stresses
G0_ = -sp.integrate(s_stress[0].diff(x),z)+G0
# solve for shear stresses from s_1
s_xz = sp.solve(G0_,G0)[0]
# out of plane shear S_xz does not need to be transformed ??
plot3d(s_xz, (x,0, a), (z, z_[klay], z_[klay+1]) )
else:
# normal stresses
                # Cij = reduced structural stiffness in structural coordinates 3x3
                # stress in structural coordinates
                s_stress = Cij.subs(th,thplot) @ epstotal
                # stress in material coordinates
                m_stress = Tij.subs(th,thplot) @ s_stress
#ezsurf(m_stress(kstress),[0,a,z_(klay),z_(klay+1)])
## find max stress in each layer
ii=0
for i in xplot:
jj=0
for j in zplot:
if kstress == 2:
stressplot[ii,jj] = s_xz.subs({x:i, z:j})
else:
stressplot[ii,jj] = m_stress[kstress].subs({x:i, z:j})
                    jj+=1
                ii+=1
Globalminstress[kstress,klay] = np.min(stressplot)
Globalmaxstress[kstress,klay] = np.max(stressplot)
#
        plt.title(r'$\sigma_%i$' % kstress)
## Plot max stress and failure strength
plt.figure()
for i in range(3):
plt.subplot(1, 3, i+1)
plt.bar(range(nply), Globalmaxstress[i,:])
plt.bar(range(nply), Globalminstress[i,:])
plt.scatter(range(nply),np.ones(nply) * Strength[i,0])
plt.scatter(range(nply),np.ones(nply) * Strength[i,1])
plt.xlabel('layer')
        plt.title(r'$\sigma_%i$' % i)
def plate_navier():
'''
composite plate bending with navier solution
TODO - code needs to be converted from matlab
'''
## Plate a*b*h simply supported under q = q0 CLPT
pass
'''
q0,a,b,m,n,x,y = sp.symbols('q0 a b m n x y')
Qmn = 4/(a*b)*sp.integrate( sp.integrate( q0*sp.sin(m*pi*x/a)*sp.sin(n*pi*y/b),(x,0,a)) ,(y,0,b))
dmn = pi**4 / b**4 * (DTij(1,1)*m**4*(b/a)**4 + 2* (DTij(1,2)+2*DTij(6,6)) *m**2*n**2*(b/a)**2 + DTij(2,2)*n**4)
Wmn = Qmn/dmn;
w0 = Wmn * sin(m*pi*x/a) * sin(n*pi*y/b);
w0_ = subs(w0,[q0 a b],[-q0_ a_ b_] );
figure
w0sum = 0;
for n_ = 1:10
for m_ = 1:10
w0sum = w0sum + subs(w0_,[n m],[n_ m_]);
end
end
w0sum;
% xplot = linspace(0,a_,res);
% yplot = linspace(0,b_,res);
ii=1;
for i = xplot
jj=1;
for j = yplot
w0plot(ii,jj) = subs(w0sum,[x y],[i j]);
jj=jj+1;
end
ii=ii+1;
end
surf(xplot,yplot,w0plot)
colorbar
set(gca,'PlotBoxAspectRatio',[2 1 1]);
xlabel('length a, u(x)')
ylabel('length b, v(y)')
zlabel('w(z)')
'''
class laminate(object):
"""
IN-WORK - laminate object for composite material analysis
"""
# constructor
def __init__(self, plyangle, matindex, matname):
# run when laminate is instantiated
# loads materials used
self.plyangle = plyangle
self.matindex = matindex
self.matname = matname
self.__mat = self.__import_matprops(matname)
# create a simple function to handle CTE properties
def __alphaf(self, mat):
return array([[mat.alpha1], [mat.alpha2], [0]])
self.laminatethk = array([self.__mat[matname[i]].plythk for i in matindex ])
self.nply = len(self.laminatethk) # number of plies
self.H = np.sum(self.laminatethk) # plate thickness
# area = a_width*H
z = zeros(self.nply+1)
zmid = zeros(self.nply)
z[0] = -self.H/2
for i in range(self.nply):
z[i+1] = z[i] + self.laminatethk[i]
zmid[i] = z[i] + self.laminatethk[i]/2
self.z = z
self.zmid = zmid
self.__abdmatrix()
def __Qf(self, E1,E2,nu12,G12):
        '''Transversely isotropic reduced stiffness matrix, pg 58 Herakovich.
        G12 = E1/(2*(1+nu12)) if isotropic'''
nu21 = E2*nu12/E1
Q = array([[E1/(1-nu12*nu21), E2*nu12/(1-nu12*nu21), 0],
[ E2*nu12/(1-nu12*nu21), E2/(1-nu12*nu21), 0],
[0, 0, G12]])
return Q
def __T1(self, th):
'''Stress Transform for Plane Stress
th=ply angle in degrees
        Voigt notation for the stress transform. sigma1 = T1 @ sigmax
recall T1(th)**-1 == T1(-th)'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T1 = array( [[m**2, n**2, 2*m*n],
[n**2, m**2,-2*m*n],
[-m*n, m*n,(m**2-n**2)]])
return T1
def __T2(self, th):
'''Strain Transform for Plane Stress
th=ply angle in degrees
        Voigt notation for the strain transform. epsilon1 = T2 @ epsilonx'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T2 = array( [[m**2, n**2, m*n],
[n**2, m**2,-m*n],
[-2*m*n, 2*m*n, (m**2-n**2)]])
return T2
# private method
def __abdmatrix(self):
'''used within the object but not accessible outside'''
#==========================================================================
# ABD Matrix Compute
#==========================================================================
# Reduced stiffness matrix for a plane stress ply in principal coordinates
        # calculating Q from the compliance matrix may cause cancellation errors
A = zeros((3,3)); B = zeros((3,3)); D = zeros((3,3))
for i in range(self.nply): # = nply
Q = self.__Qf(self.__mat[self.matname[self.matindex[i]]].E1,
self.__mat[self.matname[self.matindex[i]]].E2,
self.__mat[self.matname[self.matindex[i]]].nu12,
self.__mat[self.matname[self.matindex[i]]].G12 )
Qbar = inv(self.__T1(self.plyangle[i])) @ Q @ self.__T2(self.plyangle[i]) # solve(T1(plyangle[i]), Q) @ T2(plyangle[i])
A += Qbar*(self.z[i+1]-self.z[i])
# coupling stiffness
B += (1/2)*Qbar*(self.z[i+1]**2-self.z[i]**2)
# bending or flexural laminate stiffness relating moments to curvatures
D += (1/3)*Qbar*(self.z[i+1]**3-self.z[i]**3)
# laminate stiffness matrix
ABD = zeros((6,6))
ABD[0:3,0:3] = A
ABD[0:3,3:6] = B
ABD[3:6,0:3] = B
ABD[3:6,3:6] = D
self.ABD = ABD
# method
def available_materials(self):
'''show the materials available in the library'''
matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0)
print('---available materials---')
for k in matprops.columns.tolist():
print(k)
print('-------------------------')
# private method to be used internally
def __import_matprops(self, mymaterial=['T300_5208','AL_7075']):
'''
import material properties
'''
matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0)
if mymaterial==[] or mymaterial=='':
print(matprops.columns.tolist())
mat = matprops[mymaterial]
#mat.applymap(lambda x:np.float(x))
mat = mat.applymap(lambda x:pd.to_numeric(x, errors='ignore'))
return mat
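# Hedged usage sketch for the laminate class; 'graphite-polymer_SI' is an
# assumed entry in compositematerials.csv (check available_materials()):
#   lam1 = laminate(plyangle=[0, 45, 45, 0], matindex=[0, 0, 0, 0],
#                   matname=['graphite-polymer_SI'])
#   lam1.ABD   # 6x6 laminate stiffness matrix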
def failure_envelope_laminate(Nx,Ny,Nxy,Mx,My,Mxy,q0,mymat,layup):
'''
    find the minimum margin given the load conditions
'''
    # run a single-material, four-ply analysis and return its max-stress failure index
_, FAILUREINDEX_MAXSTRESS_max = laminate_calcs(NM=[Nx,Ny,Nxy,Mx,My,Mxy],
ek=[0,0,0,0,0,0],
q0=q0,
plyangle= layup,
plymatindex=[0,0,0,0],
materials = [mymat],
platedim=[10,10],
zoffset=0,
SF=1.0,
plots=0,
prints=0)
return FAILUREINDEX_MAXSTRESS_max
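# Illustrative call (material name assumed to exist in the library):
#   fi = failure_envelope_laminate(100, 0, 0, 0, 0, 0, 0,
#                                  'E-Glass Epoxy fabric M10E-3783', [0,45,45,0])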
def plot_single_max_failure_loads(mymat='E-Glass Epoxy fabric M10E-3783', mylayup=[0,45,45,0] ):
    '''
    loops through each load type and linearly extrapolates to the load at
    which the failure index reaches 1 (i.e. margin = 0).
    an older version used the newton method for root finding:
    scipy.optimize.newton(laminate_min, guess)
    TODO: the current envelope is built from random sample points. rework it
    to use the failure index, FI, instead of margin, exploiting the linear
    relationship to construct the envelope directly
    '''
#laminate_min = lambda N: failure_envelope_laminate(N,0,0,0,0,0,0)
loadnamelist = ['Nx','Ny','Nxy','Mx','My','Mxy','q0']
laminate_min_list = []
laminate_min_list.append(lambda N: failure_envelope_laminate(N,0,0,0,0,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,N,0,0,0,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,N,0,0,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,N,0,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,N,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,0,N,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,0,0,N,mymat,mylayup))
envelope_loads = []
N_t = array([0,1])
N_c = array([0,-1])
for loadname,laminate_min in zip(loadnamelist,laminate_min_list):
# tension
FI = [laminate_min(N) for N in N_t]
m = (FI[1]-FI[0]) / (N_t[1] - N_t[0])
b = FI[1]-m*N_t[1]
N_crit_t = (1-b) / m
# compression
FI = [laminate_min(N) for N in N_c]
m = (FI[1]-FI[0]) / (N_c[1] - N_c[0])
b = FI[1]-m*N_c[1]
N_crit_c = (1-b) / m
envelope_loads.append('{} = {:.1f} , {:.1f}'.format(loadname,N_crit_t, N_crit_c))
print('------------- enveloped loads for {} {} -----------------'.format(mylayup, mymat))
for k in envelope_loads:
print(k)
# plot envelope
Nx_env = []
Nxy_env = []
laminate_min = lambda N: failure_envelope_laminate(N,0,0,0,0,0,0,mymat,mylayup)
# compression
FI = [laminate_min(N) for N in N_c]
m = (FI[1]-FI[0]) / (N_c[1] - N_c[0])
b = FI[1]-m*N_c[1]
Nx_env.append( (1-b) / m )
Nxy_env.append( 0 )
# tension
FI = [laminate_min(N) for N in N_t]
m = (FI[1]-FI[0]) / (N_t[1] - N_t[0])
b = FI[1]-m*N_t[1]
Nx_env.append( (1-b) / m )
Nxy_env.append( 0 )
laminate_min = lambda N: failure_envelope_laminate(0,0,N,0,0,0,0,mymat,mylayup)
# compression
FI = [laminate_min(N) for N in N_c]
m = (FI[1]-FI[0]) / (N_c[1] - N_c[0])
b = FI[1]-m*N_c[1]
Nxy_env.append( (1-b) / m )
Nx_env.append( 0 )
# tension
FI = [laminate_min(N) for N in N_t]
m = (FI[1]-FI[0]) / (N_t[1] - N_t[0])
b = FI[1]-m*N_t[1]
Nxy_env.append( (1-b) / m )
Nx_env.append( 0 )
laminate_min_Nx_Nxy_func = lambda Nx,Nxy: failure_envelope_laminate(Nx,0,Nxy,0,0,0,0,mymat,mylayup)
n = 500
    f = 1.25 # expands the random sampling range beyond the uniaxial envelope corners
# arr1 = np.random.randint(Nx_env[0]-abs(Nx_env[0]*f),Nx_env[0]+abs(Nx_env[0])*f,n)
# arr2 = np.random.randint(Nx_env[1]-abs(Nx_env[1]*f),Nx_env[1]+abs(Nx_env[1])*f,n)
# Nx_r = np.concatenate((arr1, arr2))
#
# arr1 = np.random.randint(Nxy_env[2]-abs(Nxy_env[2])*f,Nxy_env[2]+abs(Nxy_env[2])*f,n)
# arr2 = np.random.randint(Nxy_env[3]-abs(Nxy_env[3])*f,Nxy_env[3]+abs(Nxy_env[3])*f,n)
# Nxy_r = np.concatenate((arr1, arr2))
Nx_r = np.random.randint(Nx_env[0]*f,Nx_env[1]*f, n)
Nxy_r = np.random.randint(Nxy_env[2]*f,Nxy_env[3]*f, n)
for Nx_ri, Nxy_ri in zip(Nx_r, Nxy_r):
FI = laminate_min_Nx_Nxy_func(Nx_ri, Nxy_ri)
if FI < 1:
Nx_env.append(Nx_ri)
Nxy_env.append(Nxy_ri)
points = array([ [x,xy] for x,xy in zip(Nx_env, Nxy_env)])
hull = scipy.spatial.ConvexHull(points)
plot(points[:,0], points[:,1], 'bo')
for simplex in hull.simplices:
plot(points[simplex, 0], points[simplex, 1], 'k-')
xlabel('Nx, lb/in')
ylabel('Nxy, lb/in')
title('Failure envelope')
return envelope_loads
def my_laminate_with_loading():
# loads lbs/in
Nx = 50
Ny = 0
Nxy = 0
Mx = 0
My = 0
Mxy = 0
q0 = 0 # pressure
# Qx = 0
# Qy = 0
a_width = 50
b_length = 3.14*6.75
## sandwich laminate
# plyangle= [45,45,0, 45,45],
# plymatindex=[0, 0, 1, 0, 0],
    # 6-ply E-Glass uni laminate; uncomment the sandwich layup above for a cored panel
laminate_calcs(NM=[Nx,Ny,Nxy,Mx,My,Mxy],
ek=[0,0,0,0,0,0],
q0=q0,
plyangle= [0,60,-60,-60,60,0],
plymatindex=[0,0,0,0,0,0],
materials = ['E-Glass Epoxy Uni'],
platedim=[a_width,b_length],
zoffset=0,
SF=2.0,
plots=0,
prints=1)
if __name__=='__main__':
#plot_single_max_failure_loads()
#plot_failure_index()
my_laminate_with_loading()
#material_plots(['E-Glass Epoxy fabric M10E-3783'])
#plate()
#plot_Nx_Nxy_failure_envelope(['Carbon_cloth_AGP3705H'])
#plot_single_max_failure_loads()
# # reload modules
# import importlib ; importlib.reload
# from composites import laminate
# plyangle = [0,45]
# matindex = [0,0]
# matname = ['graphite-polymer_SI']
# lam1 = laminate(plyangle, matindex, matname)
# lam1.ABD
| mit | 4,286,907,931,120,713,000 | 37.298794 | 238 | 0.518464 | false |
StoneyJackson/unzipR | unzipr.py | 1 | 4368 | # unzipR - A library for recursively extracting files.
# Copyright (C) 2014 Stoney Jackson <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Unzipr determines a file's compression algorithm based on the file's
extension. Zip files are extracted into a directory with the same name as
the zip file minus its extension. So foo.zip is extracted into a directory
named foo.
New formats can be supported via Unzipr.registerUnzipFormat().
See installRarSupport() at the end of this file for an example.
'''
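# Minimal usage sketch (paths are illustrative):
#   import unzipr
#   unzipr.unzipFileRecursively('downloads/archive.zip')   # extracts to downloads/archive/
#   unzipr.unzipFilesInDirectoryRecursively('downloads')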
import pathlib
import shutil
import logging
logger = logging.getLogger(__name__)
def deleteZipFilesFromDirectoryRecursively(directory):
directory = pathlib.Path(directory)
for a_file in directory.iterdir():
if isZipFile(a_file):
a_file.unlink()
elif a_file.is_dir():
deleteZipFilesFromDirectoryRecursively(a_file)
def unzipFileRecursively(zipfile, toDir=None):
'''
If toDir is None, zipfile is extracted to a directory whose name is the same
as the zipfile's name minus its extensions.
'''
zipfile = pathlib.Path(zipfile)
toDir = unzipFile(zipfile, toDir)
unzipFilesInDirectoryRecursively(toDir)
return toDir
def unzipFilesInDirectoryRecursively(directory):
directory = pathlib.Path(directory)
for a_file in directory.iterdir():
logger.debug("processing " + str(a_file))
if isZipFile(a_file):
logger.debug("unzipping " + str(a_file))
unzipFileRecursively(a_file)
elif a_file.is_dir():
logger.debug("recursing " + str(a_file))
unzipFilesInDirectoryRecursively(a_file)
def unzipFile(zipfile, toDir=None):
'''
If toDir is None, zipfile is extracted to a directory whose name is the same
as the zipfile's name minus its extensions.
'''
zipfile = pathlib.Path(zipfile)
if toDir:
toDir = pathlib.Path(toDir)
else:
toDir = zipfile.parent / getFileNameWithoutExtension(zipfile)
shutil.unpack_archive(str(zipfile), str(toDir))
return toDir
def getFileNameWithoutExtension(theFile):
theFile = pathlib.Path(theFile)
extension = getFileExtension(theFile)
return theFile.name[:-len(extension)]
def isZipFile(zipfile):
zipfile = pathlib.Path(zipfile)
isZipFile = zipfile.is_file() and fileHasSupportedExtension(zipfile)
return isZipFile
def fileHasSupportedExtension(zipfile):
zipfile = pathlib.Path(zipfile)
extension = getFileExtension(zipfile)
return isSupportedExtension(extension)
def getFileExtension(theFile):
    # Treat '.tar.gz' as a single compound extension; otherwise fall back to
    # the final suffix so a string is always returned.
    if len(theFile.suffixes) >= 2:
        lastTwoSuffixes = ''.join(theFile.suffixes[-2:])
        if lastTwoSuffixes == '.tar.gz':
            return lastTwoSuffixes
    return theFile.suffix
def isSupportedExtension(extension):
return extension in getSupportedExtensions()
def getSupportedExtensions():
supported_extensions = []
for format_ in shutil.get_unpack_formats():
supported_extensions += format_[1]
return supported_extensions
def registerUnzipFormat(name, extensions, function):
shutil.register_unpack_format(name, extensions, function)
def installRarSupport():
try:
import rarfile
def unrar(zipFile, toDir):
with rarfile.RarFile(zipFile) as rf:
rf.extractall(path=toDir)
registerUnzipFormat('rar', ['.rar'], unrar)
except ImportError:
pass
def install7zipSupport():
if shutil.which('7z'):
import subprocess
def un7zip(zipFile, toDir):
subprocess.call(['7z', 'x', str(zipFile), '-o' + str(toDir)])
registerUnzipFormat('7zip', ['.7z'], un7zip)
installRarSupport()
install7zipSupport()
| gpl-3.0 | 7,436,011,763,455,285,000 | 31.117647 | 80 | 0.695971 | false |
postlund/home-assistant | tests/components/sonarr/test_sensor.py | 1 | 30602 | """The tests for the Sonarr platform."""
from datetime import datetime
import time
import unittest
import pytest
import homeassistant.components.sonarr.sensor as sonarr
from homeassistant.const import DATA_GIGABYTES
from tests.common import get_test_home_assistant
def mocked_exception(*args, **kwargs):
"""Mock exception thrown by requests.get."""
raise OSError
def mocked_requests_get(*args, **kwargs):
"""Mock requests.get invocations."""
class MockResponse:
"""Class to represent a mocked response."""
def __init__(self, json_data, status_code):
"""Initialize the mock response class."""
self.json_data = json_data
self.status_code = status_code
def json(self):
"""Return the json of the response."""
return self.json_data
today = datetime.date(datetime.fromtimestamp(time.time()))
url = str(args[0])
if "api/calendar" in url:
return MockResponse(
[
{
"seriesId": 3,
"episodeFileId": 0,
"seasonNumber": 4,
"episodeNumber": 11,
"title": "Easy Com-mercial, Easy Go-mercial",
"airDate": str(today),
"airDateUtc": "2014-01-27T01:30:00Z",
"overview": "To compete with fellow “restaurateur,” Ji...",
"hasFile": "false",
"monitored": "true",
"sceneEpisodeNumber": 0,
"sceneSeasonNumber": 0,
"tvDbEpisodeId": 0,
"series": {
"tvdbId": 194031,
"tvRageId": 24607,
"imdbId": "tt1561755",
"title": "Bob's Burgers",
"cleanTitle": "bobsburgers",
"status": "continuing",
"overview": "Bob's Burgers follows a third-generation ...",
"airTime": "5:30pm",
"monitored": "true",
"qualityProfileId": 1,
"seasonFolder": "true",
"lastInfoSync": "2014-01-26T19:25:55.4555946Z",
"runtime": 30,
"images": [
{
"coverType": "banner",
"url": "http://slurm.trakt.us/images/bann.jpg",
},
{
"coverType": "poster",
"url": "http://slurm.trakt.us/images/poster00.jpg",
},
{
"coverType": "fanart",
"url": "http://slurm.trakt.us/images/fan6.jpg",
},
],
"seriesType": "standard",
"network": "FOX",
"useSceneNumbering": "false",
"titleSlug": "bobs-burgers",
"path": "T:\\Bob's Burgers",
"year": 0,
"firstAired": "2011-01-10T01:30:00Z",
"qualityProfile": {
"value": {
"name": "SD",
"allowed": [
{"id": 1, "name": "SDTV", "weight": 1},
{"id": 8, "name": "WEBDL-480p", "weight": 2},
{"id": 2, "name": "DVD", "weight": 3},
],
"cutoff": {"id": 1, "name": "SDTV", "weight": 1},
"id": 1,
},
"isLoaded": "true",
},
"seasons": [
{"seasonNumber": 4, "monitored": "true"},
{"seasonNumber": 3, "monitored": "true"},
{"seasonNumber": 2, "monitored": "true"},
{"seasonNumber": 1, "monitored": "true"},
{"seasonNumber": 0, "monitored": "false"},
],
"id": 66,
},
"downloading": "false",
"id": 14402,
}
],
200,
)
if "api/command" in url:
return MockResponse(
[
{
"name": "RescanSeries",
"startedOn": "0001-01-01T00:00:00Z",
"stateChangeTime": "2014-02-05T05:09:09.2366139Z",
"sendUpdatesToClient": "true",
"state": "pending",
"id": 24,
}
],
200,
)
if "api/wanted/missing" in url or "totalRecords" in url:
return MockResponse(
{
"page": 1,
"pageSize": 15,
"sortKey": "airDateUtc",
"sortDirection": "descending",
"totalRecords": 1,
"records": [
{
"seriesId": 1,
"episodeFileId": 0,
"seasonNumber": 5,
"episodeNumber": 4,
"title": "Archer Vice: House Call",
"airDate": "2014-02-03",
"airDateUtc": "2014-02-04T03:00:00Z",
"overview": "Archer has to stage an that ... ",
"hasFile": "false",
"monitored": "true",
"sceneEpisodeNumber": 0,
"sceneSeasonNumber": 0,
"tvDbEpisodeId": 0,
"absoluteEpisodeNumber": 50,
"series": {
"tvdbId": 110381,
"tvRageId": 23354,
"imdbId": "tt1486217",
"title": "Archer (2009)",
"cleanTitle": "archer2009",
"status": "continuing",
"overview": "At ISIS, an international spy ...",
"airTime": "7:00pm",
"monitored": "true",
"qualityProfileId": 1,
"seasonFolder": "true",
"lastInfoSync": "2014-02-05T04:39:28.550495Z",
"runtime": 30,
"images": [
{
"coverType": "banner",
"url": "http://slurm.trakt.us//57.12.jpg",
},
{
"coverType": "poster",
"url": "http://slurm.trakt.u/57.12-300.jpg",
},
{
"coverType": "fanart",
"url": "http://slurm.trakt.us/image.12.jpg",
},
],
"seriesType": "standard",
"network": "FX",
"useSceneNumbering": "false",
"titleSlug": "archer-2009",
"path": "E:\\Test\\TV\\Archer (2009)",
"year": 2009,
"firstAired": "2009-09-18T02:00:00Z",
"qualityProfile": {
"value": {
"name": "SD",
"cutoff": {"id": 1, "name": "SDTV"},
"items": [
{
"quality": {"id": 1, "name": "SDTV"},
"allowed": "true",
},
{
"quality": {"id": 8, "name": "WEBDL-480p"},
"allowed": "true",
},
{
"quality": {"id": 2, "name": "DVD"},
"allowed": "true",
},
{
"quality": {"id": 4, "name": "HDTV-720p"},
"allowed": "false",
},
{
"quality": {"id": 9, "name": "HDTV-1080p"},
"allowed": "false",
},
{
"quality": {"id": 10, "name": "Raw-HD"},
"allowed": "false",
},
{
"quality": {"id": 5, "name": "WEBDL-720p"},
"allowed": "false",
},
{
"quality": {"id": 6, "name": "Bluray-720p"},
"allowed": "false",
},
{
"quality": {"id": 3, "name": "WEBDL-1080p"},
"allowed": "false",
},
{
"quality": {
"id": 7,
"name": "Bluray-1080p",
},
"allowed": "false",
},
],
"id": 1,
},
"isLoaded": "true",
},
"seasons": [
{"seasonNumber": 5, "monitored": "true"},
{"seasonNumber": 4, "monitored": "true"},
{"seasonNumber": 3, "monitored": "true"},
{"seasonNumber": 2, "monitored": "true"},
{"seasonNumber": 1, "monitored": "true"},
{"seasonNumber": 0, "monitored": "false"},
],
"id": 1,
},
"downloading": "false",
"id": 55,
}
],
},
200,
)
if "api/queue" in url:
return MockResponse(
[
{
"series": {
"title": "Game of Thrones",
"sortTitle": "game thrones",
"seasonCount": 6,
"status": "continuing",
"overview": "Seven noble families fight for land ...",
"network": "HBO",
"airTime": "21:00",
"images": [
{
"coverType": "fanart",
"url": "http://thetvdb.com/banners/fanart/-83.jpg",
},
{
"coverType": "banner",
"url": "http://thetvdb.com/banners/-g19.jpg",
},
{
"coverType": "poster",
"url": "http://thetvdb.com/banners/posters-34.jpg",
},
],
"seasons": [
{"seasonNumber": 0, "monitored": "false"},
{"seasonNumber": 1, "monitored": "false"},
{"seasonNumber": 2, "monitored": "true"},
{"seasonNumber": 3, "monitored": "false"},
{"seasonNumber": 4, "monitored": "false"},
{"seasonNumber": 5, "monitored": "true"},
{"seasonNumber": 6, "monitored": "true"},
],
"year": 2011,
"path": "/Volumes/Media/Shows/Game of Thrones",
"profileId": 5,
"seasonFolder": "true",
"monitored": "true",
"useSceneNumbering": "false",
"runtime": 60,
"tvdbId": 121361,
"tvRageId": 24493,
"tvMazeId": 82,
"firstAired": "2011-04-16T23:00:00Z",
"lastInfoSync": "2016-02-05T16:40:11.614176Z",
"seriesType": "standard",
"cleanTitle": "gamethrones",
"imdbId": "tt0944947",
"titleSlug": "game-of-thrones",
"certification": "TV-MA",
"genres": ["Adventure", "Drama", "Fantasy"],
"tags": [],
"added": "2015-12-28T13:44:24.204583Z",
"ratings": {"votes": 1128, "value": 9.4},
"qualityProfileId": 5,
"id": 17,
},
"episode": {
"seriesId": 17,
"episodeFileId": 0,
"seasonNumber": 3,
"episodeNumber": 8,
"title": "Second Sons",
"airDate": "2013-05-19",
"airDateUtc": "2013-05-20T01:00:00Z",
"overview": "King’s Landing hosts a wedding, and ...",
"hasFile": "false",
"monitored": "false",
"absoluteEpisodeNumber": 28,
"unverifiedSceneNumbering": "false",
"id": 889,
},
"quality": {
"quality": {"id": 7, "name": "Bluray-1080p"},
"revision": {"version": 1, "real": 0},
},
"size": 4472186820,
"title": "Game.of.Thrones.S03E08.Second.Sons.2013.1080p.",
"sizeleft": 0,
"timeleft": "00:00:00",
"estimatedCompletionTime": "2016-02-05T22:46:52.440104Z",
"status": "Downloading",
"trackedDownloadStatus": "Ok",
"statusMessages": [],
"downloadId": "SABnzbd_nzo_Mq2f_b",
"protocol": "usenet",
"id": 1503378561,
}
],
200,
)
if "api/series" in url:
return MockResponse(
[
{
"title": "Marvel's Daredevil",
"alternateTitles": [{"title": "Daredevil", "seasonNumber": -1}],
"sortTitle": "marvels daredevil",
"seasonCount": 2,
"totalEpisodeCount": 26,
"episodeCount": 26,
"episodeFileCount": 26,
"sizeOnDisk": 79282273693,
"status": "continuing",
"overview": "Matt Murdock was blinded in a tragic accident...",
"previousAiring": "2016-03-18T04:01:00Z",
"network": "Netflix",
"airTime": "00:01",
"images": [
{
"coverType": "fanart",
"url": "/sonarr/MediaCover/7/fanart.jpg?lastWrite=",
},
{
"coverType": "banner",
"url": "/sonarr/MediaCover/7/banner.jpg?lastWrite=",
},
{
"coverType": "poster",
"url": "/sonarr/MediaCover/7/poster.jpg?lastWrite=",
},
],
"seasons": [
{
"seasonNumber": 1,
"monitored": "false",
"statistics": {
"previousAiring": "2015-04-10T04:01:00Z",
"episodeFileCount": 13,
"episodeCount": 13,
"totalEpisodeCount": 13,
"sizeOnDisk": 22738179333,
"percentOfEpisodes": 100,
},
},
{
"seasonNumber": 2,
"monitored": "false",
"statistics": {
"previousAiring": "2016-03-18T04:01:00Z",
"episodeFileCount": 13,
"episodeCount": 13,
"totalEpisodeCount": 13,
"sizeOnDisk": 56544094360,
"percentOfEpisodes": 100,
},
},
],
"year": 2015,
"path": "F:\\TV_Shows\\Marvels Daredevil",
"profileId": 6,
"seasonFolder": "true",
"monitored": "true",
"useSceneNumbering": "false",
"runtime": 55,
"tvdbId": 281662,
"tvRageId": 38796,
"tvMazeId": 1369,
"firstAired": "2015-04-10T04:00:00Z",
"lastInfoSync": "2016-09-09T09:02:49.4402575Z",
"seriesType": "standard",
"cleanTitle": "marvelsdaredevil",
"imdbId": "tt3322312",
"titleSlug": "marvels-daredevil",
"certification": "TV-MA",
"genres": ["Action", "Crime", "Drama"],
"tags": [],
"added": "2015-05-15T00:20:32.7892744Z",
"ratings": {"votes": 461, "value": 8.9},
"qualityProfileId": 6,
"id": 7,
}
],
200,
)
if "api/diskspace" in url:
return MockResponse(
[
{
"path": "/data",
"label": "",
"freeSpace": 282500067328,
"totalSpace": 499738734592,
}
],
200,
)
if "api/system/status" in url:
return MockResponse(
{
"version": "2.0.0.1121",
"buildTime": "2014-02-08T20:49:36.5560392Z",
"isDebug": "false",
"isProduction": "true",
"isAdmin": "true",
"isUserInteractive": "false",
"startupPath": "C:\\ProgramData\\NzbDrone\\bin",
"appData": "C:\\ProgramData\\NzbDrone",
"osVersion": "6.2.9200.0",
"isMono": "false",
"isLinux": "false",
"isWindows": "true",
"branch": "develop",
"authentication": "false",
"startOfWeek": 0,
"urlBase": "",
},
200,
)
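    # any URL not matched above falls through to an unauthorized response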
return MockResponse({"error": "Unauthorized"}, 401)
class TestSonarrSetup(unittest.TestCase):
"""Test the Sonarr platform."""
# pylint: disable=invalid-name
DEVICES = []
def add_entities(self, devices, update):
"""Mock add devices."""
for device in devices:
self.DEVICES.append(device)
def setUp(self):
"""Initialize values for this testcase class."""
self.DEVICES = []
self.hass = get_test_home_assistant()
self.hass.config.time_zone = "America/Los_Angeles"
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
@unittest.mock.patch("requests.get", side_effect=mocked_requests_get)
def test_diskspace_no_paths(self, req_mock):
"""Test getting all disk space."""
config = {
"platform": "sonarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": [],
"monitored_conditions": ["diskspace"],
}
sonarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert "263.10" == device.state
assert "mdi:harddisk" == device.icon
assert DATA_GIGABYTES == device.unit_of_measurement
assert "Sonarr Disk Space" == device.name
assert "263.10/465.42GB (56.53%)" == device.device_state_attributes["/data"]
@unittest.mock.patch("requests.get", side_effect=mocked_requests_get)
def test_diskspace_paths(self, req_mock):
"""Test getting diskspace for included paths."""
config = {
"platform": "sonarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["diskspace"],
}
sonarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert "263.10" == device.state
assert "mdi:harddisk" == device.icon
assert DATA_GIGABYTES == device.unit_of_measurement
assert "Sonarr Disk Space" == device.name
assert "263.10/465.42GB (56.53%)" == device.device_state_attributes["/data"]
@unittest.mock.patch("requests.get", side_effect=mocked_requests_get)
def test_commands(self, req_mock):
"""Test getting running commands."""
config = {
"platform": "sonarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["commands"],
}
sonarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert 1 == device.state
assert "mdi:code-braces" == device.icon
assert "Commands" == device.unit_of_measurement
assert "Sonarr Commands" == device.name
assert "pending" == device.device_state_attributes["RescanSeries"]
@unittest.mock.patch("requests.get", side_effect=mocked_requests_get)
def test_queue(self, req_mock):
"""Test getting downloads in the queue."""
config = {
"platform": "sonarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["queue"],
}
sonarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert 1 == device.state
assert "mdi:download" == device.icon
assert "Episodes" == device.unit_of_measurement
assert "Sonarr Queue" == device.name
assert "100.00%" == device.device_state_attributes["Game of Thrones S03E08"]
@unittest.mock.patch("requests.get", side_effect=mocked_requests_get)
def test_series(self, req_mock):
"""Test getting the number of series."""
config = {
"platform": "sonarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["series"],
}
sonarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert 1 == device.state
assert "mdi:television" == device.icon
assert "Shows" == device.unit_of_measurement
assert "Sonarr Series" == device.name
assert (
"26/26 Episodes" == device.device_state_attributes["Marvel's Daredevil"]
)
@unittest.mock.patch("requests.get", side_effect=mocked_requests_get)
def test_wanted(self, req_mock):
"""Test getting wanted episodes."""
config = {
"platform": "sonarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["wanted"],
}
sonarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert 1 == device.state
assert "mdi:television" == device.icon
assert "Episodes" == device.unit_of_measurement
assert "Sonarr Wanted" == device.name
assert (
"2014-02-03" == device.device_state_attributes["Archer (2009) S05E04"]
)
@unittest.mock.patch("requests.get", side_effect=mocked_requests_get)
def test_upcoming_multiple_days(self, req_mock):
"""Test the upcoming episodes for multiple days."""
config = {
"platform": "sonarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["upcoming"],
}
sonarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert 1 == device.state
assert "mdi:television" == device.icon
assert "Episodes" == device.unit_of_measurement
assert "Sonarr Upcoming" == device.name
assert "S04E11" == device.device_state_attributes["Bob's Burgers"]
@pytest.mark.skip
@unittest.mock.patch("requests.get", side_effect=mocked_requests_get)
def test_upcoming_today(self, req_mock):
"""Test filtering for a single day.
        Sonarr needs to respond with at least 2 days of data.
"""
config = {
"platform": "sonarr",
"api_key": "foo",
"days": "1",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["upcoming"],
}
sonarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert 1 == device.state
assert "mdi:television" == device.icon
assert "Episodes" == device.unit_of_measurement
assert "Sonarr Upcoming" == device.name
assert "S04E11" == device.device_state_attributes["Bob's Burgers"]
@unittest.mock.patch("requests.get", side_effect=mocked_requests_get)
def test_system_status(self, req_mock):
"""Test getting system status."""
config = {
"platform": "sonarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["status"],
}
sonarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert "2.0.0.1121" == device.state
assert "mdi:information" == device.icon
assert "Sonarr Status" == device.name
assert "6.2.9200.0" == device.device_state_attributes["osVersion"]
@pytest.mark.skip
@unittest.mock.patch("requests.get", side_effect=mocked_requests_get)
def test_ssl(self, req_mock):
"""Test SSL being enabled."""
config = {
"platform": "sonarr",
"api_key": "foo",
"days": "1",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["upcoming"],
"ssl": "true",
}
sonarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert 1 == device.state
assert "s" == device.ssl
assert "mdi:television" == device.icon
assert "Episodes" == device.unit_of_measurement
assert "Sonarr Upcoming" == device.name
assert "S04E11" == device.device_state_attributes["Bob's Burgers"]
@unittest.mock.patch("requests.get", side_effect=mocked_exception)
def test_exception_handling(self, req_mock):
"""Test exception being handled."""
config = {
"platform": "sonarr",
"api_key": "foo",
"days": "1",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["upcoming"],
}
sonarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert device.state is None
| apache-2.0 | -6,594,276,752,610,751,000 | 41.612813 | 88 | 0.383187 | false |
aio-libs/aioredis | aioredis/connection.py | 1 | 59561 | import asyncio
import errno
import inspect
import io
import os
import socket
import ssl
import threading
import time
import warnings
from distutils.version import StrictVersion
from itertools import chain
from typing import (
Any,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from urllib.parse import ParseResult, parse_qs, unquote, urlparse
import async_timeout
from .compat import Protocol, TypedDict
from .exceptions import (
AuthenticationError,
AuthenticationWrongNumberOfArgsError,
BusyLoadingError,
ChildDeadlockedError,
ConnectionError,
DataError,
ExecAbortError,
InvalidResponse,
ModuleError,
NoPermissionError,
NoScriptError,
ReadOnlyError,
RedisError,
ResponseError,
TimeoutError,
)
from .utils import str_if_bytes
NONBLOCKING_EXCEPTION_ERROR_NUMBERS = {
BlockingIOError: errno.EWOULDBLOCK,
ssl.SSLWantReadError: 2,
ssl.SSLWantWriteError: 2,
ssl.SSLError: 2,
}
NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys())
try:
import hiredis
except (ImportError, ModuleNotFoundError):
HIREDIS_AVAILABLE = False
else:
HIREDIS_AVAILABLE = True
hiredis_version = StrictVersion(hiredis.__version__)
if hiredis_version < StrictVersion("1.0.0"):
warnings.warn(
"aioredis supports hiredis @ 1.0.0 or higher. "
f"You have hiredis @ {hiredis.__version__}. "
"Pure-python parser will be used instead."
)
HIREDIS_AVAILABLE = False
SYM_STAR = b"*"
SYM_DOLLAR = b"$"
SYM_CRLF = b"\r\n"
SYM_LF = b"\n"
SYM_EMPTY = b""
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
SENTINEL = object()
MODULE_LOAD_ERROR = "Error loading the extension. Please check the server logs."
NO_SUCH_MODULE_ERROR = "Error unloading module: no such module with that name"
MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not possible."
MODULE_EXPORTS_DATA_TYPES_ERROR = (
"Error unloading module: the module "
"exports one or more module-side data "
"types, can't unload"
)
EncodedT = Union[bytes, memoryview]
DecodedT = Union[str, int, float]
EncodableT = Union[EncodedT, DecodedT, None]
class Encoder:
"""Encode strings to bytes-like and decode bytes-like to strings"""
__slots__ = "encoding", "encoding_errors", "decode_responses"
def __init__(self, encoding: str, encoding_errors: str, decode_responses: bool):
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
def encode(self, value: EncodableT) -> EncodedT:
"""Return a bytestring or bytes-like representation of the value"""
if isinstance(value, (bytes, memoryview)):
return value
if isinstance(value, bool):
# special case bool since it is a subclass of int
raise DataError(
"Invalid input of type: 'bool'. "
"Convert to a bytes, string, int or float first."
)
if isinstance(value, (int, float)):
return repr(value).encode()
if not isinstance(value, str):
# a value we don't know how to deal with. throw an error
typename = value.__class__.__name__
raise DataError(
f"Invalid input of type: {typename!r}. "
"Convert to a bytes, string, int or float first."
)
if isinstance(value, str):
return value.encode(self.encoding, self.encoding_errors)
return value
def decode(self, value: EncodableT, force=False) -> EncodableT:
"""Return a unicode string from the bytes-like representation"""
if self.decode_responses or force:
if isinstance(value, memoryview):
return value.tobytes().decode(self.encoding, self.encoding_errors)
if isinstance(value, bytes):
return value.decode(self.encoding, self.encoding_errors)
return value
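# Illustrative Encoder behaviour (a sketch, not part of the original source):
#   enc = Encoder("utf-8", "strict", decode_responses=True)
#   enc.encode(1.5)   -> b'1.5'
#   enc.encode("key") -> b'key'
#   enc.decode(b"OK") -> 'OK'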
ExceptionMappingT = Mapping[str, Union[Type[Exception], Mapping[str, Type[Exception]]]]
class BaseParser:
"""Plain Python parsing class"""
__slots__ = "_stream", "_buffer", "_read_size"
EXCEPTION_CLASSES: ExceptionMappingT = {
"ERR": {
"max number of clients reached": ConnectionError,
"Client sent AUTH, but no password is set": AuthenticationError,
"invalid password": AuthenticationError,
# some Redis server versions report invalid command syntax
# in lowercase
"wrong number of arguments for 'auth' command": AuthenticationWrongNumberOfArgsError,
# some Redis server versions report invalid command syntax
# in uppercase
"wrong number of arguments for 'AUTH' command": AuthenticationWrongNumberOfArgsError,
MODULE_LOAD_ERROR: ModuleError,
MODULE_EXPORTS_DATA_TYPES_ERROR: ModuleError,
NO_SUCH_MODULE_ERROR: ModuleError,
MODULE_UNLOAD_NOT_POSSIBLE_ERROR: ModuleError,
},
"EXECABORT": ExecAbortError,
"LOADING": BusyLoadingError,
"NOSCRIPT": NoScriptError,
"READONLY": ReadOnlyError,
"NOAUTH": AuthenticationError,
"NOPERM": NoPermissionError,
}
def __init__(self, socket_read_size: int):
self._stream: Optional[asyncio.StreamReader] = None
self._buffer: Optional[SocketBuffer] = None
self._read_size = socket_read_size
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def parse_error(self, response: str) -> ResponseError:
"""Parse an error response"""
error_code = response.split(" ")[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1 :]
exception_class = self.EXCEPTION_CLASSES[error_code]
if isinstance(exception_class, dict):
exception_class = exception_class.get(response, ResponseError)
return exception_class(response)
return ResponseError(response)
def on_disconnect(self):
raise NotImplementedError()
def on_connect(self, connection: "Connection"):
raise NotImplementedError()
async def can_read(self, timeout: float) -> bool:
raise NotImplementedError()
async def read_response(self) -> Union[EncodableT, ResponseError, None]:
raise NotImplementedError()
class SocketBuffer:
"""Async-friendly re-impl of redis-py's SocketBuffer.
TODO: We're currently passing through two buffers,
the asyncio.StreamReader and this. I imagine we can reduce the layers here
while maintaining compliance with prior art.
"""
def __init__(
self,
stream_reader: asyncio.StreamReader,
socket_read_size: int,
socket_timeout: float,
):
self._stream = stream_reader
self.socket_read_size = socket_read_size
self.socket_timeout = socket_timeout
self._buffer = io.BytesIO()
# number of bytes written to the buffer from the socket
self.bytes_written = 0
# number of bytes read from the buffer
self.bytes_read = 0
@property
def length(self):
return self.bytes_written - self.bytes_read
async def _read_from_socket(
self,
length: int = None,
timeout: Optional[float] = SENTINEL, # type: ignore
raise_on_timeout: bool = True,
) -> bool:
buf = self._buffer
buf.seek(self.bytes_written)
marker = 0
timeout = timeout if timeout is not SENTINEL else self.socket_timeout
try:
while True:
async with async_timeout.timeout(timeout):
data = await self._stream.read(self.socket_read_size)
# an empty string indicates the server shutdown the socket
if isinstance(data, bytes) and len(data) == 0:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
buf.write(data)
data_length = len(data)
self.bytes_written += data_length
marker += data_length
if length is not None and length > marker:
continue
return True
except (socket.timeout, asyncio.TimeoutError):
if raise_on_timeout:
raise TimeoutError("Timeout reading from socket")
return False
except NONBLOCKING_EXCEPTIONS as ex:
# if we're in nonblocking mode and the recv raises a
# blocking error, simply return False indicating that
# there's no data to be read. otherwise raise the
# original exception.
allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
if not raise_on_timeout and ex.errno == allowed:
return False
raise ConnectionError(f"Error while reading from socket: {ex.args}")
async def can_read(self, timeout: float) -> bool:
return bool(self.length) or await self._read_from_socket(
timeout=timeout, raise_on_timeout=False
)
async def read(self, length: int) -> bytes:
length = length + 2 # make sure to read the \r\n terminator
# make sure we've read enough data from the socket
if length > self.length:
await self._read_from_socket(length - self.length)
self._buffer.seek(self.bytes_read)
data = self._buffer.read(length)
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
async def readline(self) -> bytes:
buf = self._buffer
buf.seek(self.bytes_read)
data = buf.readline()
while not data.endswith(SYM_CRLF):
# there's more data in the socket that we need
await self._read_from_socket()
buf.seek(self.bytes_read)
data = buf.readline()
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def purge(self):
self._buffer.seek(0)
self._buffer.truncate()
self.bytes_written = 0
self.bytes_read = 0
def close(self):
try:
self.purge()
self._buffer.close()
except Exception:
# issue #633 suggests the purge/close somehow raised a
# BadFileDescriptor error. Perhaps the client ran out of
# memory or something else? It's probably OK to ignore
# any error being raised from purge/close since we're
# removing the reference to the instance below.
pass
self._buffer = None
self._stream = None
class PythonParser(BaseParser):
"""Plain Python parsing class"""
__slots__ = BaseParser.__slots__ + ("encoder",)
def __init__(self, socket_read_size: int):
super().__init__(socket_read_size)
self.encoder: Optional[Encoder] = None
def on_connect(self, connection: "Connection"):
"""Called when the stream connects"""
self._stream = connection._reader
self._buffer = SocketBuffer(
self._stream, self._read_size, connection.socket_timeout
)
self.encoder = connection.encoder
def on_disconnect(self):
"""Called when the stream disconnects"""
if self._stream is not None:
self._stream = None
if self._buffer is not None:
self._buffer.close()
self._buffer = None
self.encoder = None
async def can_read(self, timeout: float):
return self._buffer and bool(await self._buffer.can_read(timeout))
async def read_response(self) -> Union[EncodableT, ResponseError, None]:
if not self._buffer:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
raw = await self._buffer.readline()
if not raw:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
response: Any
byte, response = raw[:1], raw[1:]
if byte not in (b"-", b"+", b":", b"$", b"*"):
raise InvalidResponse(f"Protocol Error: {raw!r}")
# server returned an error
if byte == b"-":
response = response.decode("utf-8", errors="replace")
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == b"+":
pass
# int value
elif byte == b":":
response = int(response)
# bulk response
elif byte == b"$":
length = int(response)
if length == -1:
return None
response = await self._buffer.read(length)
# multi-bulk response
elif byte == b"*":
length = int(response)
if length == -1:
return None
response = [(await self.read_response()) for i in range(length)]
if isinstance(response, bytes):
response = self.encoder.decode(response)
return response
class HiredisParser(BaseParser):
"""Parser class for connections using Hiredis"""
__slots__ = BaseParser.__slots__ + ("_next_response", "_reader", "_socket_timeout")
def __init__(self, socket_read_size: int):
if not HIREDIS_AVAILABLE:
raise RedisError("Hiredis is not available.")
super().__init__(socket_read_size=socket_read_size)
self._next_response = ...
self._reader: Optional[hiredis.Reader] = None
self._socket_timeout: Optional[float] = None
def on_connect(self, connection: "Connection"):
self._stream = connection._reader
kwargs = {
"protocolError": InvalidResponse,
"replyError": self.parse_error,
}
if connection.encoder.decode_responses:
kwargs.update(
encoding=connection.encoder.encoding,
errors=connection.encoder.encoding_errors,
)
self._reader = hiredis.Reader(**kwargs)
self._next_response = False
self._socket_timeout = connection.socket_timeout
def on_disconnect(self):
self._stream = None
self._reader = None
self._next_response = False
async def can_read(self, timeout: float):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
if self._next_response is False:
self._next_response = self._reader.gets()
if self._next_response is False:
return await self.read_from_socket(timeout=timeout, raise_on_timeout=False)
return True
async def read_from_socket(
self, timeout: Optional[float] = SENTINEL, raise_on_timeout: bool = True
):
timeout = self._socket_timeout if timeout is SENTINEL else timeout
try:
async with async_timeout.timeout(timeout):
buffer = await self._stream.read(self._read_size)
if not isinstance(buffer, bytes) or len(buffer) == 0:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
self._reader.feed(buffer)
# data was read from the socket and added to the buffer.
# return True to indicate that data was read.
return True
except asyncio.CancelledError:
raise
except (socket.timeout, asyncio.TimeoutError):
if raise_on_timeout:
raise TimeoutError("Timeout reading from socket") from None
return False
except NONBLOCKING_EXCEPTIONS as ex:
# if we're in nonblocking mode and the recv raises a
# blocking error, simply return False indicating that
# there's no data to be read. otherwise raise the
# original exception.
allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
if not raise_on_timeout and ex.errno == allowed:
return False
raise ConnectionError(f"Error while reading from socket: {ex.args}")
async def read_response(self) -> EncodableT:
if not self._stream or not self._reader:
self.on_disconnect()
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
# _next_response might be cached from a can_read() call
if self._next_response is not False:
response = self._next_response
self._next_response = False
return response
response = self._reader.gets()
while response is False:
await self.read_from_socket()
response = self._reader.gets()
# if the response is a ConnectionError or the response is a list and
# the first item is a ConnectionError, raise it as something bad
# happened
if isinstance(response, ConnectionError):
raise response
elif (
isinstance(response, list)
and response
and isinstance(response[0], ConnectionError)
):
raise response[0]
return response
DefaultParser: Type[Union[PythonParser, HiredisParser]]
if HIREDIS_AVAILABLE:
DefaultParser = HiredisParser
else:
DefaultParser = PythonParser
class ConnectCallbackProtocol(Protocol):
def __call__(self, connection: "Connection"):
...
class AsyncConnectCallbackProtocol(Protocol):
async def __call__(self, connection: "Connection"):
...
ConnectCallbackT = Union[ConnectCallbackProtocol, AsyncConnectCallbackProtocol]
class Connection:
"""Manages TCP communication to and from a Redis server"""
__slots__ = (
"pid",
"host",
"port",
"db",
"username",
"client_name",
"password",
"socket_timeout",
"socket_connect_timeout",
"socket_keepalive",
"socket_keepalive_options",
"socket_type",
"retry_on_timeout",
"health_check_interval",
"next_health_check",
"last_active_at",
"encoder",
"ssl_context",
"_reader",
"_writer",
"_parser",
"_connect_callbacks",
"_buffer_cutoff",
"_loop",
"__dict__",
)
def __init__(
self,
*,
host: str = "localhost",
port: Union[str, int] = 6379,
db: Union[str, int] = 0,
password: str = None,
socket_timeout: float = None,
socket_connect_timeout: float = None,
socket_keepalive: bool = False,
socket_keepalive_options: dict = None,
socket_type: int = 0,
retry_on_timeout: bool = False,
encoding: str = "utf-8",
encoding_errors: str = "strict",
decode_responses: bool = False,
parser_class: Type[BaseParser] = DefaultParser,
socket_read_size: int = 65536,
health_check_interval: int = 0,
client_name: str = None,
username: str = None,
encoder_class: Type[Encoder] = Encoder,
loop: asyncio.AbstractEventLoop = None,
):
self.pid = os.getpid()
self.host = host
self.port = int(port)
self.db = db
self.username = username
self.client_name = client_name
self.password = password
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout or socket_timeout or None
self.socket_keepalive = socket_keepalive
self.socket_keepalive_options = socket_keepalive_options or {}
self.socket_type = socket_type
self.retry_on_timeout = retry_on_timeout
self.health_check_interval = health_check_interval
self.next_health_check = 0
self.ssl_context: Optional[RedisSSLContext] = None
self.encoder = encoder_class(encoding, encoding_errors, decode_responses)
self._reader: Optional[asyncio.StreamReader] = None
self._writer: Optional[asyncio.StreamWriter] = None
self._parser = parser_class(
socket_read_size=socket_read_size,
)
self._connect_callbacks: List[ConnectCallbackT] = []
self._buffer_cutoff = 6000
self._loop = loop
def __repr__(self):
repr_args = ",".join((f"{k}={v}" for k, v in self.repr_pieces()))
return f"{self.__class__.__name__}<{repr_args}>"
def repr_pieces(self):
pieces = [("host", self.host), ("port", self.port), ("db", self.db)]
if self.client_name:
pieces.append(("client_name", self.client_name))
return pieces
def __del__(self):
try:
if self.is_connected:
loop = self._loop or asyncio.get_event_loop()
coro = self.disconnect()
if loop.is_running():
loop.create_task(coro)
else:
loop.run_until_complete(self.disconnect())
except Exception:
pass
@property
def is_connected(self):
return bool(self._reader and self._writer)
def register_connect_callback(self, callback):
self._connect_callbacks.append(callback)
def clear_connect_callbacks(self):
self._connect_callbacks = []
async def connect(self):
"""Connects to the Redis server if not already connected"""
if self.is_connected:
return
try:
await self._connect()
except asyncio.CancelledError:
raise
except (socket.timeout, asyncio.TimeoutError):
raise TimeoutError("Timeout connecting to server")
except OSError as e:
raise ConnectionError(self._error_message(e))
except Exception as exc:
raise ConnectionError(exc) from exc
try:
await self.on_connect()
except RedisError:
# clean up after any error in on_connect
await self.disconnect()
raise
# run any user callbacks. right now the only internal callback
# is for pubsub channel/pattern resubscription
for callback in self._connect_callbacks:
task = callback(self)
if task and inspect.isawaitable(task):
await task
async def _connect(self):
"""Create a TCP socket connection"""
        async with async_timeout.timeout(self.socket_connect_timeout):
            reader, writer = await asyncio.open_connection(
                host=self.host,
                port=self.port,
                # asyncio expects an ssl.SSLContext, not the RedisSSLContext wrapper
                ssl=self.ssl_context.get() if self.ssl_context else None,
                loop=self._loop,
            )
self._reader = reader
self._writer = writer
sock = writer.transport.get_extra_info("socket")
if sock is not None:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
# TCP_KEEPALIVE
if self.socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
for k, v in self.socket_keepalive_options.items():
sock.setsockopt(socket.SOL_TCP, k, v)
# set the socket_timeout now that we're connected
if self.socket_timeout is not None:
sock.settimeout(self.socket_timeout)
except (OSError, TypeError):
# `socket_keepalive_options` might contain invalid options
# causing an error. Do not leave the connection open.
writer.close()
raise
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return f"Error connecting to {self.host}:{self.port}. {exception.args[0]}."
else:
return (
f"Error {exception.args[0]} connecting to {self.host}:{self.port}. "
f"{exception.args[0]}."
)
async def on_connect(self):
"""Initialize the connection, authenticate and select a database"""
self._parser.on_connect(self)
# if username and/or password are set, authenticate
if self.username or self.password:
if self.username:
auth_args = (self.username, self.password or "")
else:
auth_args = (self.password,)
# avoid checking health here -- PING will fail if we try
# to check the health prior to the AUTH
await self.send_command("AUTH", *auth_args, check_health=False)
try:
auth_response = await self.read_response()
except AuthenticationWrongNumberOfArgsError:
# a username and password were specified but the Redis
# server seems to be < 6.0.0 which expects a single password
# arg. retry auth with just the password.
# https://github.com/andymccurdy/redis-py/issues/1274
await self.send_command("AUTH", self.password, check_health=False)
auth_response = await self.read_response()
if str_if_bytes(auth_response) != "OK":
raise AuthenticationError("Invalid Username or Password")
# if a client_name is given, set it
if self.client_name:
await self.send_command("CLIENT", "SETNAME", self.client_name)
            if str_if_bytes(await self.read_response()) != "OK":
raise ConnectionError("Error setting client name")
# if a database is specified, switch to it
if self.db:
await self.send_command("SELECT", self.db)
if str_if_bytes(await self.read_response()) != "OK":
raise ConnectionError("Invalid Database")
async def disconnect(self):
"""Disconnects from the Redis server"""
try:
async with async_timeout.timeout(self.socket_connect_timeout):
self._parser.on_disconnect()
if not self.is_connected:
return
try:
if os.getpid() == self.pid:
self._writer.close()
# py3.6 doesn't have this method
if hasattr(self._writer, "wait_closed"):
await self._writer.wait_closed()
except OSError:
pass
self._reader = None
self._writer = None
except asyncio.TimeoutError:
raise TimeoutError(
f"Timed out closing connection after {self.socket_connect_timeout}"
) from None
async def check_health(self):
"""Check the health of the connection with a PING/PONG"""
if self.health_check_interval and time.time() > self.next_health_check:
try:
await self.send_command("PING", check_health=False)
if str_if_bytes(await self.read_response()) != "PONG":
raise ConnectionError("Bad response from PING health check")
except (ConnectionError, TimeoutError) as err:
await self.disconnect()
try:
await self.send_command("PING", check_health=False)
if str_if_bytes(await self.read_response()) != "PONG":
raise ConnectionError(
"Bad response from PING health check"
) from None
except BaseException as err2:
raise err2 from err
async def send_packed_command(
self,
command: Union[bytes, str, Iterable[Union[bytes, str]]],
check_health: bool = True,
):
"""Send an already packed command to the Redis server"""
if not self._writer:
await self.connect()
# guard against health check recursion
if check_health:
await self.check_health()
try:
if isinstance(command, str):
command = command.encode()
if isinstance(command, bytes):
command = [command]
self._writer.writelines(command)
await self._writer.drain()
except asyncio.TimeoutError:
await self.disconnect()
raise TimeoutError("Timeout writing to socket") from None
except OSError as e:
await self.disconnect()
if len(e.args) == 1:
errno, errmsg = "UNKNOWN", e.args[0]
else:
errno = e.args[0]
errmsg = e.args[1]
raise ConnectionError(
f"Error {errno} while writing to socket. {errmsg}."
) from e
except BaseException:
await self.disconnect()
raise
async def send_command(self, *args, **kwargs):
"""Pack and send a command to the Redis server"""
if not self.is_connected:
await self.connect()
await self.send_packed_command(
self.pack_command(*args), check_health=kwargs.get("check_health", True)
)
async def can_read(self, timeout: float = 0):
"""Poll the socket to see if there's data that can be read."""
if not self.is_connected:
await self.connect()
return await self._parser.can_read(timeout)
async def read_response(self):
"""Read the response from a previously sent command"""
try:
            async with async_timeout.timeout(self.socket_timeout):
response = await self._parser.read_response()
except asyncio.TimeoutError:
await self.disconnect()
raise TimeoutError(f"Timeout reading from {self.host}:{self.port}")
except BaseException:
await self.disconnect()
raise
if self.health_check_interval:
self.next_health_check = time.time() + self.health_check_interval
if isinstance(response, ResponseError):
raise response from None
return response
def pack_command(self, *args: EncodableT) -> List[bytes]:
"""Pack a series of arguments into the Redis protocol"""
output = []
# the client might have included 1 or more literal arguments in
# the command name, e.g., 'CONFIG GET'. The Redis server expects these
# arguments to be sent separately, so split the first argument
# manually. These arguments should be bytestrings so that they are
# not encoded.
if isinstance(args[0], str):
args = tuple(args[0].encode().split()) + args[1:]
elif b" " in args[0]:
args = tuple(args[0].split()) + args[1:]
buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))
buffer_cutoff = self._buffer_cutoff
for arg in map(self.encoder.encode, args):
# to avoid large string mallocs, chunk the command into the
# output list if we're sending large values or memoryviews
arg_length = len(arg)
if (
len(buff) > buffer_cutoff
or arg_length > buffer_cutoff
or isinstance(arg, memoryview)
):
buff = SYM_EMPTY.join(
(buff, SYM_DOLLAR, str(arg_length).encode(), SYM_CRLF)
)
output.append(buff)
output.append(arg)
buff = SYM_CRLF
else:
buff = SYM_EMPTY.join(
(
buff,
SYM_DOLLAR,
str(arg_length).encode(),
SYM_CRLF,
arg,
SYM_CRLF,
)
)
output.append(buff)
return output
def pack_commands(self, commands: Iterable[Iterable[EncodableT]]) -> List[bytes]:
"""Pack multiple commands into the Redis protocol"""
output: List[bytes] = []
pieces: List[bytes] = []
buffer_length = 0
buffer_cutoff = self._buffer_cutoff
for cmd in commands:
for chunk in self.pack_command(*cmd):
chunklen = len(chunk)
if (
buffer_length > buffer_cutoff
or chunklen > buffer_cutoff
or isinstance(chunk, memoryview)
):
output.append(SYM_EMPTY.join(pieces))
buffer_length = 0
pieces = []
if chunklen > buffer_cutoff or isinstance(chunk, memoryview):
output.append(chunk)
else:
pieces.append(chunk)
buffer_length += chunklen
if pieces:
output.append(SYM_EMPTY.join(pieces))
return output
class SSLConnection(Connection):
def __init__(
self,
ssl_keyfile: str = None,
ssl_certfile: str = None,
ssl_cert_reqs: str = "required",
ssl_ca_certs: str = None,
ssl_check_hostname: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.ssl_context = RedisSSLContext(
keyfile=ssl_keyfile,
certfile=ssl_certfile,
cert_reqs=ssl_cert_reqs,
ca_certs=ssl_ca_certs,
check_hostname=ssl_check_hostname,
)
@property
def keyfile(self):
return self.ssl_context.keyfile
@property
def certfile(self):
return self.ssl_context.certfile
@property
def cert_reqs(self):
return self.ssl_context.cert_reqs
@property
def ca_certs(self):
return self.ssl_context.ca_certs
@property
def check_hostname(self):
return self.ssl_context.check_hostname
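# Illustrative use of SSLConnection (a sketch; the host and file paths below
# are placeholders, not values from this module):
#   conn = SSLConnection(host="redis.example.com", port=6380,
#                        ssl_cert_reqs="required", ssl_ca_certs="/etc/ssl/ca.pem")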
class RedisSSLContext:
__slots__ = (
"keyfile",
"certfile",
"cert_reqs",
"ca_certs",
"context",
"check_hostname",
)
def __init__(
self,
keyfile: str = None,
certfile: str = None,
cert_reqs: str = None,
ca_certs: str = None,
check_hostname: bool = False,
):
self.keyfile = keyfile
self.certfile = certfile
if cert_reqs is None:
self.cert_reqs = ssl.CERT_NONE
elif isinstance(cert_reqs, str):
CERT_REQS = {
"none": ssl.CERT_NONE,
"optional": ssl.CERT_OPTIONAL,
"required": ssl.CERT_REQUIRED,
}
if cert_reqs not in CERT_REQS:
raise RedisError(
"Invalid SSL Certificate Requirements Flag: %s" % cert_reqs
)
self.cert_reqs = CERT_REQS[cert_reqs]
self.ca_certs = ca_certs
self.check_hostname = check_hostname
self.context = None
def get(self) -> ssl.SSLContext:
if not self.context:
context = ssl.create_default_context()
context.check_hostname = self.check_hostname
context.verify_mode = self.cert_reqs
if self.certfile and self.keyfile:
context.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile)
if self.ca_certs:
context.load_verify_locations(self.ca_certs)
self.context = context
return self.context
class UnixDomainSocketConnection(Connection): # lgtm [py/missing-call-to-init]
def __init__(
self,
*,
path: str = "",
db: Union[str, int] = 0,
username: str = None,
password: str = None,
socket_timeout: float = None,
encoding: str = "utf-8",
encoding_errors: str = "strict",
decode_responses: bool = False,
retry_on_timeout: bool = False,
parser_class: Type[BaseParser] = DefaultParser,
socket_read_size: int = 65536,
health_check_interval: float = 0.0,
client_name=None,
loop: asyncio.AbstractEventLoop = None,
):
self.pid = os.getpid()
self.path = path
self.db = db
self.username = username
self.client_name = client_name
self.password = password
self.socket_timeout = socket_timeout
self.retry_on_timeout = retry_on_timeout
self.health_check_interval = health_check_interval
self.next_health_check = 0
self.encoder = Encoder(encoding, encoding_errors, decode_responses)
        self._sock = None
        self._reader = None
        self._writer = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._connect_callbacks = []
self._buffer_cutoff = 6000
self._loop = loop
def repr_pieces(self) -> Iterable[Tuple[str, Union[str, int]]]:
pieces = [
("path", self.path),
("db", self.db),
]
if self.client_name:
pieces.append(("client_name", self.client_name))
return pieces
async def _connect(self):
        # no separate connect timeout is configured for unix sockets;
        # fall back to socket_timeout
        async with async_timeout.timeout(self.socket_timeout):
reader, writer = await asyncio.open_unix_connection(path=self.path)
self._reader = reader
self._writer = writer
await self.on_connect()
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return f"Error connecting to unix socket: {self.path}. {exception.args[0]}."
else:
return (
f"Error {exception.args[0]} connecting to unix socket: "
f"{self.path}. {exception.args[1]}."
)
FALSE_STRINGS = ("0", "F", "FALSE", "N", "NO")
def to_bool(value) -> Optional[bool]:
if value is None or value == "":
return None
if isinstance(value, str) and value.upper() in FALSE_STRINGS:
return False
return bool(value)
URL_QUERY_ARGUMENT_PARSERS = {
"db": int,
"socket_timeout": float,
"socket_connect_timeout": float,
"socket_keepalive": to_bool,
"retry_on_timeout": to_bool,
"max_connections": int,
"health_check_interval": int,
"ssl_check_hostname": to_bool,
}
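# For example (sketch), parse_url() below would turn
#   "redis://localhost:6379/0?socket_timeout=5&retry_on_timeout=no"
# into {"host": "localhost", "port": 6379, "db": 0,
#       "socket_timeout": 5.0, "retry_on_timeout": False}.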
class ConnectKwargs(TypedDict, total=False):
username: str
password: str
connection_class: Type[Connection]
host: str
port: int
db: int
def parse_url(url: str) -> ConnectKwargs:
parsed: ParseResult = urlparse(url)
kwargs: ConnectKwargs = {}
for name, value in parse_qs(parsed.query).items():
if value and len(value) > 0:
value = unquote(value[0])
parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
if parser:
try:
kwargs[name] = parser(value)
except (TypeError, ValueError):
raise ValueError("Invalid value for `%s` in connection URL." % name)
else:
kwargs[name] = value
if parsed.username:
kwargs["username"] = unquote(parsed.username)
if parsed.password:
kwargs["password"] = unquote(parsed.password)
# We only support redis://, rediss:// and unix:// schemes.
if parsed.scheme == "unix":
if parsed.path:
kwargs["path"] = unquote(parsed.path)
kwargs["connection_class"] = UnixDomainSocketConnection
elif parsed.scheme in ("redis", "rediss"):
if parsed.hostname:
kwargs["host"] = unquote(parsed.hostname)
if parsed.port:
kwargs["port"] = int(parsed.port)
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
if parsed.path and "db" not in kwargs:
try:
kwargs["db"] = int(unquote(parsed.path).replace("/", ""))
except (AttributeError, ValueError):
pass
if parsed.scheme == "rediss":
kwargs["connection_class"] = SSLConnection
else:
valid_schemes = "redis://, rediss://, unix://"
raise ValueError(
"Redis URL must specify one of the following "
"schemes (%s)" % valid_schemes
)
return kwargs
_CP = TypeVar("_CP")
class ConnectionPool:
"""
    Create a connection pool. If ``max_connections`` is set, then this
object raises :py:class:`~redis.ConnectionError` when the pool's
limit is reached.
By default, TCP connections are created unless ``connection_class``
is specified. Use :py:class:`~redis.UnixDomainSocketConnection` for
unix sockets.
Any additional keyword arguments are passed to the constructor of
``connection_class``.
"""
@classmethod
def from_url(cls: Type[_CP], url: str, **kwargs) -> _CP:
"""
Return a connection pool configured from the given URL.
For example::
redis://[[username]:[password]]@localhost:6379/0
rediss://[[username]:[password]]@localhost:6379/0
unix://[[username]:[password]]@/path/to/socket.sock?db=0
Three URL schemes are supported:
- `redis://` creates a TCP socket connection. See more at:
<https://www.iana.org/assignments/uri-schemes/prov/redis>
- `rediss://` creates a SSL wrapped TCP socket connection. See more at:
<https://www.iana.org/assignments/uri-schemes/prov/rediss>
        - ``unix://`` creates a Unix Domain Socket connection.
The username, password, hostname, path and all querystring values
are passed through urllib.parse.unquote in order to replace any
percent-encoded values with their corresponding characters.
There are several ways to specify a database number. The first value
found will be used:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// or rediss:// schemes, the path argument
of the url, e.g. redis://localhost/0
3. A ``db`` keyword argument to this function.
If none of these options are specified, the default db=0 is used.
All querystring options are cast to their appropriate Python types.
Boolean arguments can be specified with string values "True"/"False"
or "Yes"/"No". Values that cannot be properly cast cause a
``ValueError`` to be raised. Once parsed, the querystring arguments
and keyword arguments are passed to the ``ConnectionPool``'s
class initializer. In the case of conflicting arguments, querystring
arguments always win.
"""
url_options = parse_url(url)
kwargs.update(url_options)
return cls(**kwargs)
def __init__(
self,
connection_class: Type[Connection] = Connection,
max_connections: int = None,
**connection_kwargs,
):
max_connections = max_connections or 2 ** 31
if not isinstance(max_connections, int) or max_connections < 0:
raise ValueError('"max_connections" must be a positive integer')
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections
# a lock to protect the critical section in _checkpid().
# this lock is acquired when the process id changes, such as
# after a fork. during this time, multiple threads in the child
# process could attempt to acquire this lock. the first thread
# to acquire the lock will reset the data structures and lock
# object of this pool. subsequent threads acquiring this lock
# will notice the first thread already did the work and simply
# release the lock.
self._fork_lock = threading.Lock()
self._lock: asyncio.Lock
self._created_connections: int
self._available_connections: List[Connection]
self._in_use_connections: Set[Connection]
self.reset() # lgtm [py/init-calls-subclass]
self.loop = self.connection_kwargs.get("loop")
self.encoder_class = self.connection_kwargs.get("encoder_class", Encoder)
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"<{self.connection_class(**self.connection_kwargs)!r}>"
)
def reset(self):
self._lock = asyncio.Lock()
self._created_connections = 0
self._available_connections = []
self._in_use_connections = set()
# this must be the last operation in this method. while reset() is
# called when holding _fork_lock, other threads in this process
# can call _checkpid() which compares self.pid and os.getpid() without
# holding any lock (for performance reasons). keeping this assignment
# as the last operation ensures that those other threads will also
# notice a pid difference and block waiting for the first thread to
# release _fork_lock. when each of these threads eventually acquire
# _fork_lock, they will notice that another thread already called
# reset() and they will immediately release _fork_lock and continue on.
self.pid = os.getpid()
def _checkpid(self):
# _checkpid() attempts to keep ConnectionPool fork-safe on modern
# systems. this is called by all ConnectionPool methods that
# manipulate the pool's state such as get_connection() and release().
#
# _checkpid() determines whether the process has forked by comparing
# the current process id to the process id saved on the ConnectionPool
# instance. if these values are the same, _checkpid() simply returns.
#
# when the process ids differ, _checkpid() assumes that the process
# has forked and that we're now running in the child process. the child
# process cannot use the parent's file descriptors (e.g., sockets).
# therefore, when _checkpid() sees the process id change, it calls
# reset() in order to reinitialize the child's ConnectionPool. this
# will cause the child to make all new connection objects.
#
# _checkpid() is protected by self._fork_lock to ensure that multiple
# threads in the child process do not call reset() multiple times.
#
# there is an extremely small chance this could fail in the following
# scenario:
# 1. process A calls _checkpid() for the first time and acquires
# self._fork_lock.
# 2. while holding self._fork_lock, process A forks (the fork()
# could happen in a different thread owned by process A)
# 3. process B (the forked child process) inherits the
# ConnectionPool's state from the parent. that state includes
# a locked _fork_lock. process B will not be notified when
# process A releases the _fork_lock and will thus never be
# able to acquire the _fork_lock.
#
# to mitigate this possible deadlock, _checkpid() will only wait 5
# seconds to acquire _fork_lock. if _fork_lock cannot be acquired in
# that time it is assumed that the child is deadlocked and a
# redis.ChildDeadlockedError error is raised.
if self.pid != os.getpid():
acquired = self._fork_lock.acquire(timeout=5)
if not acquired:
raise ChildDeadlockedError
# reset() the instance for the new process if another thread
# hasn't already done so
try:
if self.pid != os.getpid():
self.reset()
finally:
self._fork_lock.release()
async def get_connection(self, command_name, *keys, **options):
"""Get a connection from the pool"""
self._checkpid()
async with self._lock:
try:
connection = self._available_connections.pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections.add(connection)
try:
# ensure this connection is connected to Redis
await connection.connect()
# connections that the pool provides should be ready to send
# a command. if not, the connection was either returned to the
# pool before all data has been read or the socket has been
# closed. either way, reconnect and verify everything is good.
try:
if await connection.can_read():
raise ConnectionError("Connection has data") from None
except ConnectionError:
await connection.disconnect()
await connection.connect()
if await connection.can_read():
raise ConnectionError("Connection not ready") from None
except BaseException:
# release the connection back to the pool so that we don't
# leak it
await self.release(connection)
raise
return connection
def get_encoder(self):
"""Return an encoder based on encoding settings"""
kwargs = self.connection_kwargs
return self.encoder_class(
encoding=kwargs.get("encoding", "utf-8"),
encoding_errors=kwargs.get("encoding_errors", "strict"),
decode_responses=kwargs.get("decode_responses", False),
)
def make_connection(self):
"""Create a new connection"""
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs)
async def release(self, connection: Connection):
"""Releases the connection back to the pool"""
self._checkpid()
async with self._lock:
try:
self._in_use_connections.remove(connection)
except KeyError:
# Gracefully fail when a connection is returned to this pool
# that the pool doesn't actually own
pass
if self.owns_connection(connection):
self._available_connections.append(connection)
else:
# pool doesn't own this connection. do not add it back
# to the pool and decrement the count so that another
# connection can take its place if needed
self._created_connections -= 1
await connection.disconnect()
return
def owns_connection(self, connection: Connection):
return connection.pid == self.pid
async def disconnect(self, inuse_connections: bool = True):
"""
Disconnects connections in the pool
If ``inuse_connections`` is True, disconnect connections that are
current in use, potentially by other tasks. Otherwise only disconnect
connections that are idle in the pool.
"""
self._checkpid()
async with self._lock:
if inuse_connections:
connections = chain(
self._available_connections, self._in_use_connections
)
else:
connections = self._available_connections
resp = await asyncio.gather(
*(connection.disconnect() for connection in connections),
return_exceptions=True,
)
exc = next((r for r in resp if isinstance(r, BaseException)), None)
if exc:
raise exc
class BlockingConnectionPool(ConnectionPool):
"""
Thread-safe blocking connection pool::
>>> from aioredis.client import Redis
>>> client = Redis(connection_pool=BlockingConnectionPool())
It performs the same function as the default
:py:class:`~redis.ConnectionPool` implementation, in that,
it maintains a pool of reusable connections that can be shared by
multiple redis clients (safely across threads if required).
The difference is that, in the event that a client tries to get a
    connection from the pool when all of its connections are in use, rather than
raising a :py:class:`~redis.ConnectionError` (as the default
:py:class:`~redis.ConnectionPool` implementation does), it
makes the client wait ("blocks") for a specified number of seconds until
a connection becomes available.
Use ``max_connections`` to increase / decrease the pool size::
>>> pool = BlockingConnectionPool(max_connections=10)
Use ``timeout`` to tell it either how many seconds to wait for a connection
to become available, or to block forever:
>>> # Block forever.
>>> pool = BlockingConnectionPool(timeout=None)
>>> # Raise a ``ConnectionError`` after five seconds if a connection is
>>> # not available.
>>> pool = BlockingConnectionPool(timeout=5)
"""
def __init__(
self,
max_connections: int = 50,
timeout: Optional[int] = 20,
connection_class: Type[Connection] = Connection,
queue_class: Type[asyncio.Queue] = asyncio.LifoQueue,
**connection_kwargs,
):
self.queue_class = queue_class
self.timeout = timeout
self._connections: List[Connection]
super().__init__(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs,
)
def reset(self):
# Create and fill up a thread safe queue with ``None`` values.
self.pool = self.queue_class(self.max_connections)
while True:
try:
self.pool.put_nowait(None)
except asyncio.QueueFull:
break
# Keep a list of actual connection instances so that we can
# disconnect them later.
self._connections = []
# this must be the last operation in this method. while reset() is
# called when holding _fork_lock, other threads in this process
# can call _checkpid() which compares self.pid and os.getpid() without
# holding any lock (for performance reasons). keeping this assignment
# as the last operation ensures that those other threads will also
# notice a pid difference and block waiting for the first thread to
# release _fork_lock. when each of these threads eventually acquire
# _fork_lock, they will notice that another thread already called
# reset() and they will immediately release _fork_lock and continue on.
self.pid = os.getpid()
def make_connection(self):
"""Make a fresh connection."""
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
async def get_connection(self, command_name, *keys, **options):
"""
Get a connection, blocking for ``self.timeout`` until a connection
is available from the pool.
If the connection returned is ``None`` then creates a new connection.
Because we use a last-in first-out queue, the existing connections
(having been returned to the pool after the initial ``None`` values
were added) will be returned before ``None`` values. This means we only
create new connections when we need to, i.e.: the actual number of
connections will only increase in response to demand.
"""
# Make sure we haven't changed process.
self._checkpid()
# Try and get a connection from the pool. If one isn't available within
# self.timeout then raise a ``ConnectionError``.
connection = None
try:
async with async_timeout.timeout(self.timeout):
connection = await self.pool.get()
        except (asyncio.QueueEmpty, asyncio.TimeoutError):
            # Note that this is not caught by the redis client and will be
            # raised unless handled by application code.
            raise ConnectionError("No connection available.")
# If the ``connection`` is actually ``None`` then that's a cue to make
# a new connection to add to the pool.
if connection is None:
connection = self.make_connection()
try:
# ensure this connection is connected to Redis
await connection.connect()
# connections that the pool provides should be ready to send
# a command. if not, the connection was either returned to the
# pool before all data has been read or the socket has been
# closed. either way, reconnect and verify everything is good.
try:
if await connection.can_read():
raise ConnectionError("Connection has data") from None
except ConnectionError:
await connection.disconnect()
await connection.connect()
if await connection.can_read():
raise ConnectionError("Connection not ready") from None
except BaseException:
# release the connection back to the pool so that we don't leak it
await self.release(connection)
raise
return connection
async def release(self, connection: Connection):
"""Releases the connection back to the pool."""
# Make sure we haven't changed process.
self._checkpid()
if not self.owns_connection(connection):
# pool doesn't own this connection. do not add it back
            # to the pool. Instead add a None value which is a placeholder
            # that will cause the pool to recreate the connection if
            # it's needed.
await connection.disconnect()
self.pool.put_nowait(None)
return
# Put the connection back into the pool.
try:
self.pool.put_nowait(connection)
except asyncio.QueueFull:
# perhaps the pool has been reset() after a fork? regardless,
# we don't want this connection
pass
async def disconnect(self, inuse_connections: bool = True):
"""Disconnects all connections in the pool."""
self._checkpid()
async with self._lock:
resp = await asyncio.gather(
*(connection.disconnect() for connection in self._connections),
return_exceptions=True,
)
exc = next((r for r in resp if isinstance(r, BaseException)), None)
if exc:
raise exc
| mit | 5,627,629,003,666,229,000 | 36.155958 | 97 | 0.589312 | false |
toast38coza/ansible-modules | library/kong_plugin.py | 1 | 3447 | #!/usr/bin/python
import requests
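# Illustrative playbook usage (a sketch based on the argument spec below; the
# plugin name and config keys are assumptions, not taken from this module):
#
# - name: enable rate limiting on the mockbin API
#   kong_plugin:
#     kong_admin_uri: http://localhost:8001
#     api_name: mockbin
#     plugin_name: rate-limiting
#     config: { "config.minute": 20 }
#     state: present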
class KongPlugin:
def __init__(self, base_url, api_name):
        self.base_url = "{}/apis/{}/plugins".format(base_url, api_name)
self.api = api_name
def list(self):
return requests.get(self.base_url)
def _get_plugin_id(self, name, plugins_list):
"""Scans the list of plugins for an ID.
returns None if no matching name is found"""
for plugin in plugins_list:
if plugin.get("name") == name:
return plugin.get("id")
return None
def add_or_update(self, name, config=None):
# does it exist already?
plugins_response = self.list()
plugins_list = plugins_response.json().get('data', [])
data = {
"name": name,
}
if config is not None:
data.update(config)
plugin_id = self._get_plugin_id(name, plugins_list)
if plugin_id is None:
return requests.post(self.base_url, data)
else:
url = "{}/{}" . format (self.base_url, plugin_id)
return requests.patch(url, data)
def delete(self, id):
url = "{}/{}" . format (self.base_url, id)
return requests.delete(url)
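# Usage sketch (illustrative only, not part of the module): exercising the
# class against a local Kong admin API. The URL, API name and plugin config
# below are assumptions for the example.
#
#     api = KongPlugin("http://localhost:8001", "mockbin")
#     resp = api.add_or_update("rate-limiting", {"config.minute": 20})
#     print(resp.status_code, resp.json())
#     api.delete(resp.json()["id"])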
class ModuleHelper:
def get_module(self):
args = dict(
kong_admin_uri = dict(required=True, type='str'),
api_name = dict(required=False, type='str'),
plugin_name = dict(required=False, type='str'),
plugin_id = dict(required=False, type='str'),
config = dict(required=False, type='dict'),
state = dict(required=False, default="present", choices=['present', 'absent', 'list'], type='str'),
)
return AnsibleModule(argument_spec=args,supports_check_mode=False)
def prepare_inputs(self, module):
url = module.params['kong_admin_uri']
api_name = module.params['api_name']
state = module.params['state']
data = {
"name": module.params['plugin_name'],
"config": module.params['config']
}
return (url, api_name, data, state)
def get_response(self, response, state):
if state == "present":
meta = response.json()
has_changed = response.status_code == 201
if state == "absent":
meta = {}
has_changed = response.status_code == 204
if state == "list":
meta = response.json()
has_changed = False
return (has_changed, meta)
def main():
helper = ModuleHelper()
global module # might not need this
module = helper.get_module()
base_url, api_name, data, state = helper.prepare_inputs(module)
api = KongPlugin(base_url, api_name)
if state == "present":
response = api.add_or_update(**data)
if state == "absent":
response = api.delete(module.params['plugin_id'])
if state == "list":
response = api.list()
has_changed, meta = helper.get_response(response, state)
module.exit_json(changed=has_changed, meta=meta)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main() | mit | -6,333,525,935,479,312,000 | 27.262295 | 115 | 0.551494 | false |
SmartCash/smartcash | contrib/seeds/generate-seeds.py | 1 | 4382 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
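# Illustrative examples of the parsing above (not part of the original
# script); the byte values follow from the pchIPv4 prefix defined earlier:
#
#     parse_spec('1.2.3.4', 8333)      # -> (pchIPv4 + bytearray([1,2,3,4]), 8333)
#     parse_spec('[::1]:9999', 8333)   # -> (16-byte IPv6 address, 9999)
#     parse_spec('0x04030201', 8333)   # -> same host bytes as '1.2.3.4' (little-endian)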
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9678)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19678)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| mit | -8,129,644,415,492,418,000 | 30.753623 | 98 | 0.573026 | false |
mathandy/Classifiers2LearnWith | experiments/vgg16_pre-trained/vgg16_pre-trained.py | 1 | 9463 | """A pre-trained implementation of VGG16 with weights trained on ImageNet."""
##########################################################################
# Special thanks to
# http://www.cs.toronto.edu/~frossard/post/vgg16/
# for converting the caffe VGG16 pre-trained weights to TensorFlow
# this file is essentially just a restylized version of his vgg16.py
##########################################################################
from __future__ import print_function, absolute_import, division
import os
import numpy as np
from scipy.misc import imread, imresize
import tensorflow as tf
_debug = True
def conv_layer(input_tensor, diameter, in_dim, out_dim, name=None):
r"""Creates a convolutional layer with
Args:
input_tensor: A `Tensor`.
diameter: An `int`, the width and also height of the filter.
in_dim: An `int`, the number of input channels.
out_dim: An `int`, the number of output channels.
name: A `str`, the name for the operation defined by this function.
"""
with tf.name_scope(name):
filter_shape = (diameter, diameter, in_dim, out_dim)
initial_weights = tf.truncated_normal(filter_shape, stddev=0.1)
weights = tf.Variable(initial_weights, name='weights')
conv = tf.nn.conv2d(input=input_tensor,
filter=weights,
strides=[1, 1, 1, 1],
padding='SAME',
name='convolution')
initial_biases = tf.constant(1.0, shape=[out_dim], dtype=tf.float32)
biases = tf.Variable(initial_biases, name='biases')
preactivations = tf.nn.bias_add(conv, biases, name='bias_addition')
activations = tf.nn.relu(preactivations, name='activation')
return activations, weights, biases
def fc_layer(in_tensor, in_dim, out_dim, sigmoid=tf.nn.relu, name=None):
r"""Creates a fully-connected (ReLU by default) layer with
Args:
in_tensor: A `Tensor`.
in_dim: An `int`, the number of input channels.
out_dim: An `int`, the number of output channels.
sigmoid: A `function`, the activation operation, defaults to tf.nn.relu.
name: A `str`, the name for the operation defined by this function.
"""
with tf.name_scope(name):
initial_weights = tf.truncated_normal((in_dim, out_dim), stddev=0.1)
weights = tf.Variable(initial_weights, name='weights')
initial_biases = tf.constant(0.0, shape=[out_dim], dtype=tf.float32)
biases = tf.Variable(initial_biases, name='biases')
preactivations = tf.nn.bias_add(tf.matmul(in_tensor, weights), biases)
activations = sigmoid(preactivations, name='activation')
return activations, weights, biases
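# Minimal composition sketch (illustrative, not part of the original graph):
# stacking the two helpers above on a dummy RGB batch. All names are made up.
#
#     x = tf.placeholder(tf.float32, (None, 32, 32, 3))
#     h, _, _ = conv_layer(x, 3, 3, 8, 'demo_conv')       # SAME padding keeps 32x32
#     flat = tf.reshape(h, [-1, 32 * 32 * 8])
#     logits, _, _ = fc_layer(flat, 32 * 32 * 8, 10, name='demo_fc')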
class PreTrainedVGG16:
def __init__(self, weights=None, session=None):
self.session = session
self.input_images = tf.placeholder(tf.float32, (None, 224, 224, 3))
self.activations, self.parameters = self._build_graph()
self.output = self.activations['fc3']
if weights is not None and session is not None:
self.load_weights(weights, session)
def load_weights(self, weight_file, session):
weights = np.load(weight_file)
keys = sorted(weights.keys())
for i, k in enumerate(keys):
session.run(self.parameters[i].assign(weights[k]))
@staticmethod
def get_class_names():
with open('ImageNet_Classes.txt') as names_file:
return [l.replace('\n', '') for l in names_file]
def get_output(self, images, auto_resize=True):
""""Takes in a list of images and returns softmax probabilities."""
if auto_resize:
images_ = [imresize(im, (224, 224)) for im in images]
else:
images_ = images
feed_dict = {self.input_images: images_}
return self.session.run(self.output, feed_dict)[0]
def get_activations(self, images, auto_resize=True):
""""Takes in a list of images and returns the activation dictionary."""
if auto_resize:
images_ = [imresize(im, (224, 224)) for im in images]
else:
images_ = images
feed_dict = {self.input_images: images_}
return self.session.run(self.activations, feed_dict)
def _build_graph(self):
parameters = [] # storage for trainable parameters
# pooling arguments
_ksize = [1, 2, 2, 1]
_strides = [1, 2, 2, 1]
# center the input images
with tf.name_scope('preprocess_centering'):
mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
shape=[1, 1, 1, 3], name='img_mean')
c_images = self.input_images - mean
# images --> conv1_1 --> conv1_2 --> pool1
print("hi", tf.shape(c_images))
conv1_1, weights1, biases1 = conv_layer(c_images, 3, 3, 64, 'conv1_1')
conv1_2, weights2, biases2 = conv_layer(conv1_1, 3, 64, 64, 'conv1_2')
pool1 = tf.nn.max_pool(conv1_2, _ksize, _strides, 'SAME', name='pool1')
parameters += [weights1, biases1, weights2, biases2]
# pool1 --> conv2_1 --> conv2_2 --> pool2
conv2_1, weights1, biases1 = conv_layer(pool1, 3, 64, 128, 'conv2_1')
conv2_2, weights2, biases2 = conv_layer(conv2_1, 3, 128, 128, 'conv2_2')
pool2 = tf.nn.max_pool(conv2_2, _ksize, _strides, 'SAME', name='pool2')
parameters += [weights1, biases1, weights2, biases2]
# pool2 --> conv3_1 --> conv3_2 --> conv3_3 --> pool3
conv3_1, weights1, biases1 = conv_layer(pool2, 3, 128, 256, 'conv3_1')
conv3_2, weights2, biases2 = conv_layer(conv3_1, 3, 256, 256, 'conv3_2')
conv3_3, weights3, biases3 = conv_layer(conv3_2, 3, 256, 256, 'conv3_3')
pool3 = tf.nn.max_pool(conv3_3, _ksize, _strides, 'SAME', name='pool3')
parameters += [weights1, biases1, weights2, biases2, weights3, biases3]
# pool3 --> conv4_1 --> conv4_2 --> conv4_3 --> pool4
conv4_1, weights1, biases1 = conv_layer(pool3, 3, 256, 512, 'conv4_1')
conv4_2, weights2, biases2 = conv_layer(conv4_1, 3, 512, 512, 'conv4_2')
conv4_3, weights3, biases3 = conv_layer(conv4_2, 3, 512, 512, 'conv4_3')
pool4 = tf.nn.max_pool(conv4_3, _ksize, _strides, 'SAME', name='pool4')
parameters += [weights1, biases1, weights2, biases2, weights3, biases3]
# pool4 --> conv5_1 --> conv5_2 --> conv5_3 --> pool5
conv5_1, weights1, biases1 = conv_layer(pool4, 3, 512, 512, 'conv5_1')
conv5_2, weights2, biases2 = conv_layer(conv5_1, 3, 512, 512, 'conv5_2')
conv5_3, weights3, biases3 = conv_layer(conv5_2, 3, 512, 512, 'conv5_3')
pool5 = tf.nn.max_pool(conv5_3, _ksize, _strides, 'SAME', name='pool5')
parameters += [weights1, biases1, weights2, biases2, weights3, biases3]
# pool5 --> flatten --> fc1 --> fc2 --> fc3
shape = int(np.prod(pool5.get_shape()[1:]))
pool5_flat = tf.reshape(pool5, [-1, shape])
fc1, weights1, biases1 = fc_layer(pool5_flat, shape, 4096, name='fc1')
fc2, weights2, biases2 = fc_layer(fc1, 4096, 4096, name='fc2')
fc3, weights3, biases3 = fc_layer(fc2, 4096, 1000, tf.nn.softmax, 'fc3')
parameters += [weights1, biases1, weights2, biases2, weights3, biases3]
activations = {
'conv1_1': conv1_1, 'conv1_2': conv1_2, 'pool1': pool1,
'conv2_1': conv2_1, 'conv2_2': conv2_2, 'pool2': pool2,
'conv3_1': conv3_1, 'conv3_2': conv3_2, 'conv3_3': conv3_3, 'pool3': pool3,
'conv4_1': conv4_1, 'conv4_2': conv4_2, 'conv4_3': conv4_3, 'pool4': pool4,
'conv5_1': conv5_1, 'conv5_2': conv5_2, 'conv5_3': conv5_3, 'pool5': pool5,
'fc1': fc1, 'fc2': fc2, 'fc3': fc3
}
return activations, parameters
if __name__ == '__main__':
# Get input
imlist = ['testflash.jpg', 'testme.jpg']
im_names = [os.path.splitext(os.path.basename(imf))[0] for imf in imlist]
input_images = [imread(f, mode='RGB') for f in imlist]
# Check 'vgg16_weights.npz exists
if not os.path.isfile('vgg16_weights.npz'):
raise Exception(
"The weights I use here were converted from the Caffe Model Zoo "
"weights by Davi Frossard. He didn't include a license so I'm "
"hesistant to re-post them. Please download them from his "
"website:\nhttp://www.cs.toronto.edu/~frossard/post/vgg16/")
# Build VGG16
if _debug:
sess = tf.InteractiveSession()
tf.summary.FileWriter('TensorBoard', sess.graph)
else:
sess = tf.Session()
vgg = PreTrainedVGG16('vgg16_weights.npz', sess)
# Run images through network, return softmax probabilities
class_probabilities = vgg.get_output(input_images)
print(class_probabilities.shape)
# Get Class Names
class_names = vgg.get_class_names()
#NOTE: only one file at a time is working... must fix
# Report results
# for imf, cps in zip(imlist, class_probabilities_list):
imf = im_names[0]
print("Top Five Results for", imf + ':')
top5 = (np.argsort(class_probabilities)[::-1])[0:5]
with open(imf + '_results.txt', 'w') as fout:
for p in np.argsort(class_probabilities)[::-1]:
fout.write(str(class_probabilities[p]) + ' : ' + class_names[p] + '\n')
for p in top5:
print(class_probabilities[p], ' : ', class_names[p])
| mit | 468,354,589,904,741,300 | 43.219626 | 87 | 0.594843 | false |
burgerdev/volumina | volumina/utility/preferencesManager.py | 1 | 4045 | ###############################################################################
# volumina: volume slicing and editing library
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import os
import threading
import cPickle as pickle
from volumina.utility import Singleton
class PreferencesManager():
# TODO: Maybe this should be a wrapper API around QSettings (but with pickle strings)
# Pros:
# - Settings would be stored in standard locations for each platform
# Cons:
# - QT dependency (currently there are no non-gui preferences, but maybe someday)
__metaclass__ = Singleton
def get(self, group, setting, default=None):
try:
return self._prefs[group][setting]
except KeyError:
return default
def set(self, group, setting, value):
if group not in self._prefs:
self._prefs[group] = {}
if setting not in self._prefs[group] or self._prefs[group][setting] != value:
self._prefs[group][setting] = value
self._dirty = True
if not self._poolingSave:
self._save()
def __init__(self):
self._filePath = os.path.expanduser('~/.ilastik_preferences')
self._lock = threading.Lock()
self._prefs = self._load()
self._poolingSave = False
self._dirty = False
def _load(self):
with self._lock:
if not os.path.exists(self._filePath):
return {}
else:
try:
with open(self._filePath, 'rb') as f:
return pickle.load(f)
except EOFError:
os.remove(self._filePath)
return {}
def _save(self):
if self._dirty:
with self._lock:
with open(self._filePath, 'wb') as f:
pickle.dump(self._prefs, f)
self._dirty = False
# We support the 'with' keyword, in which case a sequence of settings can be set,
# and the preferences file won't be updated until the __exit__ function is called.
# (Otherwise, each call to set() triggers a new save.)
def __enter__(self):
self._poolingSave = True
return self
def __exit__(self, *args):
self._poolingSave = False
self._save()
if __name__ == "__main__":
prefsMgr = PreferencesManager()
prefsMgr2 = PreferencesManager()
assert id(prefsMgr) == id(prefsMgr2), "It's supposed to be a singleton!"
with PreferencesManager() as prefsMgr:
prefsMgr.set("Group 1", "Setting1", [1,2,3])
prefsMgr.set("Group 1", "Setting2", ['a', 'b', 'c'])
prefsMgr.set("Group 2", "Setting1", "Forty-two")
# Force a new instance
PreferencesManager.instance = None
prefsMgr = PreferencesManager()
assert prefsMgr != prefsMgr2, "For this test, I want a separate instance"
assert prefsMgr.get("Group 1", "Setting1") == [1,2,3]
assert prefsMgr.get("Group 1", "Setting2") == ['a', 'b', 'c']
assert prefsMgr.get("Group 2", "Setting1") == "Forty-two"
| lgpl-3.0 | -6,503,057,252,922,043,000 | 36.453704 | 93 | 0.578245 | false |
pydanny/dj-stripe | tests/test_source.py | 1 | 3473 | """
dj-stripe Card Model Tests.
"""
import sys
from copy import deepcopy
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from djstripe.models import Source
from . import (
FAKE_CUSTOMER_III,
FAKE_SOURCE,
FAKE_SOURCE_II,
AssertStripeFksMixin,
SourceDict,
default_account,
)
class SourceTest(AssertStripeFksMixin, TestCase):
def setUp(self):
self.account = default_account()
self.user = get_user_model().objects.create_user(
username="testuser", email="[email protected]"
)
self.customer = FAKE_CUSTOMER_III.create_for_user(self.user)
self.customer.sources.all().delete()
self.customer.legacy_cards.all().delete()
def test_attach_objects_hook_without_customer(self):
source = Source.sync_from_stripe_data(deepcopy(FAKE_SOURCE_II))
self.assertEqual(source.customer, None)
self.assert_fks(
source,
expected_blank_fks={
"djstripe.Source.customer",
"djstripe.Customer.default_payment_method",
},
)
def test_sync_source_finds_customer(self):
source = Source.sync_from_stripe_data(deepcopy(FAKE_SOURCE))
self.assertEqual(self.customer, source.customer)
self.assert_fks(
source,
expected_blank_fks={
"djstripe.Customer.coupon",
"djstripe.Customer.default_payment_method",
},
)
def test_str(self):
fake_source = deepcopy(FAKE_SOURCE)
source = Source.sync_from_stripe_data(fake_source)
self.assertEqual("<id={}>".format(fake_source["id"]), str(source))
self.assert_fks(
source,
expected_blank_fks={
"djstripe.Customer.coupon",
"djstripe.Customer.default_payment_method",
},
)
@patch("stripe.Source.retrieve", return_value=deepcopy(FAKE_SOURCE), autospec=True)
def test_detach(self, source_retrieve_mock):
original_detach = SourceDict.detach
def mocked_detach(self):
return original_detach(self)
Source.sync_from_stripe_data(deepcopy(FAKE_SOURCE))
self.assertEqual(0, self.customer.legacy_cards.count())
self.assertEqual(1, self.customer.sources.count())
source = self.customer.sources.first()
with patch(
"tests.SourceDict.detach", side_effect=mocked_detach, autospec=True
) as mock_detach:
source.detach()
self.assertEqual(0, self.customer.sources.count())
# need to refresh_from_db since default_source was cleared with a query
self.customer.refresh_from_db()
self.assertIsNone(self.customer.default_source)
# need to refresh_from_db due to the implementation of Source.detach() -
# see TODO in method
source.refresh_from_db()
self.assertIsNone(source.customer)
self.assertEqual(source.status, "consumed")
if sys.version_info >= (3, 6):
# this mock isn't working on py34, py35, but it's not strictly necessary
# for the test
mock_detach.assert_called()
self.assert_fks(
source,
expected_blank_fks={
"djstripe.Source.customer",
"djstripe.Customer.default_payment_method",
},
)
| bsd-3-clause | 3,751,142,363,640,409,000 | 29.734513 | 87 | 0.610135 | false |
Mause/pyalp | pyalp/gs_interface/interface.py | 1 | 2747 | from os.path import join, dirname
HERE = dirname(__file__)
import logging
from apps.tournaments.models import Server, GameRequest
from .rpc_client import get_interface
import yaml
resource_load = lambda name: yaml.load(open(
join(HERE, 'resources', name)
))
game_ports = resource_load('game_ports.yaml')
game_names = resource_load('game_names.yaml')
interface = get_interface()
class NonExistantProtocol(Exception):
pass
class CouldNotReachServer(Exception):
pass
def calcqport(port, qgame):
assert qgame, qgame
assert qgame in game_ports, "Game Type not a valid type: {}".format(qgame)
portdiff = game_ports[qgame]
# check out value:
if portdiff[0] == '+': # if it starts with a + or -, it's an offset.
return port + int(portdiff[1:])
elif portdiff[0] == '-': # if it's 0, it means no change.
return port - int(portdiff[1:])
elif portdiff[0] == '0': # anything else is a static port.
return port
else:
return portdiff
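# Illustrative behaviour (hypothetical game keys; the real offsets live in
# resources/game_ports.yaml):
#
#     calcqport(27015, 'gamewith+1')   # portdiff '+1'    -> 27016
#     calcqport(27015, 'gamewith-5')   # portdiff '-5'    -> 27010
#     calcqport(27015, 'gamewith0')    # portdiff '0'     -> 27015
#     calcqport(27015, 'staticgame')   # portdiff '27016' -> '27016' (returned as-is)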
def _query_server(
serv, address, port, protocol,
get_players=False, get_rules=False):
qport = calcqport(port, serv.game.short)
if qport is False: # zero could be returned and eval'd as False
print("Unable to calculate query port for address")
else:
port = qport
logging.debug("%s ==> %s", port, qport)
logging.debug("querying {}:{} over the {} protocol".format(
address, port, protocol
))
if not interface.protocol_exists(protocol):
raise NonExistantProtocol(protocol)
return interface.query_server(
protocol,
address,
port,
get_players,
get_rules
)
def queryServer(address, port, protocol, get_players=False, get_rules=False):
logging.info('queryServer request for {}:{} for {}'.format(
address, port, protocol
))
result = Server.objects.filter(
ipaddress=address, game__querystr2=protocol
).select_related('game')
if not result:
result = GameRequest.objects.filter(
ipaddress=address, game__querystr2=protocol
)
if not result:
raise CouldNotReachServer(
'{}:{} with protocol {}'.format(address, port, protocol)
)
return _query_server(
result[0], address, port, protocol,
get_players, get_rules
)
def query_server_from_instance(serv, get_players=False, get_rules=False):
return _query_server(
serv,
serv.address,
serv.queryport,
serv.game.engine_type,
get_players,
get_rules
)
def game_title(gamename):
gamename = gamename.lower()
try:
return game_names[gamename]
except KeyError:
return "Game Status"
| mit | -7,299,512,025,326,176,000 | 22.478632 | 78 | 0.626866 | false |
guziy/basemap | setup.py | 1 | 6013 | from __future__ import (absolute_import, division, print_function)
import glob
import io
import os
import sys
from setuptools.dist import Distribution
if sys.version_info < (2, 6):
raise SystemExit("""matplotlib and the basemap toolkit require Python 2.6 or later.""")
# Do not require numpy for just querying the package
# Taken from the netcdf-python setup file (which took it from h5py setup file).
inc_dirs = []
if any('--' + opt in sys.argv for opt in Distribution.display_option_names +
['help-commands', 'help']) or sys.argv[1] == 'egg_info':
from setuptools import setup, Extension
else:
import numpy
# Use numpy versions if they are available.
from numpy.distutils.core import setup, Extension
# append numpy include dir.
inc_dirs.append(numpy.get_include())
def get_install_requirements(path):
path = os.path.join(os.path.dirname(__file__), path)
with io.open(path, encoding='utf-8') as fp:
content = fp.read()
return [req for req in content.split("\n")
if req != '' and not req.startswith('#')]
def checkversion(GEOS_dir):
"""check geos C-API header file (geos_c.h)"""
try:
f = open(os.path.join(GEOS_dir, 'include', 'geos_c.h'))
except IOError:
return None
geos_version = None
for line in f:
if line.startswith('#define GEOS_VERSION'):
geos_version = line.split()[2]
return geos_version
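# For instance (illustrative, not from the original script): if the file
# /usr/local/include/geos_c.h contains the line
#     #define GEOS_VERSION "3.8.1"
# then checkversion('/usr/local') returns '"3.8.1"' (quotes included), and
# None when the header cannot be opened.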
# get location of geos lib from environment variable if it is set.
if 'GEOS_DIR' in os.environ:
GEOS_dir = os.environ.get('GEOS_DIR')
else:
# set GEOS_dir manually here if automatic detection fails.
GEOS_dir = None
user_home = os.path.expanduser('~')
geos_search_locations = [user_home, os.path.join(user_home, 'local'),
'/usr', '/usr/local', '/sw', '/opt', '/opt/local']
if GEOS_dir is None:
# if GEOS_dir not set, check a few standard locations.
GEOS_dirs = geos_search_locations
for direc in GEOS_dirs:
geos_version = checkversion(direc)
sys.stdout.write('checking for GEOS lib in %s ....\n' % direc)
if geos_version is None or geos_version < '"3.1.1"':
continue
else:
sys.stdout.write('GEOS lib (version %s) found in %s\n' %\
(geos_version[1:-1],direc))
GEOS_dir = direc
break
else:
geos_version = checkversion(GEOS_dir)
if GEOS_dir is None:
raise SystemExit("""
Can't find geos library in standard locations ('%s').
Please install the corresponding packages using your
systems software management system (e.g. for Debian Linux do:
'apt-get install libgeos-3.3.3 libgeos-c1 libgeos-dev' and/or
set the environment variable GEOS_DIR to point to the location
where geos is installed (for example, if geos_c.h
is in /usr/local/include, and libgeos_c is in /usr/local/lib,
set GEOS_DIR to /usr/local), or edit the setup.py script
manually and set the variable GEOS_dir (right after the line
that says "set GEOS_dir manually here".""" % "', '".join(geos_search_locations))
else:
geos_include_dirs=[os.path.join(GEOS_dir,'include')] + inc_dirs
geos_library_dirs=[os.path.join(GEOS_dir,'lib'),os.path.join(GEOS_dir,'lib64')]
packages = ['mpl_toolkits','mpl_toolkits.basemap']
namespace_packages = ['mpl_toolkits']
package_dirs = {'':'lib'}
# can't install _geoslib in mpl_toolkits.basemap namespace,
# or Basemap objects won't be pickleable.
# don't use runtime_library_dirs on windows (workaround
# for a distutils bug - http://bugs.python.org/issue2437).
if sys.platform == 'win32':
runtime_lib_dirs = []
else:
runtime_lib_dirs = geos_library_dirs
extensions = [ Extension("_geoslib",['src/_geoslib.c'],
library_dirs=geos_library_dirs,
runtime_library_dirs=runtime_lib_dirs,
include_dirs=geos_include_dirs,
libraries=['geos_c']) ]
# Specify all the required mpl data
pathout =\
os.path.join('lib',os.path.join('mpl_toolkits',os.path.join('basemap','data')))
datafiles = glob.glob(os.path.join(pathout,'*'))
datafiles = [os.path.join('data',os.path.basename(f)) for f in datafiles]
package_data = {'mpl_toolkits.basemap':datafiles}
install_requires = get_install_requirements("requirements.txt")
__version__ = "1.2.1"
setup(
name = "basemap",
version = __version__,
description = "Plot data on map projections with matplotlib",
long_description = """
An add-on toolkit for matplotlib that lets you plot data
on map projections with coastlines, lakes, rivers and political boundaries.
See http://matplotlib.org/basemap/users/examples.html for
examples of what it can do.""",
url = "https://matplotlib.org/basemap/",
download_url = "https://github.com/matplotlib/basemap/archive/v{0}rel.tar.gz".format(__version__),
author = "Jeff Whitaker",
author_email = "[email protected]",
maintainer = "Ben Root",
maintainer_email = "[email protected]",
install_requires = install_requires,
platforms = ["any"],
license = "OSI Approved",
keywords = ["python","plotting","plots","graphs","charts","GIS","mapping","map projections","maps"],
classifiers = ["Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent"],
packages = packages,
namespace_packages = namespace_packages,
package_dir = package_dirs,
ext_modules = extensions,
package_data = package_data
)
| gpl-2.0 | 8,225,000,807,982,011,000 | 38.559211 | 111 | 0.634791 | false |
alexandrul-ci/robotframework | src/robot/utils/normalizing.py | 1 | 3987 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from collections import MutableMapping
from .platform import PY3
from .robottypes import is_dict_like
def normalize(string, ignore=(), caseless=True, spaceless=True):
"""Normalizes given string according to given spec.
By default string is turned to lower case and all whitespace is removed.
Additional characters can be removed by giving them in ``ignore`` list.
"""
empty = type(string)()
if PY3 and isinstance(ignore, bytes):
# Iterating bytes in Python3 yields integers.
ignore = [bytes([i]) for i in ignore]
if spaceless:
string = empty.join(string.split())
if caseless:
string = lower(string)
ignore = [lower(i) for i in ignore]
for ign in ignore:
if ign in string: # performance optimization
string = string.replace(ign, empty)
return string
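# Examples (sketch, not part of the original module):
#
#     normalize('Robot Framework')          # -> 'robotframework'
#     normalize('KEY_1', ignore=['_'])      # -> 'key1'
#     normalize('Key One', caseless=False)  # -> 'KeyOne'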
# http://ironpython.codeplex.com/workitem/33133
if sys.platform == 'cli' and sys.version_info < (2, 7, 5):
def lower(string):
return ('A' + string).lower()[1:]
else:
def lower(string):
return string.lower()
class NormalizedDict(MutableMapping):
"""Custom dictionary implementation automatically normalizing keys."""
def __init__(self, initial=None, ignore=(), caseless=True, spaceless=True):
"""Initialized with possible initial value and normalizing spec.
Initial values can be either a dictionary or an iterable of name/value
pairs. In the latter case items are added in the given order.
Normalizing spec has exact same semantics as with the :func:`normalize`
function.
"""
self._data = {}
self._keys = {}
self._normalize = lambda s: normalize(s, ignore, caseless, spaceless)
if initial:
self._add_initial(initial)
def _add_initial(self, initial):
items = initial.items() if hasattr(initial, 'items') else initial
for key, value in items:
self[key] = value
def __getitem__(self, key):
return self._data[self._normalize(key)]
def __setitem__(self, key, value):
norm_key = self._normalize(key)
self._data[norm_key] = value
self._keys.setdefault(norm_key, key)
def __delitem__(self, key):
norm_key = self._normalize(key)
del self._data[norm_key]
del self._keys[norm_key]
def __iter__(self):
return (self._keys[norm_key] for norm_key in sorted(self._keys))
def __len__(self):
return len(self._data)
def __str__(self):
return '{%s}' % ', '.join('%r: %r' % (key, self[key]) for key in self)
def __eq__(self, other):
if not is_dict_like(other):
return False
if not isinstance(other, NormalizedDict):
other = NormalizedDict(other)
return self._data == other._data
def __ne__(self, other):
return not self == other
def copy(self):
copy = NormalizedDict()
copy._data = self._data.copy()
copy._keys = self._keys.copy()
copy._normalize = self._normalize
return copy
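# Example behaviour (illustrative): keys that normalize to the same string
# collide, while the first inserted spelling is kept for iteration:
#
#     nd = NormalizedDict()
#     nd['Example Key'] = 1
#     nd['examplekey']      # -> 1 (same key after normalization)
#     list(nd)              # -> ['Example Key']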
# Speed-ups. Following methods are faster than default implementations.
def __contains__(self, key):
return self._normalize(key) in self._data
def clear(self):
self._data.clear()
self._keys.clear()
| apache-2.0 | -7,067,595,486,309,116,000 | 31.680328 | 79 | 0.635566 | false |
joshishungry/artificial_intel | assignments/lab4/map_coloring_csp.py | 1 | 2656 | #!/usr/bin/env python
"""
Implementation of the Map coloring problem from 2006 Quiz 2
"""
import sys
from csp import CSP, Variable, BinaryConstraint, solve_csp_problem, \
basic_constraint_checker
def map_coloring_csp_problem():
constraints = []
variables = []
# order of the variables here is the order given in the problem
variables.append(Variable("MA", ["B"]))
variables.append(Variable("TX", ["R"]))
variables.append(Variable("NE", ["R", "B", "Y"]))
variables.append(Variable("OV", ["R", "B", "Y"]))
variables.append(Variable("SE", ["R", "B", "Y"]))
variables.append(Variable("GL", ["R", "B", "Y"]))
variables.append(Variable("MID",["R", "B", "Y"]))
variables.append(Variable("MW", ["R", "B", "Y"]))
variables.append(Variable("SO", ["R", "B"]))
variables.append(Variable("NY", ["R", "B"]))
variables.append(Variable("FL", ["R", "B"]))
# these are all variable pairing of adjacent seats
edges = [("NE", "NY"),
("NE", "MA"),
("MA", "NY"),
("GL", "NY"),
("GL", "OV"),
("MID", "NY"),
("OV", "NY"),
("OV", "MID"),
("MW", "OV"),
("MW", "TX"),
("TX", "SO"),
("SO", "OV"),
("SO", "FL"),
("FL", "SE"),
("SE", "MID"),
("SE", "SO")]
# duplicate the edges the other way.
all_edges = []
for edge in edges:
all_edges.append((edge[0], edge[1]))
all_edges.append((edge[1], edge[0]))
forbidden = [("R", "B"), ("B", "R"), ("Y", "Y")]
# not allowed constraints:
def forbidden_edge(val_a, val_b, name_a, name_b):
if (val_a, val_b) in forbidden or (val_b, val_a) in forbidden:
return False
return True
for pair in all_edges:
constraints.append(
BinaryConstraint(pair[0], pair[1],
forbidden_edge,
"R-B, B-R, Y-Y edges are not allowed"))
return CSP(constraints, variables)
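# Illustrative note (not part of the original lab code): forbidden_edge is
# symmetric, so both orientations of a bad colour pair are rejected:
#
#     ('R', 'B') -> False    ('B', 'R') -> False
#     ('Y', 'Y') -> False    ('R', 'Y') -> True (allowed)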
if __name__ == "__main__":
if len(sys.argv) > 1:
checker_type = sys.argv[1]
else:
checker_type = "dfs"
if checker_type == "dfs":
checker = basic_constraint_checker
elif checker_type == "fc":
import lab4
checker = lab4.forward_checking
elif checker_type == "fcps":
import lab4
checker = lab4.forward_checking_prop_singleton
else:
import lab4
checker = lab4.forward_checking_prop_singleton
solve_csp_problem(map_coloring_csp_problem, checker, verbose=True)
| apache-2.0 | -4,624,747,162,642,793,000 | 31 | 70 | 0.508283 | false |
ianrenton/playbulb-tools | weathercheck/weathercheck.py | 1 | 1370 | # Python script to set a Playbulb LED colour based on the current weather.
# Run me as a cron job for ambient weather information!
# by Ian Renton
# https://github.com/ianrenton/playbulb-tools
# Uses python OpenWeatherMap wrapper from https://github.com/csparpa/pyowm
import pyowm, re, subprocess
#### Config ####
# Your location
LOCATION = 'London'
# Your Playbulb address (obtained with 'sudo hcitool lescan')
PLAYBULB_ADDRESS = '01:23:45:67:89:10'
# Weather to colour dict
COLOUR_MAP = { 'clear': 'FFFF6000',
'clouds': '80000000',
'rain': '000000FF',
'drizzle': '0000FFFF',
'snow': 'FFFFFFFF',
'thunderstorm': '80FF0000'}
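# Helper sketch (not in the original script): the colour strings above are
# 8-hex-digit values written to the Playbulb colour characteristic, assumed
# here to be WWRRGGBB (white, red, green, blue channels).
def wrgb(white, red, green, blue):
    """Compose a WWRRGGBB hex string from 0-255 channel values (assumed format)."""
    return '%02X%02X%02X%02X' % (white, red, green, blue)
# e.g. wrgb(0, 0, 0, 255) == '000000FF' (the 'rain' colour above)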
#### Code below ####
# Show the name of the playbulb
proc = subprocess.Popen(('gatttool -b ' + PLAYBULB_ADDRESS + ' --char-read -a 0x0003').split(), stdout = subprocess.PIPE)
for line in iter(proc.stdout.readline,''):
name = ''.join(x.strip() for x in re.findall(r'[0-9a-f]{2}\s', line)).decode("hex")
print 'Playbulb name: ' + name
# Get weather forecast
weather = pyowm.OWM().weather_at_place(LOCATION).get_weather().get_status()
colour = COLOUR_MAP[weather]
print 'Weather for ' + LOCATION + ': ' + weather + ', colour ' + colour
# Set Playbulb colour
subprocess.call(('gatttool -b ' + PLAYBULB_ADDRESS + ' --char-write -a 0x0016 -n ' + colour).split())
| bsd-3-clause | 7,456,926,169,413,411,000 | 34.128205 | 121 | 0.651095 | false |
gogoair/foremast | src/foremast/securitygroup/create_securitygroup.py | 1 | 11172 | # Foremast - Pipeline Tooling
#
# Copyright 2018 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create Security Groups for Spinnaker Pipelines.
Security Group port specifications will be sourced from the `application.json`
files for each environment.
Examples:
application-master.json::
{
"security_group": {
"description": "Security Group description",
"ingress": {
"eureka": [
{"start_port": 80, "end_port": 8080, "protocol": "tcp"}
],
"coreforrest": [
8080,
8443
],
"0.0.0.0/0": [
8080
]
}
}
}
"""
import ipaddress
import logging
from contextlib import suppress
import boto3
from boto3.exceptions import botocore
from deepmerge import conservative_merger
from ..consts import DEFAULT_SECURITYGROUP_RULES
from ..exceptions import (ForemastConfigurationFileError, SpinnakerSecurityGroupCreationFailed,
SpinnakerSecurityGroupError)
from ..utils import get_details, get_properties, get_security_group_id, get_template, get_vpc_id, wait_for_task
class SpinnakerSecurityGroup:
"""Manipulate Spinnaker Security Groups.
Args:
app (str): Application name.
env (str): Deployment environment.
prop_path (str): Path to the raw.properties.json.
region (str): AWS Region.
"""
def __init__(self, app=None, env=None, region=None, prop_path=None):
self.log = logging.getLogger(__name__)
self.app_name = app
self.env = env
self.region = region
self.properties = get_properties(properties_file=prop_path, env=self.env, region=self.region)
self.generated = get_details(app=self.app_name)
self.group = self.generated.data['project']
def _validate_cidr(self, rule):
"""Validate the cidr block in a rule.
Returns:
True: Upon successful completion.
Raises:
SpinnakerSecurityGroupCreationFailed: CIDR definition is invalid or
the network range is too wide.
"""
try:
network = ipaddress.IPv4Network(rule['app'])
except (ipaddress.NetmaskValueError, ValueError) as error:
raise SpinnakerSecurityGroupCreationFailed(error)
self.log.debug('Validating CIDR: %s', network.exploded)
return True
def _process_rules(self, rules):
"""Process rules into cidr and non-cidr lists.
Args:
rules (list): Allowed Security Group ports and protocols.
Returns:
(list, list): Security Group reference rules and custom CIDR rules.
"""
cidr = []
non_cidr = []
for rule in rules:
if '.' in rule['app']:
self.log.debug('Custom CIDR rule: %s', rule)
self._validate_cidr(rule)
cidr.append(rule)
else:
self.log.debug('SG reference rule: %s', rule)
non_cidr.append(rule)
self.log.debug('Custom CIDR rules: %s', cidr)
self.log.debug('SG reference rules: %s', non_cidr)
return non_cidr, cidr
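# Illustrative example (not from the original module): given
#     rules = [{'app': '10.0.0.0/8', ...}, {'app': 'otherapp', ...}]
# this returns ([{'app': 'otherapp', ...}], [{'app': '10.0.0.0/8', ...}]) --
# any 'app' value containing a dot is validated and treated as a CIDR block,
# everything else as a security-group reference.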
def add_tags(self):
"""Add tags to security group.
Returns:
True: Upon successful completion.
"""
session = boto3.session.Session(profile_name=self.env, region_name=self.region)
resource = session.resource('ec2')
group_id = get_security_group_id(self.app_name, self.env, self.region)
security_group = resource.SecurityGroup(group_id)
try:
tag = security_group.create_tags(
DryRun=False,
Tags=[{
'Key': 'app_group',
'Value': self.group
}, {
'Key': 'app_name',
'Value': self.app_name
}])
self.log.debug('Security group has been tagged: %s', tag)
except botocore.exceptions.ClientError as error:
self.log.warning(error)
return True
def add_cidr_rules(self, rules):
"""Add cidr rules to security group via boto.
Args:
rules (list): Allowed Security Group ports and protocols.
Returns:
True: Upon successful completion.
Raises:
SpinnakerSecurityGroupError: boto3 call failed to add CIDR block to
Security Group.
"""
session = boto3.session.Session(profile_name=self.env, region_name=self.region)
client = session.client('ec2')
group_id = get_security_group_id(self.app_name, self.env, self.region)
for rule in rules:
data = {
'DryRun':
False,
'GroupId':
group_id,
'IpPermissions': [{
'IpProtocol': rule['protocol'],
'FromPort': rule['start_port'],
'ToPort': rule['end_port'],
'IpRanges': [{
'CidrIp': rule['app']
}]
}]
}
self.log.debug('Security Group rule: %s', data)
try:
client.authorize_security_group_ingress(**data)
except botocore.exceptions.ClientError as error:
if 'InvalidPermission.Duplicate' in str(error):
self.log.debug('Duplicate rule exist, that is OK.')
else:
msg = 'Unable to add cidr rules to {}'.format(rule.get('app'))
self.log.error(msg)
raise SpinnakerSecurityGroupError(msg)
return True
def resolve_self_references(self, rules):
"""Resolves `$self` references to actual application name in security group rules."""
with suppress(KeyError):
rule = rules.pop('$self')
rules[self.app_name] = rule
return rules
def update_default_rules(self):
"""Concatinate application and global security group rules."""
app_ingress = self.properties['security_group']['ingress']
ingress = conservative_merger.merge(DEFAULT_SECURITYGROUP_RULES, app_ingress)
resolved_ingress = self.resolve_self_references(ingress)
self.log.info('Updated default rules:\n%s', ingress)
return resolved_ingress
def _create_security_group(self, ingress):
"""Send a POST to spinnaker to create a new security group.
Returns:
boolean: True if created successfully
"""
template_kwargs = {
'app': self.app_name,
'env': self.env,
'region': self.region,
'vpc': get_vpc_id(self.env, self.region),
'description': self.properties['security_group']['description'],
'ingress': ingress,
}
secgroup_json = get_template(
template_file='infrastructure/securitygroup_data.json.j2', formats=self.generated, **template_kwargs)
wait_for_task(secgroup_json)
return True
def create_security_group(self): # noqa
"""Send a POST to spinnaker to create or update a security group.
Returns:
boolean: True if created successfully
Raises:
ForemastConfigurationFileError: Missing environment configuration or
misconfigured Security Group definition.
"""
ingress_rules = []
try:
security_id = get_security_group_id(name=self.app_name, env=self.env, region=self.region)
except (SpinnakerSecurityGroupError, AssertionError):
self._create_security_group(ingress_rules)
else:
self.log.debug('Security Group ID %s found for %s.', security_id, self.app_name)
try:
ingress = self.update_default_rules()
except KeyError:
msg = 'Possible missing configuration for "{0}".'.format(self.env)
self.log.error(msg)
raise ForemastConfigurationFileError(msg)
for app in ingress:
rules = ingress[app]
# Essentially we have two formats: simple, advanced
# - simple: is just a list of ports
# - advanced: selects ports ranges and protocols
for rule in rules:
ingress_rule = self.create_ingress_rule(app, rule)
ingress_rules.append(ingress_rule)
ingress_rules_no_cidr, ingress_rules_cidr = self._process_rules(ingress_rules)
self._create_security_group(ingress_rules_no_cidr)
# Append cidr rules
self.add_cidr_rules(ingress_rules_cidr)
# Tag security group
self.add_tags()
self.log.info('Successfully created %s security group', self.app_name)
return True
def create_ingress_rule(self, app, rule):
"""Create a normalized ingress rule.
Args:
app (str): Application name
rule (dict or int): Allowed Security Group ports and protocols.
Returns:
dict: Contains app, start_port, end_port, protocol, cross_account_env and cross_account_vpc_id
"""
if isinstance(rule, dict):
# Advanced
start_port = rule.get('start_port')
end_port = rule.get('end_port')
protocol = rule.get('protocol', 'tcp')
requested_cross_account = rule.get('env', self.env)
if self.env == requested_cross_account:
# We are trying to use cross-account security group settings within the same account
# We should not allow this.
cross_account_env = None
cross_account_vpc_id = None
else:
cross_account_env = requested_cross_account
cross_account_vpc_id = get_vpc_id(cross_account_env, self.region)
else:
start_port = rule
end_port = rule
protocol = 'tcp'
cross_account_env = None
cross_account_vpc_id = None
created_rule = {
'app': app,
'start_port': start_port,
'end_port': end_port,
'protocol': protocol,
'cross_account_env': cross_account_env,
'cross_account_vpc_id': cross_account_vpc_id
}
self.log.debug('Normalized ingress rule: %s', created_rule)
return created_rule
| apache-2.0 | -642,778,247,066,717,200 | 33.481481 | 113 | 0.568654 | false |
ReactiveX/RxPY | rx/core/operators/merge.py | 1 | 4401 | from typing import Callable, Optional
import rx
from rx import from_future
from rx.core import Observable
from rx.disposable import CompositeDisposable, SingleAssignmentDisposable
from rx.internal.concurrency import synchronized
from rx.internal.utils import is_future
def _merge(*sources: Observable,
max_concurrent: Optional[int] = None
) -> Callable[[Observable], Observable]:
def merge(source: Observable) -> Observable:
"""Merges an observable sequence of observable sequences into
an observable sequence, limiting the number of concurrent
subscriptions to inner sequences. Or merges two observable
sequences into a single observable sequence.
Examples:
>>> res = merge(sources)
Args:
source: Source observable.
Returns:
The observable sequence that merges the elements of the
inner sequences.
"""
if max_concurrent is None:
sources_ = tuple([source]) + sources
return rx.merge(*sources_)
def subscribe(observer, scheduler=None):
active_count = [0]
group = CompositeDisposable()
is_stopped = [False]
queue = []
def subscribe(xs):
subscription = SingleAssignmentDisposable()
group.add(subscription)
@synchronized(source.lock)
def on_completed():
group.remove(subscription)
if queue:
s = queue.pop(0)
subscribe(s)
else:
active_count[0] -= 1
if is_stopped[0] and active_count[0] == 0:
observer.on_completed()
on_next = synchronized(source.lock)(observer.on_next)
on_error = synchronized(source.lock)(observer.on_error)
subscription.disposable = xs.subscribe_(on_next, on_error, on_completed, scheduler)
def on_next(inner_source):
if active_count[0] < max_concurrent:
active_count[0] += 1
subscribe(inner_source)
else:
queue.append(inner_source)
def on_completed():
is_stopped[0] = True
if active_count[0] == 0:
observer.on_completed()
group.add(source.subscribe_(on_next, observer.on_error, on_completed, scheduler))
return group
return Observable(subscribe)
return merge
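# Usage sketch (illustrative; this operator is normally reached through
# rx.operators.merge rather than called directly):
#
#     import rx
#     from rx import operators as ops
#
#     outer = rx.of(rx.of(1, 2), rx.of(3, 4), rx.of(5, 6))
#     outer.pipe(ops.merge(max_concurrent=2)).subscribe(print)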
def _merge_all() -> Callable[[Observable], Observable]:
def merge_all(source: Observable) -> Observable:
"""Partially applied merge_all operator.
Merges an observable sequence of observable sequences into an
observable sequence.
Args:
source: Source observable to merge.
Returns:
The observable sequence that merges the elements of the inner
sequences.
"""
def subscribe(observer, scheduler=None):
group = CompositeDisposable()
is_stopped = [False]
m = SingleAssignmentDisposable()
group.add(m)
def on_next(inner_source):
inner_subscription = SingleAssignmentDisposable()
group.add(inner_subscription)
inner_source = from_future(inner_source) if is_future(inner_source) else inner_source
@synchronized(source.lock)
def on_completed():
group.remove(inner_subscription)
if is_stopped[0] and len(group) == 1:
observer.on_completed()
on_next = synchronized(source.lock)(observer.on_next)
on_error = synchronized(source.lock)(observer.on_error)
subscription = inner_source.subscribe_(on_next, on_error, on_completed, scheduler)
inner_subscription.disposable = subscription
def on_completed():
is_stopped[0] = True
if len(group) == 1:
observer.on_completed()
m.disposable = source.subscribe_(on_next, observer.on_error, on_completed, scheduler)
return group
return Observable(subscribe)
return merge_all
| mit | 6,199,957,247,772,746,000 | 34.208 | 101 | 0.561918 | false |
chesterbarryang/odoo_ultrasteel | stock_ownership_availability_rules/__openerp__.py | 1 | 1316 | # -*- coding: utf-8 -*-
# Author: Leonardo Pistone
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{'name': 'Stock Ownership Availability Rules',
'summary': 'Enforce ownership on stock availability',
'description': """
This version is for Odoo 9 release
""",
'version': '0.2',
'author': "Camptocamp,Odoo Community Association (OCA)",
'category': 'Purchase Management',
'license': 'AGPL-3',
'images': [],
'depends': ['stock',
],
'demo': [],
'data': [
'view/quant.xml',
'view/move.xml',
'security/group.xml'
],
"pre_init_hook": 'fill_quant_owner',
'auto_install': False,
'installable': True,
}
| lgpl-3.0 | -5,975,717,935,321,529,000 | 33.631579 | 77 | 0.674012 | false |
ioam/paramtk | paramtk/odict.py | 1 | 46083 | from __future__ import generators
# odict.py
# An Ordered Dictionary object
# Copyright (C) 2005 Nicola Larosa, Michael Foord
# E-mail: nico AT tekNico DOT net, fuzzyman AT voidspace DOT org DOT uk
# This software is licensed under the terms of the BSD license.
# http://www.voidspace.org.uk/python/license.shtml
# Basically you're free to copy, modify, distribute and relicense it,
# So long as you keep a copy of the license with it.
# Documentation at http://www.voidspace.org.uk/python/odict.html
# For information about bugfixes, updates and support, please join the
# Pythonutils mailing list:
# http://groups.google.com/group/pythonutils/
# Comments, suggestions and bug reports welcome.
"""A dict that keeps keys in insertion order"""
__author__ = ('Nicola Larosa <[email protected]>, '
'Michael Foord <fuzzyman AT voidspace DOT org DOT uk>')
__docformat__ = "restructuredtext en"
__revision__ = '$Id: external.py 12024 2012-05-02 21:13:18Z ceball $'
__version__ = '0.2.2'
__all__ = ['OrderedDict', 'SequenceOrderedDict']
import sys
INTP_VER = sys.version_info[:2]
if INTP_VER < (2, 2):
raise RuntimeError("Python v.2.2 or later required")
import types, warnings
class OrderedDict(dict):
"""
A class of dictionary that keeps the insertion order of keys.
All appropriate methods return keys, items, or values in an ordered way.
All normal dictionary methods are available. Update and comparison is
restricted to other OrderedDict objects.
Various sequence methods are available, including the ability to explicitly
mutate the key ordering.
__contains__ tests:
>>> d = OrderedDict(((1, 3),))
>>> 1 in d
1
>>> 4 in d
0
__getitem__ tests:
>>> OrderedDict(((1, 3), (3, 2), (2, 1)))[2]
1
>>> OrderedDict(((1, 3), (3, 2), (2, 1)))[4]
Traceback (most recent call last):
KeyError: 4
__len__ tests:
>>> len(OrderedDict())
0
>>> len(OrderedDict(((1, 3), (3, 2), (2, 1))))
3
get tests:
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.get(1)
3
>>> d.get(4) is None
1
>>> d.get(4, 5)
5
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1)])
has_key tests:
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.has_key(1)
1
>>> d.has_key(4)
0
"""
def __init__(self, init_val=(), strict=False):
"""
Create a new ordered dictionary. Cannot init from a normal dict,
nor from kwargs, since items order is undefined in those cases.
If the ``strict`` keyword argument is ``True`` (``False`` is the
default) then when doing slice assignment - the ``OrderedDict`` you are
assigning from *must not* contain any keys in the remaining dict.
>>> OrderedDict()
OrderedDict([])
>>> OrderedDict({1: 1})
Traceback (most recent call last):
TypeError: undefined order, cannot get items from dict
>>> OrderedDict({1: 1}.items())
OrderedDict([(1, 1)])
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1)])
>>> OrderedDict(d)
OrderedDict([(1, 3), (3, 2), (2, 1)])
"""
self.strict = strict
dict.__init__(self)
if isinstance(init_val, OrderedDict):
self._sequence = init_val.keys()
dict.update(self, init_val)
elif isinstance(init_val, dict):
# we lose compatibility with other ordered dict types this way
raise TypeError('undefined order, cannot get items from dict')
else:
self._sequence = []
self.update(init_val)
### Special methods ###
def __delitem__(self, key):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> del d[3]
>>> d
OrderedDict([(1, 3), (2, 1)])
>>> del d[3]
Traceback (most recent call last):
KeyError: 3
>>> d[3] = 2
>>> d
OrderedDict([(1, 3), (2, 1), (3, 2)])
>>> del d[0:1]
>>> d
OrderedDict([(2, 1), (3, 2)])
"""
if isinstance(key, types.SliceType):
# FIXME: efficiency?
keys = self._sequence[key]
for entry in keys:
dict.__delitem__(self, entry)
del self._sequence[key]
else:
# do the dict.__delitem__ *first* as it raises
# the more appropriate error
dict.__delitem__(self, key)
self._sequence.remove(key)
def __eq__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d == OrderedDict(d)
True
>>> d == OrderedDict(((1, 3), (2, 1), (3, 2)))
False
>>> d == OrderedDict(((1, 0), (3, 2), (2, 1)))
False
>>> d == OrderedDict(((0, 3), (3, 2), (2, 1)))
False
>>> d == dict(d)
False
>>> d == False
False
"""
if isinstance(other, OrderedDict):
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() == other.items())
else:
return False
def __lt__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> c < d
True
>>> d < c
False
>>> d < dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() < other.items())
def __le__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> e = OrderedDict(d)
>>> c <= d
True
>>> d <= c
False
>>> d <= dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
>>> d <= e
True
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() <= other.items())
def __ne__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d != OrderedDict(d)
False
>>> d != OrderedDict(((1, 3), (2, 1), (3, 2)))
True
>>> d != OrderedDict(((1, 0), (3, 2), (2, 1)))
True
>>> d == OrderedDict(((0, 3), (3, 2), (2, 1)))
False
>>> d != dict(d)
True
>>> d != False
True
"""
if isinstance(other, OrderedDict):
# FIXME: efficiency?
# Generate both item lists for each compare
return not (self.items() == other.items())
else:
return True
def __gt__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> d > c
True
>>> c > d
False
>>> d > dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() > other.items())
def __ge__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> e = OrderedDict(d)
>>> c >= d
False
>>> d >= c
True
>>> d >= dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
>>> e >= d
True
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() >= other.items())
def __repr__(self):
"""
Used for __repr__ and __str__
>>> r1 = repr(OrderedDict((('a', 'b'), ('c', 'd'), ('e', 'f'))))
>>> r1
"OrderedDict([('a', 'b'), ('c', 'd'), ('e', 'f')])"
>>> r2 = repr(OrderedDict((('a', 'b'), ('e', 'f'), ('c', 'd'))))
>>> r2
"OrderedDict([('a', 'b'), ('e', 'f'), ('c', 'd')])"
>>> r1 == str(OrderedDict((('a', 'b'), ('c', 'd'), ('e', 'f'))))
True
>>> r2 == str(OrderedDict((('a', 'b'), ('e', 'f'), ('c', 'd'))))
True
"""
return '%s([%s])' % (self.__class__.__name__, ', '.join(
['(%r, %r)' % (key, self[key]) for key in self._sequence]))
def __setitem__(self, key, val):
"""
Allows slice assignment, so long as the slice is an OrderedDict
>>> d = OrderedDict()
>>> d['a'] = 'b'
>>> d['b'] = 'a'
>>> d[3] = 12
>>> d
OrderedDict([('a', 'b'), ('b', 'a'), (3, 12)])
>>> d[:] = OrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
OrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d[::2] = OrderedDict(((7, 8), (9, 10)))
>>> d
OrderedDict([(7, 8), (2, 3), (9, 10)])
>>> d = OrderedDict(((0, 1), (1, 2), (2, 3), (3, 4)))
>>> d[1:3] = OrderedDict(((1, 2), (5, 6), (7, 8)))
>>> d
OrderedDict([(0, 1), (1, 2), (5, 6), (7, 8), (3, 4)])
>>> d = OrderedDict(((0, 1), (1, 2), (2, 3), (3, 4)), strict=True)
>>> d[1:3] = OrderedDict(((1, 2), (5, 6), (7, 8)))
>>> d
OrderedDict([(0, 1), (1, 2), (5, 6), (7, 8), (3, 4)])
>>> a = OrderedDict(((0, 1), (1, 2), (2, 3)), strict=True)
>>> a[3] = 4
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[:2] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)])
Traceback (most recent call last):
ValueError: slice assignment must be from unique keys
>>> a = OrderedDict(((0, 1), (1, 2), (2, 3)))
>>> a[3] = 4
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[:2] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::-1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(3, 4), (2, 3), (1, 2), (0, 1)])
>>> d = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> d[:1] = 3
Traceback (most recent call last):
TypeError: slice assignment requires an OrderedDict
>>> d = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> d[:1] = OrderedDict([(9, 8)])
>>> d
OrderedDict([(9, 8), (1, 2), (2, 3), (3, 4)])
"""
if isinstance(key, types.SliceType):
if not isinstance(val, OrderedDict):
# FIXME: allow a list of tuples?
raise TypeError('slice assignment requires an OrderedDict')
keys = self._sequence[key]
# NOTE: Could use ``range(*key.indices(len(self._sequence)))``
indexes = range(len(self._sequence))[key]
if key.step is None:
# NOTE: new slice may not be the same size as the one being
# overwritten !
# NOTE: What is the algorithm for an impossible slice?
# e.g. d[5:3]
pos = key.start or 0
del self[key]
newkeys = val.keys()
for k in newkeys:
if k in self:
if self.strict:
raise ValueError('slice assignment must be from '
'unique keys')
else:
# NOTE: This removes duplicate keys *first*
# so start position might have changed?
del self[k]
self._sequence = (self._sequence[:pos] + newkeys +
self._sequence[pos:])
dict.update(self, val)
else:
# extended slice - length of new slice must be the same
# as the one being replaced
if len(keys) != len(val):
raise ValueError('attempt to assign sequence of size %s '
'to extended slice of size %s' % (len(val), len(keys)))
# FIXME: efficiency?
del self[key]
item_list = zip(indexes, val.items())
# smallest indexes first - higher indexes not guaranteed to
# exist
item_list.sort()
for pos, (newkey, newval) in item_list:
if self.strict and newkey in self:
raise ValueError('slice assignment must be from unique'
' keys')
self.insert(pos, newkey, newval)
else:
if key not in self:
self._sequence.append(key)
dict.__setitem__(self, key, val)
def __getitem__(self, key):
"""
Allows slicing. Returns an OrderedDict if you slice.
>>> b = OrderedDict([(7, 0), (6, 1), (5, 2), (4, 3), (3, 4), (2, 5), (1, 6)])
>>> b[::-1]
OrderedDict([(1, 6), (2, 5), (3, 4), (4, 3), (5, 2), (6, 1), (7, 0)])
>>> b[2:5]
OrderedDict([(5, 2), (4, 3), (3, 4)])
"""
if isinstance(key, types.SliceType):
# FIXME: does this raise the error we want?
keys = self._sequence[key]
# FIXME: efficiency?
return OrderedDict([(entry, self[entry]) for entry in keys])
else:
return dict.__getitem__(self, key)
__str__ = __repr__
def __setattr__(self, name, value):
"""
Implemented so that accesses to ``sequence`` raise a warning and are
diverted to the new ``setkeys`` method.
"""
if name == 'sequence':
warnings.warn('Use of the sequence attribute is deprecated.'
' Use the keys method instead.', DeprecationWarning)
# NOTE: doesn't return anything
self.setkeys(value)
else:
# FIXME: do we want to allow arbitrary setting of attributes?
# Or do we want to manage it?
object.__setattr__(self, name, value)
def __getattr__(self, name):
"""
Implemented so that access to ``sequence`` raises a warning.
>>> d = OrderedDict()
>>> d.sequence
[]
"""
if name == 'sequence':
warnings.warn('Use of the sequence attribute is deprecated.'
' Use the keys method instead.', DeprecationWarning)
# NOTE: Still (currently) returns a direct reference. Need to
# because code that uses sequence will expect to be able to
# mutate it in place.
return self._sequence
else:
# raise the appropriate error
raise AttributeError("OrderedDict has no '%s' attribute" % name)
def __deepcopy__(self, memo):
"""
To allow deepcopy to work with OrderedDict.
>>> from copy import deepcopy
>>> a = OrderedDict([(1, 1), (2, 2), (3, 3)])
>>> a['test'] = {}
>>> b = deepcopy(a)
>>> b == a
True
>>> b is a
False
>>> a['test'] is b['test']
False
"""
from copy import deepcopy
return self.__class__(deepcopy(self.items(), memo), self.strict)
### Read-only methods ###
def copy(self):
"""
>>> OrderedDict(((1, 3), (3, 2), (2, 1))).copy()
OrderedDict([(1, 3), (3, 2), (2, 1)])
"""
return OrderedDict(self)
def items(self):
"""
``items`` returns a list of tuples representing all the
``(key, value)`` pairs in the dictionary.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.items()
[(1, 3), (3, 2), (2, 1)]
>>> d.clear()
>>> d.items()
[]
"""
return zip(self._sequence, self.values())
def keys(self):
"""
Return a list of keys in the ``OrderedDict``.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.keys()
[1, 3, 2]
"""
return self._sequence[:]
def values(self, values=None):
"""
Return a list of all the values in the OrderedDict.
Optionally you can pass in a list of values, which will replace the
current list. The value list must be the same len as the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.values()
[3, 2, 1]
"""
return [self[key] for key in self._sequence]
def iteritems(self):
"""
>>> ii = OrderedDict(((1, 3), (3, 2), (2, 1))).iteritems()
>>> ii.next()
(1, 3)
>>> ii.next()
(3, 2)
>>> ii.next()
(2, 1)
>>> ii.next()
Traceback (most recent call last):
StopIteration
"""
def make_iter(self=self):
keys = self.iterkeys()
while True:
key = keys.next()
yield (key, self[key])
return make_iter()
def iterkeys(self):
"""
>>> ii = OrderedDict(((1, 3), (3, 2), (2, 1))).iterkeys()
>>> ii.next()
1
>>> ii.next()
3
>>> ii.next()
2
>>> ii.next()
Traceback (most recent call last):
StopIteration
"""
return iter(self._sequence)
__iter__ = iterkeys
def itervalues(self):
"""
>>> iv = OrderedDict(((1, 3), (3, 2), (2, 1))).itervalues()
>>> iv.next()
3
>>> iv.next()
2
>>> iv.next()
1
>>> iv.next()
Traceback (most recent call last):
StopIteration
"""
def make_iter(self=self):
keys = self.iterkeys()
while True:
yield self[keys.next()]
return make_iter()
### Read-write methods ###
def clear(self):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.clear()
>>> d
OrderedDict([])
"""
dict.clear(self)
self._sequence = []
def pop(self, key, *args):
"""
No dict.pop in Python 2.2, gotta reimplement it
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.pop(3)
2
>>> d
OrderedDict([(1, 3), (2, 1)])
>>> d.pop(4)
Traceback (most recent call last):
KeyError: 4
>>> d.pop(4, 0)
0
>>> d.pop(4, 0, 1)
Traceback (most recent call last):
TypeError: pop expected at most 2 arguments, got 3
"""
if len(args) > 1:
            raise TypeError('pop expected at most 2 arguments, got %s' %
                (len(args) + 1))
if key in self:
val = self[key]
del self[key]
else:
try:
val = args[0]
except IndexError:
raise KeyError(key)
return val
def popitem(self, i=-1):
"""
Delete and return an item specified by index, not a random one as in
dict. The index is -1 by default (the last item).
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.popitem()
(2, 1)
>>> d
OrderedDict([(1, 3), (3, 2)])
>>> d.popitem(0)
(1, 3)
>>> OrderedDict().popitem()
Traceback (most recent call last):
KeyError: 'popitem(): dictionary is empty'
>>> d.popitem(2)
Traceback (most recent call last):
IndexError: popitem(): index 2 not valid
"""
if not self._sequence:
raise KeyError('popitem(): dictionary is empty')
try:
key = self._sequence[i]
except IndexError:
raise IndexError('popitem(): index %s not valid' % i)
return (key, self.pop(key))
    def setdefault(self, key, defval=None):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.setdefault(1)
3
>>> d.setdefault(4) is None
True
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1), (4, None)])
>>> d.setdefault(5, 0)
0
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1), (4, None), (5, 0)])
"""
if key in self:
return self[key]
else:
self[key] = defval
return defval
def update(self, from_od):
"""
Update from another OrderedDict or sequence of (key, value) pairs
>>> d = OrderedDict(((1, 0), (0, 1)))
>>> d.update(OrderedDict(((1, 3), (3, 2), (2, 1))))
>>> d
OrderedDict([(1, 3), (0, 1), (3, 2), (2, 1)])
>>> d.update({4: 4})
Traceback (most recent call last):
TypeError: undefined order, cannot get items from dict
>>> d.update((4, 4))
Traceback (most recent call last):
TypeError: cannot convert dictionary update sequence element "4" to a 2-item sequence
"""
if isinstance(from_od, OrderedDict):
for key, val in from_od.items():
self[key] = val
elif isinstance(from_od, dict):
# we lose compatibility with other ordered dict types this way
raise TypeError('undefined order, cannot get items from dict')
else:
# FIXME: efficiency?
# sequence of 2-item sequences, or error
for item in from_od:
try:
key, val = item
except TypeError:
raise TypeError('cannot convert dictionary update'
' sequence element "%s" to a 2-item sequence' % item)
self[key] = val
def rename(self, old_key, new_key):
"""
Rename the key for a given value, without modifying sequence order.
For the case where new_key already exists this raise an exception,
since if new_key exists, it is ambiguous as to what happens to the
associated values, and the position of new_key in the sequence.
>>> od = OrderedDict()
>>> od['a'] = 1
>>> od['b'] = 2
>>> od.items()
[('a', 1), ('b', 2)]
>>> od.rename('b', 'c')
>>> od.items()
[('a', 1), ('c', 2)]
>>> od.rename('c', 'a')
Traceback (most recent call last):
ValueError: New key already exists: 'a'
>>> od.rename('d', 'b')
Traceback (most recent call last):
KeyError: 'd'
"""
if new_key == old_key:
# no-op
return
if new_key in self:
raise ValueError("New key already exists: %r" % new_key)
# rename sequence entry
value = self[old_key]
old_idx = self._sequence.index(old_key)
self._sequence[old_idx] = new_key
# rename internal dict entry
dict.__delitem__(self, old_key)
dict.__setitem__(self, new_key, value)
def setitems(self, items):
"""
This method allows you to set the items in the dict.
It takes a list of tuples - of the same sort returned by the ``items``
method.
>>> d = OrderedDict()
>>> d.setitems(((3, 1), (2, 3), (1, 2)))
>>> d
OrderedDict([(3, 1), (2, 3), (1, 2)])
"""
self.clear()
# FIXME: this allows you to pass in an OrderedDict as well :-)
self.update(items)
def setkeys(self, keys):
"""
        ``setkeys`` allows you to pass in a new list of keys which will
replace the current set. This must contain the same set of keys, but
need not be in the same order.
If you pass in new keys that don't match, a ``KeyError`` will be
raised.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.keys()
[1, 3, 2]
>>> d.setkeys((1, 2, 3))
>>> d
OrderedDict([(1, 3), (2, 1), (3, 2)])
>>> d.setkeys(['a', 'b', 'c'])
Traceback (most recent call last):
KeyError: 'Keylist is not the same as current keylist.'
"""
# FIXME: Efficiency? (use set for Python 2.4 :-)
# NOTE: list(keys) rather than keys[:] because keys[:] returns
# a tuple, if keys is a tuple.
kcopy = list(keys)
kcopy.sort()
self._sequence.sort()
if kcopy != self._sequence:
raise KeyError('Keylist is not the same as current keylist.')
# NOTE: This makes the _sequence attribute a new object, instead
# of changing it in place.
# FIXME: efficiency?
self._sequence = list(keys)
def setvalues(self, values):
"""
You can pass in a list of values, which will replace the
current list. The value list must be the same len as the OrderedDict.
(Or a ``ValueError`` is raised.)
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.setvalues((1, 2, 3))
>>> d
OrderedDict([(1, 1), (3, 2), (2, 3)])
>>> d.setvalues([6])
Traceback (most recent call last):
ValueError: Value list is not the same length as the OrderedDict.
"""
if len(values) != len(self):
# FIXME: correct error to raise?
raise ValueError('Value list is not the same length as the '
'OrderedDict.')
self.update(zip(self, values))
### Sequence Methods ###
def index(self, key):
"""
Return the position of the specified key in the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.index(3)
1
>>> d.index(4)
Traceback (most recent call last):
...
ValueError: 4 is not in list
"""
return self._sequence.index(key)
def insert(self, index, key, value):
"""
Takes ``index``, ``key``, and ``value`` as arguments.
Sets ``key`` to ``value``, so that ``key`` is at position ``index`` in
the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.insert(0, 4, 0)
>>> d
OrderedDict([(4, 0), (1, 3), (3, 2), (2, 1)])
>>> d.insert(0, 2, 1)
>>> d
OrderedDict([(2, 1), (4, 0), (1, 3), (3, 2)])
>>> d.insert(8, 8, 1)
>>> d
OrderedDict([(2, 1), (4, 0), (1, 3), (3, 2), (8, 1)])
"""
if key in self:
# FIXME: efficiency?
del self[key]
self._sequence.insert(index, key)
dict.__setitem__(self, key, value)
def reverse(self):
"""
Reverse the order of the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.reverse()
>>> d
OrderedDict([(2, 1), (3, 2), (1, 3)])
"""
self._sequence.reverse()
def sort(self, *args, **kwargs):
"""
Sort the key order in the OrderedDict.
This method takes the same arguments as the ``list.sort`` method on
your version of Python.
>>> d = OrderedDict(((4, 1), (2, 2), (3, 3), (1, 4)))
>>> d.sort()
>>> d
OrderedDict([(1, 4), (2, 2), (3, 3), (4, 1)])
"""
self._sequence.sort(*args, **kwargs)
class Keys(object):
# FIXME: should this object be a subclass of list?
"""
Custom object for accessing the keys of an OrderedDict.
Can be called like the normal ``OrderedDict.keys`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the keys method."""
return self._main._keys()
def __getitem__(self, index):
"""Fetch the key at position i."""
# NOTE: this automatically supports slicing :-)
return self._main._sequence[index]
def __setitem__(self, index, name):
"""
You cannot assign to keys, but you can do slice assignment to re-order
them.
You can only do slice assignment if the new set of keys is a reordering
of the original set.
"""
if isinstance(index, types.SliceType):
# FIXME: efficiency?
# check length is the same
indexes = range(len(self._main._sequence))[index]
if len(indexes) != len(name):
raise ValueError('attempt to assign sequence of size %s '
'to slice of size %s' % (len(name), len(indexes)))
# check they are the same keys
# FIXME: Use set
old_keys = self._main._sequence[index]
new_keys = list(name)
old_keys.sort()
new_keys.sort()
if old_keys != new_keys:
raise KeyError('Keylist is not the same as current keylist.')
orig_vals = [self._main[k] for k in name]
del self._main[index]
vals = zip(indexes, name, orig_vals)
vals.sort()
for i, k, v in vals:
if self._main.strict and k in self._main:
raise ValueError('slice assignment must be from '
'unique keys')
self._main.insert(i, k, v)
else:
raise ValueError('Cannot assign to keys')
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main._sequence)
# FIXME: do we need to check if we are comparing with another ``Keys``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main._sequence < other
def __le__(self, other): return self._main._sequence <= other
def __eq__(self, other): return self._main._sequence == other
def __ne__(self, other): return self._main._sequence != other
def __gt__(self, other): return self._main._sequence > other
def __ge__(self, other): return self._main._sequence >= other
# FIXME: do we need __cmp__ as well as rich comparisons?
def __cmp__(self, other): return cmp(self._main._sequence, other)
def __contains__(self, item): return item in self._main._sequence
def __len__(self): return len(self._main._sequence)
def __iter__(self): return self._main.iterkeys()
def count(self, item): return self._main._sequence.count(item)
def index(self, item, *args): return self._main._sequence.index(item, *args)
def reverse(self): self._main._sequence.reverse()
def sort(self, *args, **kwds): self._main._sequence.sort(*args, **kwds)
def __mul__(self, n): return self._main._sequence*n
__rmul__ = __mul__
def __add__(self, other): return self._main._sequence + other
def __radd__(self, other): return other + self._main._sequence
## following methods not implemented for keys ##
def __delitem__(self, i): raise TypeError('Can\'t delete items from keys')
def __iadd__(self, other): raise TypeError('Can\'t add in place to keys')
def __imul__(self, n): raise TypeError('Can\'t multiply keys in place')
def append(self, item): raise TypeError('Can\'t append items to keys')
def insert(self, i, item): raise TypeError('Can\'t insert items into keys')
def pop(self, i=-1): raise TypeError('Can\'t pop items from keys')
def remove(self, item): raise TypeError('Can\'t remove items from keys')
def extend(self, other): raise TypeError('Can\'t extend keys')
class Items(object):
"""
Custom object for accessing the items of an OrderedDict.
Can be called like the normal ``OrderedDict.items`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the items method."""
return self._main._items()
def __getitem__(self, index):
"""Fetch the item at position i."""
if isinstance(index, types.SliceType):
# fetching a slice returns an OrderedDict
return self._main[index].items()
key = self._main._sequence[index]
return (key, self._main[key])
def __setitem__(self, index, item):
"""Set item at position i to item."""
if isinstance(index, types.SliceType):
# NOTE: item must be an iterable (list of tuples)
self._main[index] = OrderedDict(item)
else:
# FIXME: Does this raise a sensible error?
orig = self._main.keys[index]
key, value = item
if self._main.strict and key in self and (key != orig):
raise ValueError('slice assignment must be from '
'unique keys')
# delete the current one
del self._main[self._main._sequence[index]]
self._main.insert(index, key, value)
def __delitem__(self, i):
"""Delete the item at position i."""
key = self._main._sequence[i]
if isinstance(i, types.SliceType):
for k in key:
# FIXME: efficiency?
del self._main[k]
else:
del self._main[key]
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main.items())
# FIXME: do we need to check if we are comparing with another ``Items``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main.items() < other
def __le__(self, other): return self._main.items() <= other
def __eq__(self, other): return self._main.items() == other
def __ne__(self, other): return self._main.items() != other
def __gt__(self, other): return self._main.items() > other
def __ge__(self, other): return self._main.items() >= other
def __cmp__(self, other): return cmp(self._main.items(), other)
def __contains__(self, item): return item in self._main.items()
def __len__(self): return len(self._main._sequence) # easier :-)
def __iter__(self): return self._main.iteritems()
def count(self, item): return self._main.items().count(item)
def index(self, item, *args): return self._main.items().index(item, *args)
def reverse(self): self._main.reverse()
def sort(self, *args, **kwds): self._main.sort(*args, **kwds)
def __mul__(self, n): return self._main.items()*n
__rmul__ = __mul__
def __add__(self, other): return self._main.items() + other
def __radd__(self, other): return other + self._main.items()
def append(self, item):
"""Add an item to the end."""
# FIXME: this is only append if the key isn't already present
key, value = item
self._main[key] = value
def insert(self, i, item):
key, value = item
self._main.insert(i, key, value)
def pop(self, i=-1):
key = self._main._sequence[i]
return (key, self._main.pop(key))
def remove(self, item):
key, value = item
try:
assert value == self._main[key]
except (KeyError, AssertionError):
raise ValueError('ValueError: list.remove(x): x not in list')
else:
del self._main[key]
def extend(self, other):
# FIXME: is only a true extend if none of the keys already present
for item in other:
key, value = item
self._main[key] = value
def __iadd__(self, other):
self.extend(other)
## following methods not implemented for items ##
def __imul__(self, n): raise TypeError('Can\'t multiply items in place')
class Values(object):
"""
Custom object for accessing the values of an OrderedDict.
Can be called like the normal ``OrderedDict.values`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the values method."""
return self._main._values()
def __getitem__(self, index):
"""Fetch the value at position i."""
if isinstance(index, types.SliceType):
return [self._main[key] for key in self._main._sequence[index]]
else:
return self._main[self._main._sequence[index]]
def __setitem__(self, index, value):
"""
Set the value at position i to value.
You can only do slice assignment to values if you supply a sequence of
equal length to the slice you are replacing.
"""
if isinstance(index, types.SliceType):
keys = self._main._sequence[index]
if len(keys) != len(value):
raise ValueError('attempt to assign sequence of size %s '
'to slice of size %s' % (len(value), len(keys)))
# FIXME: efficiency? Would be better to calculate the indexes
# directly from the slice object
# NOTE: the new keys can collide with existing keys (or even
# contain duplicates) - these will overwrite
for key, val in zip(keys, value):
self._main[key] = val
else:
self._main[self._main._sequence[index]] = value
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main.values())
# FIXME: do we need to check if we are comparing with another ``Values``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main.values() < other
def __le__(self, other): return self._main.values() <= other
def __eq__(self, other): return self._main.values() == other
def __ne__(self, other): return self._main.values() != other
def __gt__(self, other): return self._main.values() > other
def __ge__(self, other): return self._main.values() >= other
def __cmp__(self, other): return cmp(self._main.values(), other)
def __contains__(self, item): return item in self._main.values()
def __len__(self): return len(self._main._sequence) # easier :-)
def __iter__(self): return self._main.itervalues()
def count(self, item): return self._main.values().count(item)
def index(self, item, *args): return self._main.values().index(item, *args)
def reverse(self):
"""Reverse the values"""
vals = self._main.values()
vals.reverse()
# FIXME: efficiency
self[:] = vals
def sort(self, *args, **kwds):
"""Sort the values."""
vals = self._main.values()
vals.sort(*args, **kwds)
self[:] = vals
def __mul__(self, n): return self._main.values()*n
__rmul__ = __mul__
def __add__(self, other): return self._main.values() + other
def __radd__(self, other): return other + self._main.values()
## following methods not implemented for values ##
def __delitem__(self, i): raise TypeError('Can\'t delete items from values')
def __iadd__(self, other): raise TypeError('Can\'t add in place to values')
def __imul__(self, n): raise TypeError('Can\'t multiply values in place')
def append(self, item): raise TypeError('Can\'t append items to values')
def insert(self, i, item): raise TypeError('Can\'t insert items into values')
def pop(self, i=-1): raise TypeError('Can\'t pop items from values')
def remove(self, item): raise TypeError('Can\'t remove items from values')
def extend(self, other): raise TypeError('Can\'t extend values')
class SequenceOrderedDict(OrderedDict):
"""
Experimental version of OrderedDict that has a custom object for ``keys``,
``values``, and ``items``.
These are callable sequence objects that work as methods, or can be
manipulated directly as sequences.
Test for ``keys``, ``items`` and ``values``.
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.keys
[1, 2, 3]
>>> d.keys()
[1, 2, 3]
>>> d.setkeys((3, 2, 1))
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.setkeys((1, 2, 3))
>>> d.keys[0]
1
>>> d.keys[:]
[1, 2, 3]
>>> d.keys[-1]
3
>>> d.keys[-2]
2
>>> d.keys[0:2] = [2, 1]
>>> d
SequenceOrderedDict([(2, 3), (1, 2), (3, 4)])
>>> d.keys.reverse()
>>> d.keys
[3, 1, 2]
>>> d.keys = [1, 2, 3]
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.keys = [3, 1, 2]
>>> d
SequenceOrderedDict([(3, 4), (1, 2), (2, 3)])
>>> a = SequenceOrderedDict()
>>> b = SequenceOrderedDict()
>>> a.keys == b.keys
1
>>> a['a'] = 3
>>> a.keys == b.keys
0
>>> b['a'] = 3
>>> a.keys == b.keys
1
>>> b['b'] = 3
>>> a.keys == b.keys
0
>>> a.keys > b.keys
0
>>> a.keys < b.keys
1
>>> 'a' in a.keys
1
>>> len(b.keys)
2
>>> 'c' in d.keys
0
>>> 1 in d.keys
1
>>> [v for v in d.keys]
[3, 1, 2]
>>> d.keys.sort()
>>> d.keys
[1, 2, 3]
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)), strict=True)
>>> d.keys[::-1] = [1, 2, 3]
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.keys[:2]
[3, 2]
>>> d.keys[:2] = [1, 3]
Traceback (most recent call last):
KeyError: 'Keylist is not the same as current keylist.'
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.values
[2, 3, 4]
>>> d.values()
[2, 3, 4]
>>> d.setvalues((4, 3, 2))
>>> d
SequenceOrderedDict([(1, 4), (2, 3), (3, 2)])
>>> d.values[::-1]
[2, 3, 4]
>>> d.values[0]
4
>>> d.values[-2]
3
>>> del d.values[0]
Traceback (most recent call last):
TypeError: Can't delete items from values
>>> d.values[::2] = [2, 4]
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> 7 in d.values
0
>>> len(d.values)
3
>>> [val for val in d.values]
[2, 3, 4]
>>> d.values[-1] = 2
>>> d.values.count(2)
2
>>> d.values.index(2)
0
>>> d.values[-1] = 7
>>> d.values
[2, 3, 7]
>>> d.values.reverse()
>>> d.values
[7, 3, 2]
>>> d.values.sort()
>>> d.values
[2, 3, 7]
>>> d.values.append('anything')
Traceback (most recent call last):
TypeError: Can't append items to values
>>> d.values = (1, 2, 3)
>>> d
SequenceOrderedDict([(1, 1), (2, 2), (3, 3)])
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.items()
[(1, 2), (2, 3), (3, 4)]
>>> d.setitems([(3, 4), (2 ,3), (1, 2)])
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.items[0]
(3, 4)
>>> d.items[:-1]
[(3, 4), (2, 3)]
>>> d.items[1] = (6, 3)
>>> d.items
[(3, 4), (6, 3), (1, 2)]
>>> d.items[1:2] = [(9, 9)]
>>> d
SequenceOrderedDict([(3, 4), (9, 9), (1, 2)])
>>> del d.items[1:2]
>>> d
SequenceOrderedDict([(3, 4), (1, 2)])
>>> (3, 4) in d.items
1
>>> (4, 3) in d.items
0
>>> len(d.items)
2
>>> [v for v in d.items]
[(3, 4), (1, 2)]
>>> d.items.count((3, 4))
1
>>> d.items.index((1, 2))
1
>>> d.items.index((2, 1))
Traceback (most recent call last):
...
ValueError: (2, 1) is not in list
>>> d.items.reverse()
>>> d.items
[(1, 2), (3, 4)]
>>> d.items.reverse()
>>> d.items.sort()
>>> d.items
[(1, 2), (3, 4)]
>>> d.items.append((5, 6))
>>> d.items
[(1, 2), (3, 4), (5, 6)]
>>> d.items.insert(0, (0, 0))
>>> d.items
[(0, 0), (1, 2), (3, 4), (5, 6)]
>>> d.items.insert(-1, (7, 8))
>>> d.items
[(0, 0), (1, 2), (3, 4), (7, 8), (5, 6)]
>>> d.items.pop()
(5, 6)
>>> d.items
[(0, 0), (1, 2), (3, 4), (7, 8)]
>>> d.items.remove((1, 2))
>>> d.items
[(0, 0), (3, 4), (7, 8)]
>>> d.items.extend([(1, 2), (5, 6)])
>>> d.items
[(0, 0), (3, 4), (7, 8), (1, 2), (5, 6)]
"""
def __init__(self, init_val=(), strict=True):
OrderedDict.__init__(self, init_val, strict=strict)
self._keys = self.keys
self._values = self.values
self._items = self.items
self.keys = Keys(self)
self.values = Values(self)
self.items = Items(self)
self._att_dict = {
'keys': self.setkeys,
'items': self.setitems,
'values': self.setvalues,
}
def __setattr__(self, name, value):
"""Protect keys, items, and values."""
if not '_att_dict' in self.__dict__:
object.__setattr__(self, name, value)
else:
try:
fun = self._att_dict[name]
except KeyError:
OrderedDict.__setattr__(self, name, value)
else:
fun(value)
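# Example usage (sketch): SequenceOrderedDict exposes keys/values/items as
# callable sequence objects, unlike the plain methods on OrderedDict. The
# values shown follow from the doctests above:
#     d = OrderedDict([('a', 1), ('b', 2)])
#     d.keys()              # -> ['a', 'b']
#     s = SequenceOrderedDict([('a', 1), ('b', 2)])
#     s.keys                # -> ['a', 'b'], no call needed
#     s.keys.reverse()      # reorders the dict in place
#     s.items()             # -> [('b', 2), ('a', 1)]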
| bsd-3-clause | 3,860,535,775,826,466,300 | 32.248918 | 93 | 0.493306 | false |
drichner/docklr | docklr_tests.py | 1 | 2273 | __author__ = 'drichner'
"""
docklr -- docklr_tests.py
Copyright (C) 2014 Dan Richner
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import os
import run as docklr
import unittest
from appinit import db
from docklrapp.models import Config
class FlaskrTestCase(unittest.TestCase):
def setUp(self):
docklr.app.config['TESTING'] = True
docklr.app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///docklr-test.db"
db.create_all()
self.app = docklr.app.test_client()
def tearDown(self):
os.remove('docklr-test.db')
def test_page(self):
rv = self.app.get('/')
assert 'Docklr Home' in rv.data
def test_etcd(self):
rv = self.app.get('/etcd/')
assert 'etcd Start' in rv.data
def test_add_config(self):
config = Config()
config.cluster_name = "Test Cluster"
config.cluster_etcd_locator_url = "https://discovery.etcd.io/50347750807ecec710d21b67e6b63c88"
db.session.add(config)
db.session.commit()
assert len(Config.query.all()) == 1
rv = self.app.get('/')
assert 'Test Cluster' in rv.data
# helper methods
def getConfigRecord(self):
testconfig = Config.query.first()
if not testconfig:
config = Config()
config.cluster_name = "Test Cluster"
config.cluster_etcd_locator_url = "https://discovery.etcd.io/50347750807ecec710d21b67e6b63c88"
db.session.add(config)
db.session.commit()
return self.getConfigRecord()
return testconfig
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 | 8,067,488,863,393,696,000 | 27.78481 | 106 | 0.6674 | false |
Rosebotics/pymata-aio | examples/sparkfun_redbot/sparkfun_experiments/Exp5_Bumpers.py | 1 | 2908 | #!/usr/bin/python
"""
Exp5_Bumpers -- RedBot Experiment 5
Now let's experiment with the whisker bumpers. These super-simple switches
let you detect a collision before it really happens- the whisker will
bump something before your robot crashes into it.
This sketch was written by SparkFun Electronics, with lots of help from
the Arduino community.
This code is completely free for any use.
Visit https://learn.sparkfun.com/tutorials/redbot-inventors-kit-guide
for SIK information.
8 Oct 2013 M. Hord
Revised 30 Oct 2014 B. Huang
Revised 2 Oct 2015 L. Mathews
"""
import sys
import signal
from pymata_aio.pymata3 import PyMata3
from library.redbot import RedBotMotors, RedBotBumper
WIFLY_IP_ADDRESS = None # Leave set as None if not using WiFly
WIFLY_IP_ADDRESS = "137.112.217.88" # If using a WiFly on the RedBot, set the ip address here.
if WIFLY_IP_ADDRESS:
board = PyMata3(ip_address=WIFLY_IP_ADDRESS)
else:
# Use a USB cable to RedBot or an XBee connection instead of WiFly.
COM_PORT = None # Use None for automatic com port detection, or set if needed i.e. "COM7"
board = PyMata3(com_port=COM_PORT)
# Instantiate the motor control object. This only needs to be done once.
motors = RedBotMotors(board)
left_bumper = RedBotBumper(board, 3) # initializes bumper object on pin 3
right_bumper = RedBotBumper(board, 11) # initializes bumper object on pin 11
BUTTON_PIN = 12
def signal_handler(sig, frame):
"""Helper method to shutdown the RedBot if Ctrl-c is pressed"""
print('\nYou pressed Ctrl+C')
if board is not None:
board.send_reset()
board.shutdown()
sys.exit(0)
def setup():
signal.signal(signal.SIGINT, signal_handler)
print("Experiment 5 - Bump sensors")
def loop():
motors.drive(255)
board.sleep(0.1) # When using a wireless connection a small sleep is necessary
left_bumper_state = left_bumper.read()
right_bumper_state = right_bumper.read()
if left_bumper_state == 0: # left bumper is bumped
print("Left bump")
reverse()
turn_right()
    if right_bumper_state == 0:  # right bumper is bumped
print("Right bump")
reverse()
turn_left()
def reverse():
"""backs up at full power"""
motors.drive(-255)
board.sleep(0.5)
motors.brake()
board.sleep(0.1)
def turn_right():
"""turns RedBot to the Right"""
motors.left_motor(-150) # spin CCW
motors.right_motor(-150) # spin CCW
board.sleep(0.5)
    motors.brake()
board.sleep(0.1) # short delay to let robot fully stop
def turn_left():
"""turns RedBot to the Left"""
    motors.left_motor(150)  # spin CW
    motors.right_motor(150)  # spin CW
board.sleep(0.5)
    motors.brake()
board.sleep(0.1) # short delay to let robot fully stop
if __name__ == "__main__":
setup()
while True:
loop()
| gpl-3.0 | 8,720,228,147,168,475,000 | 25.436364 | 95 | 0.671939 | false |
pgr-me/metis_projects | 04-marijuana/library/utilities.py | 1 | 3623 | from nytimesarticle import articleAPI
import collections
import math
import datetime
import re
import pickle
# api keys
prasmuss = '7b4597b0dc6845688a8f90c00f3e60b6'
peter_gray_rasmussen = '67391c8a5c6c2d8926eb3d9c5d136c59:7:72273330'
proton = 'f8c34c7cda7848f997a9c273815d28a9'
api = articleAPI(proton)
def convert(data):
'''
this function encodes dictionary of unicode entries into utf8
from http://stackoverflow.com/questions/1254454/fastest-way-to-convert-a-dicts-keys-values-from-unicode-to-str
'''
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
def get_nyt_article_stats(articles_and_meta):
'''
returns the number of hits, number of hits in 100 pages, and hits per page
'''
num_hits = articles_and_meta['response']['meta']['hits'] # total number of articles for query
hits_per_query_set = articles_and_meta['response']['meta']['offset'] # each query gets up to 100 pages
hits_per_page = len(articles_and_meta['response']['docs']) # hits per page
pages = hits_per_query_set / hits_per_page
queries = int(math.ceil(num_hits / float(hits_per_page)))
return num_hits, hits_per_query_set, pages, hits_per_page, queries
def get_last_date_plus_one(articles_and_meta):
"""
returns last (not necessarily most recent) date
"""
date_li = articles_and_meta['response']['docs'][-1]['pub_date'].split('T')[0].split('-')
date_str = ''.join(date_li)
date_date = datetime.datetime.strptime(date_str, '%Y%m%d').date()
date_date_plus_one = str(date_date + datetime.timedelta(days=1))
output = re.sub('-', '', date_date_plus_one)
return output
def extract_to_mongod(query, date_begin, date_end, mdb):
"""
pings nyt api and writes to mongodb
"""
data_converted = None
while data_converted is None:
try:
            data = api.search(q=query, begin_date=date_begin, end_date=date_end, sort='oldest')
data_converted = convert(data) # convert unicode to strings
        except Exception:  # keep retrying until the API call succeeds
            pass
date_date = datetime.datetime.strptime(date_begin, '%Y%m%d').date()
date_date_plus_one = str(date_date + datetime.timedelta(days=1))
date_begin = re.sub('-', '', date_date_plus_one)
stats = get_nyt_article_stats(data_converted) # outputs key stats from first ping
pings = stats[-1] # number of pings required
pings_list = range(0, pings - 1)
d_begin = date_begin
for ping in pings_list:
print d_begin
# get data from api
try:
            data2 = api.search(q=query, begin_date=d_begin, end_date=date_end, sort='oldest')
data_converted2 = convert(data2) # convert unicode to strings
last_date_plus_one = get_last_date_plus_one(data_converted2)
mdb.insert_one(data_converted2) # insert one set of articles into db
d_begin = last_date_plus_one # update date
        except Exception:  # on failure, skip ahead one day and continue
date_date = datetime.datetime.strptime(d_begin, '%Y%m%d').date()
date_date_plus_one = str(date_date + datetime.timedelta(days=1))
d_begin = re.sub('-', '', date_date_plus_one)
return 'success'
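# Example usage (sketch, assumes a running local MongoDB and the pymongo
# package; database/collection names are illustrative):
#     from pymongo import MongoClient
#     collection = MongoClient().nyt.marijuana_articles
#     extract_to_mongod('marijuana', '20100101', '20160430', collection)
#     pickle_mongo(collection, 'marijuana_articles')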
def pickle_mongo(collection_name, filename):
cur = collection_name.find()
l = []
for doc in cur:
l.append(doc)
file_name = filename + '.pickle'
    with open(file_name, 'wb') as f:
        pickle.dump(l, f)
| gpl-3.0 | 5,287,285,362,401,634,000 | 35.979592 | 114 | 0.645598 | false |
gmelenka/BraidedCompositeDesign | main.py | 1 | 49822 | from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty
from kivy.uix.spinner import Spinner
from kivy.uix.listview import ListItemButton
from kivy.uix.widget import Widget
from kivy.graphics import Color, Line, Rectangle
from kivy.metrics import dp
from kivy.vector import Vector
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from thumbchooser import FileChooserThumbView
from kivy.core.window import Window
from os import listdir
from os.path import dirname, join
import os
import glob
import math
import shutil
#version required for Buildozer
__version__ = "1.0.0"
__author__ = "Garrett Melenka, Marcus Ivey"
__copyright__ = "Copyright 2015, The Multipurpose Composites Group- University of Alberta"
__credits__ = ["Garrett Melenka", "Marcus Ivey", "Jason Carey"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Garrett Melenka"
__email__ = "[email protected]"
__status__ = "Production"
class MainScreen(FloatLayout):
pass
#Load dialog for angle measure popup window
class LoadDialog(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
#make sure image file have been selected
def fileSelect(self, path, name):
print name
if name:
self.load(path, name)
#Angle Measurement layout
class AngleLayout(BoxLayout):
def dismiss_popup(self):
self._popup.dismiss()
def show_load(self):
content = LoadDialog(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Select Image", content=content, size_hint=(0.99, 0.99))
self._popup.open()
def load(self, path, filename):
filename = filename[0]
image = self.ids['image']
image.source = filename
self.dismiss_popup()
def initiate_angle_finder(self):
button = self.ids['toggle']
image = self.ids['scatter']
anchor = self.ids['anchor']
if button.state == 'down':
image.do_scale = False
image.do_rotation = False
image.do_translation_x = False
image.do_translation_y = False
button.text = 'Clear'
angleFinder = AngleFinder()
anchor.add_widget(angleFinder)
if button.state == 'normal':
button.text = 'Draw'
image.do_scale = True
image.do_rotation = True
image.do_translation_x = True
image.do_translation_y = True
anchor.clear_widgets()
def reset_image(self):
button = self.ids['toggle']
anchor = self.ids['anchor']
scatter = self.ids['scatter']
image = self.ids['image']
anchor2 = self.ids['anchor2']
scatter.scale = 1
scatter.rotation = 0
scatter.pos = (anchor2.center_x - image.center_x, anchor2.center_y - image.center_y)
button.state = 'normal'
button.text = 'Draw'
scatter.do_scale = True
scatter.do_rotation = True
scatter.do_translation_x = True
scatter.do_translation_y = True
anchor.clear_widgets()
#This widget lets the user draw two straight lines and calculates the minor angle between them.
class AngleFinder(Widget):
#This function initializes the widget with a touch count of zero
def __init__(self, **kwargs):
super(AngleFinder, self).__init__(**kwargs)
self.touch_down_count = 0
#This function defines the actions that take place when a touch event occurs
def on_touch_down(self, touch):
#Record the touch coordinates in x and y as variables
x1 = touch.x
y1 = touch.y
#when the touch count is 0 or 1, we will record the touch coordinates and draw a crosshair at the touch location
if self.touch_down_count > 1:
return
with self.canvas:
touch.ud['label'] = TextInput()
self.initiate_touch_label(touch.ud['label'], touch)
self.add_widget(touch.ud['label'])
#save the touch points to the user dictionary
touch.ud['x1'] = x1
touch.ud['y1'] = y1
#set parameters for crosshair display
Color(1, 0, 0)
l = dp(25)
w = dp(1)
#draw crosshair
Rectangle(pos=(touch.ud['x1'] - w / 2, touch.ud['y1'] - l / 2), size=(w, l))
Rectangle(pos=(touch.ud['x1'] - l / 2, touch.ud['y1'] - w / 2), size=(l, w))
#Initialize the vector v1
if self.touch_down_count == 0:
#Record the touch coordinates to variables
x2 = touch.x
y2 = touch.y
#Save touch coordinates to the user dictionary
touch.ud['x2'] = x2
touch.ud['y2'] = y2
#When the touch count is zero (first touch), we define a vector v1 based on the touch positions in ud
v1 = (touch.ud['x2'] - touch.ud['x1'], touch.ud['y2'] - touch.ud['y1'])
self.v1 = v1
#Function to define what happens on a drag action
def on_touch_move(self, touch):
#Record the touch coordinates to variables
x2 = touch.x
y2 = touch.y
#Save touch coordinates to the user dictionary
touch.ud['x2'] = x2
touch.ud['y2'] = y2
ud = touch.ud
#define a group, g, that will be assigned to the line drawn to allow the line to be redrawn as movements occur, leaving only one line on the screen
ud['group'] = g = str(touch.uid)
self.canvas.remove_group(g)
#When the touch count is zero (first touch), we define a vector v1 based on the touch positions in ud
if self.touch_down_count == 0:
v1 = (touch.ud['x2'] - touch.ud['x1'], touch.ud['y2'] - touch.ud['y1'])
self.v1 = v1
#When the touch count is 1 (second touch), we define a vector v2 based on the touch positions in ud. The angle between vectors v1 and v2 is then calculated.
if self.touch_down_count == 1:
v2 = (touch.ud['x2'] - touch.ud['x1'], touch.ud['y2'] - touch.ud['y1'])
self.v2 = v2
angle = Vector(self.v1).angle(self.v2)
absoluteAngle = abs(angle)
#The following if statement is used to ensure the minor angle is always calculated
if absoluteAngle > 90:
absoluteAngle = 180 - absoluteAngle
#The next two lines are used to update the angle label value as the lines are moved around
touch.ud['angle'] = absoluteAngle
self.update_touch_label(touch.ud['label'], touch)
        #If the touch count is greater than 1 (third touch or later), ignore the touch so the two drawn lines are left unchanged
if self.touch_down_count > 1:
return
#This defines the line and crosshair that is drawn between the initial touch point and where the finger has been dragged
with self.canvas:
Color(1, 0, 0)
l = dp(25)
w = dp(1)
Line(points=[touch.ud['x1'], touch.ud['y1'], x2, y2], width=w, group=g)
Rectangle(pos=(touch.ud['x2'] - w / 2, touch.ud['y2'] - l / 2), size=(w, l), group=g)
Rectangle(pos=(touch.ud['x2'] - l / 2, touch.ud['y2'] - w / 2), size=(l, w), group=g)
#this function defines what to do when a touch is released. The touch count is simply incremented
def on_touch_up(self, touch):
self.touch_down_count += 1
#This function defines how the angle label is to be updated. It indicates the number of digits to show, the label size and position, color, and font type
def update_touch_label(self, label, touch):
degree = unichr(176)
label.text = '%.1f%s' % ((touch.ud['angle']), degree)
label.pos = (self.center_x - dp(40), self.height + dp(70))
label.font_size = dp(24)
label.size = dp(75), dp(40)
label.padding_x = [dp(10), dp(10)]
label.padding_y = [dp(5), dp(5)]
label.readonly = True
label.multiline = False
def initiate_touch_label(self, label, touch):
degree = unichr(176)
label.text = '%s%s' % ('---', degree)
label.pos = (self.center_x - dp(40), self.height + dp(70))
label.font_size = dp(24)
label.size = dp(75), dp(40)
label.padding_x = [dp(10), dp(10)]
label.padding_y = [dp(5), dp(5)]
label.readonly = True
label.multiline = False
pass
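# Stand-alone sketch of the minor-angle calculation performed above, for two
# segments given by endpoint pairs (helper name and signature are illustrative):
def minor_angle_between(p1, p2, q1, q2):
    """Return the minor angle (0-90 degrees) between segments p1-p2 and q1-q2."""
    v1 = (p2[0] - p1[0], p2[1] - p1[1])
    v2 = (q2[0] - q1[0], q2[1] - q1[1])
    angle = abs(Vector(v1).angle(v2))
    # Vector.angle returns -180..180 degrees; fold onto 0..90 as the widget does
    if angle > 90:
        angle = 180 - angle
    return angle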
#Layout for static about screen
class About_Screen(FloatLayout):
pass
#About popup for Micromechanics window
class MicroMechanicsAbout(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
#Calculation of micromechanics properties for a unidirectional lamina
class MicroMechanics(FloatLayout):
def dismiss_popup(self):
self._popup.dismiss()
def AboutMicromechanics(self):
content = MicroMechanicsAbout(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Micro-mechanics About", content=content, size_hint=(0.9, 0.9))
self._popup.open()
def load(self):
pass
def fiberSelect(self):
fiberType = self.ids.yarnSelectSpinner.text
#E-glass properties
if fiberType == "E-Glass":
self.ids.longitudinalModulus.text = str(73.0)
self.ids.transverseModulus.text = str(73.0)
self.ids.shearModulus.text = str(30.0)
self.ids.majorPoissonRatio.text = str(0.23)
#S-glass properties
if fiberType == "S-Glass":
self.ids.longitudinalModulus.text = str(86.0)
self.ids.transverseModulus.text = str(86.0)
self.ids.shearModulus.text = str(35)
self.ids.majorPoissonRatio.text = str(0.23)
#AS4 Carbon fiber properties
if fiberType == "AS4-Carbon":
self.ids.longitudinalModulus.text = str(235.0)
self.ids.transverseModulus.text = str(15)
self.ids.shearModulus.text = str(27)
self.ids.majorPoissonRatio.text = str(0.20)
#T300 Carbon fiber properties
if fiberType == "T300 Carbon":
self.ids.longitudinalModulus.text = str(230.0)
self.ids.transverseModulus.text = str(15)
self.ids.shearModulus.text = str(27)
self.ids.majorPoissonRatio.text = str(0.20)
#Boron fiber properties
if fiberType == "Boron":
self.ids.longitudinalModulus.text = str(395.0)
self.ids.transverseModulus.text = str(395)
self.ids.shearModulus.text = str(165)
self.ids.majorPoissonRatio.text = str(0.13)
#Kevlar 49 fiber properties
if fiberType == "Kevlar 49":
self.ids.longitudinalModulus.text = str(131.0)
self.ids.transverseModulus.text = str(7)
self.ids.shearModulus.text = str(21)
self.ids.majorPoissonRatio.text = str(0.33)
        #Custom fiber properties
if fiberType == "Custom":
self.ids.longitudinalModulus.text = str(100.0)
self.ids.transverseModulus.text = str(10)
self.ids.shearModulus.text = str(20)
self.ids.majorPoissonRatio.text = str(0.20)
def matrixSelect(self):
matrixType = self.ids.matrixSelectSpinner.text
#Epoxy mechanical properties
if matrixType == "Epoxy":
self.ids.matrixModulus.text = str(4.3)
self.ids.matrixShearModulus.text = str(1.6)
self.ids.matrixPoissonRatio.text = str(0.35)
#polyester mechanical properties
if matrixType == "Polyester":
self.ids.matrixModulus.text = str(3.2)
self.ids.matrixShearModulus.text = str(0.7)
self.ids.matrixPoissonRatio.text = str(0.35)
#Polyimides mechanical properties
if matrixType == "Polyimides":
self.ids.matrixModulus.text = str(1.4)
self.ids.matrixShearModulus.text = str(3.1)
self.ids.matrixPoissonRatio.text = str(0.35)
#PEEK mechanical properties
if matrixType == "PEEK":
self.ids.matrixModulus.text = str(1.32)
self.ids.matrixShearModulus.text = str(3.7)
self.ids.matrixPoissonRatio.text = str(0.35)
def CalculateVF(self, data):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn' and matrixVal != 'Select Matrix':
Ef1 = float(self.ids.longitudinalModulus.text)
Ef2 = float(self.ids.transverseModulus.text)
Gf12 = float(self.ids.shearModulus.text)
nuf12 = float(self.ids.majorPoissonRatio.text)
VF = float(self.ids.volumeFraction.text)
Em = float(self.ids.matrixModulus.text)
Gm = float(self.ids.matrixShearModulus.text)
num = float(self.ids.matrixPoissonRatio.text)
            if 0 <= VF < 1:
#Calculate Longitudinal Elastic Modulus
E1 = Ef1 * VF + Em * (1-VF)
#Calculate Transverse Elastic Modulus
E2 = Ef2*Em / (Ef2*(1-VF) + Em*VF)
#Calculate Major Poisson's Ratio
nu12 = nuf12*VF + num*(1-VF)
#Calculate Shear Modulus
G12 = Gf12*Gm / (Gm * VF+ Gf12 * (1 - VF))
#Write values to screen
self.ids.modulusE1.text = '{0:.3f}'.format(E1)
self.ids.modulusE2.text = '{0:.3f}'.format(E2)
self.ids.modulusG12.text = '{0:.3f}'.format(G12)
self.ids.poissonNu12.text = '{0:.3f}'.format(nu12)
def volumeFractionUp(self):
VF = float(self.ids.volumeFraction.text)
VF_new = VF + 0.1
if VF_new <= 1.0:
self.ids.volumeFraction.text = str(VF_new)
def volumeFractionDown(self):
VF = float(self.ids.volumeFraction.text)
VF_new = VF - 0.1
if VF_new > 0.0:
self.ids.volumeFraction.text = str(VF_new)
def EF1Up(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
EF1 = float(self.ids.longitudinalModulus.text)
EF1_new = EF1 + 1.0
#if EF1_new <= 1.0:
self.ids.longitudinalModulus.text = str(EF1_new)
def EF1Down(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
EF1 = float(self.ids.longitudinalModulus.text)
EF1_new = EF1 - 1.0
if EF1_new > 0.0:
self.ids.longitudinalModulus.text = str(EF1_new)
def EF2Up(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
EF2 = float(self.ids.transverseModulus.text)
EF2_new = EF2 + 1.0
#if EF1_new <= 1.0:
self.ids.transverseModulus.text = str(EF2_new)
def EF2Down(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
EF2 = float(self.ids.transverseModulus.text)
EF2_new = EF2 - 1.0
if EF2_new > 0.0:
self.ids.transverseModulus.text = str(EF2_new)
def GF12Up(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
GF12 = float(self.ids.shearModulus.text)
GF12_new = GF12 + 1.0
#if EF1_new <= 1.0:
self.ids.shearModulus.text = str(GF12_new)
def GF12Down(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
GF12 = float(self.ids.shearModulus.text)
GF12_new = GF12 - 1.0
if GF12_new > 0.0:
self.ids.shearModulus.text = str(GF12_new)
def nuf12Up(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
nuf12 = float(self.ids.majorPoissonRatio.text)
nuf12_new = nuf12 + 0.1
#if EF1_new <= 1.0:
self.ids.majorPoissonRatio.text = str(nuf12_new)
def nuf12Down(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
nuf12 = float(self.ids.majorPoissonRatio.text)
nuf12_new = nuf12 - 0.1
if nuf12_new > 0.0:
self.ids.majorPoissonRatio.text = str(nuf12_new)
def EmUp(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if matrixVal != 'Select Matrix':
Em = float(self.ids.matrixModulus.text)
Em_new = Em + 0.1
#if EF1_new <= 1.0:
self.ids.matrixModulus.text = str(Em_new)
def EmDown(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if matrixVal != 'Select Matrix':
Em = float(self.ids.matrixModulus.text)
Em_new = Em - 0.1
if Em_new > 0.0:
self.ids.matrixModulus.text = str(Em_new)
def GmUp(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if matrixVal != 'Select Matrix':
Gm = float(self.ids.matrixShearModulus.text)
Gm_new = Gm + 0.1
#if EF1_new <= 1.0:
self.ids.matrixShearModulus.text = str(Gm_new)
def GmDown(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if matrixVal != 'Select Matrix':
Gm = float(self.ids.matrixShearModulus.text)
Gm_new = Gm - 0.1
if Gm_new > 0.0:
self.ids.matrixShearModulus.text = str(Gm_new)
def NuMUp(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if matrixVal != 'Select Matrix':
Num = float(self.ids.matrixPoissonRatio.text)
Num_new = Num + 0.1
#if EF1_new <= 1.0:
self.ids.matrixPoissonRatio.text = str(Num_new)
def NuMDown(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if matrixVal != 'Select Matrix':
Num = float(self.ids.matrixPoissonRatio.text)
Num_new = Num - 0.1
if Num_new > 0.0:
self.ids.matrixPoissonRatio.text = str(Num_new)
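# Stand-alone sketch of the rule-of-mixtures relations used in
# MicroMechanics.CalculateVF (function name and sample values are illustrative):
def rule_of_mixtures(Ef1, Ef2, Gf12, nuf12, Em, Gm, num, Vf):
    """Return (E1, E2, G12, nu12) for a unidirectional lamina, moduli in GPa."""
    E1 = Ef1 * Vf + Em * (1 - Vf)                    # longitudinal modulus
    E2 = Ef2 * Em / (Ef2 * (1 - Vf) + Em * Vf)       # transverse modulus
    G12 = Gf12 * Gm / (Gm * Vf + Gf12 * (1 - Vf))    # in-plane shear modulus
    nu12 = nuf12 * Vf + num * (1 - Vf)               # major Poisson's ratio
    return E1, E2, G12, nu12
# Worked example, E-glass/epoxy at Vf = 0.6:
#     rule_of_mixtures(73.0, 73.0, 30.0, 0.23, 4.3, 1.6, 0.35, 0.6)
#     -> E1 = 45.52, E2 ~ 9.88, G12 ~ 3.70, nu12 = 0.278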
#About popup for lamina strength window
class LaminaStrengthAbout(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
#Calculation of lamina strength properties of a unidirectional lamina
class LaminaStrength(BoxLayout):
def dismiss_popup(self):
self._popup.dismiss()
def AboutStrength(self):
content = LaminaStrengthAbout(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Lamina Strength About", content=content, size_hint=(0.9, 0.9))
self._popup.open()
def load(self):
pass
def fiberSelect(self):
fiberType = self.ids.yarnSelectSpinner.text
#E-glass properties
if fiberType == "E-Glass":
self.ids.fiberStrength.text = str(3450.0)
self.ids.fiberModulus.text = str(73.0)
#S-glass properties
if fiberType == "S-Glass":
self.ids.fiberStrength.text = str(4500.0)
self.ids.fiberModulus.text = str(86.0)
#AS4 Carbon fiber properties
if fiberType == "AS4-Carbon":
self.ids.fiberStrength.text = str(3700.0)
self.ids.fiberModulus.text = str(235.0)
#T300 Carbon fiber properties
if fiberType == "T300 Carbon":
self.ids.fiberStrength.text = str(3100.0)
self.ids.fiberModulus.text = str(230.0)
#Boron fiber properties
if fiberType == "Boron":
self.ids.fiberStrength.text = str(3450.0)
self.ids.fiberModulus.text = str(395.0)
#Kevlar 49 fiber properties
if fiberType == "Kevlar 49":
self.ids.fiberStrength.text = str(3800.0)
self.ids.fiberModulus.text = str(131.0)
        #Custom fiber properties
if fiberType == "Custom":
self.ids.fiberStrength.text = str(1000.0)
self.ids.fiberModulus.text = str(100.0)
def matrixSelect(self):
matrixType = self.ids.matrixSelectSpinner.text
#Epoxy mechanical properties
if matrixType == "Epoxy":
self.ids.matrixModulus.text = str(4.3)
self.ids.matrixShearModulus.text = str(1.6)
#self.ids.matrixPoissonRatio.text = str(0.35)
#polyester mechanical properties
if matrixType == "Polyester":
self.ids.matrixModulus.text = str(3.2)
self.ids.matrixShearModulus.text = str(0.7)
#self.ids.matrixPoissonRatio.text = str(0.35)
#Polyimides mechanical properties
if matrixType == "Polyimides":
self.ids.matrixModulus.text = str(1.4)
self.ids.matrixShearModulus.text = str(3.1)
#self.ids.matrixPoissonRatio.text = str(0.35)
#PEEK mechanical properties
if matrixType == "PEEK":
self.ids.matrixModulus.text = str(1.32)
self.ids.matrixShearModulus.text = str(3.7)
#self.ids.matrixPoissonRatio.text = str(0.35)
#Calculate the strength properties of a composite lamina
def CalculateLaminaStrength(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
        if yarnVal != 'Select Yarn' and matrixVal != 'Select Matrix':
Ef = float(self.ids.fiberModulus.text)
Em = float(self.ids.matrixModulus.text)
Gm = float(self.ids.matrixShearModulus.text)
Vf = float(self.ids.volumeFraction.text)
sigmaF = float(self.ids.fiberStrength.text)
if Ef > 0:
                if 0 <= Vf < 1:
#Calculate ultimate tensile strength
sigmaFiber = sigmaF * Vf + sigmaF * (Em / Ef) * (1-Vf)
self.ids.tensileStrength.text = '{0:.1f}'.format(sigmaFiber)
#Calculate ultimate compressive strength
num = Vf * Em * Ef
den = 3 * (1 - Vf)
sqrt = math.sqrt(num / den)
sigmaC = 2 * Vf * sqrt
self.ids.compressiveStrength.text = '{0:.1f}'.format(sigmaC)
#Calculate ultimate compressive strength shear mode
sigmaCshear = (Gm / (1 - Vf)) * 1000
self.ids.compressiveStrengthShear.text = '{0:.1f}'.format(sigmaCshear)
def volumeFractionUp(self):
VF = float(self.ids.volumeFraction.text)
VF_new = VF + 0.1
if VF_new <= 1.0:
self.ids.volumeFraction.text = str(VF_new)
def volumeFractionDown(self):
VF = float(self.ids.volumeFraction.text)
VF_new = VF - 0.1
if VF_new > 0.0:
self.ids.volumeFraction.text = str(VF_new)
def fiberModulusUp(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if yarnVal != 'Select Yarn':
EF = float(self.ids.fiberModulus.text)
EF_new = EF + 1.0
#if EF_new <= 1.0:
self.ids.fiberModulus.text = str(EF_new)
def fiberModulusDown(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if yarnVal != 'Select Yarn':
EF = float(self.ids.fiberModulus.text)
EF_new = EF - 1.0
if EF_new > 0.0:
self.ids.fiberModulus.text = str(EF_new)
def fiberStrengthUp(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if yarnVal != 'Select Yarn':
SF = float(self.ids.fiberStrength.text)
SF_new = SF + 10.0
#if EF_new <= 1.0:
self.ids.fiberStrength.text = str(SF_new)
def fiberStrengthDown(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if yarnVal != 'Select Yarn':
SF = float(self.ids.fiberStrength.text)
SF_new = SF - 10.0
if SF_new > 0.0:
self.ids.fiberStrength.text = str(SF_new)
def matrixModulusUp(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if matrixVal != 'Select Matrix':
EM = float(self.ids.matrixModulus.text)
EM_new = EM + 1.0
#if EF_new <= 1.0:
self.ids.matrixModulus.text = str(EM_new)
def matrixModulusDown(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if matrixVal != 'Select Matrix':
EM = float(self.ids.matrixModulus.text)
EM_new = EM - 1.0
if EM_new > 0.0:
self.ids.matrixModulus.text = str(EM_new)
def matrixShearModulusUp(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if matrixVal != 'Select Matrix':
GM = float(self.ids.matrixShearModulus.text)
GM_new = GM + 1.0
#if EF_new <= 1.0:
self.ids.matrixShearModulus.text = str(GM_new)
def matrixShearModulusDown(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if matrixVal != 'Select Matrix':
GM = float(self.ids.matrixShearModulus.text)
GM_new = GM - 1.0
if GM_new > 0.0:
self.ids.matrixShearModulus.text = str(GM_new)
#coordinate system transformation matrix
class CoordinateTransform(BoxLayout):
def CalculateTransform(self):
angle = float(self.ids.braidAngle.text)
angleRad = angle * (math.pi / 180)
        # Classical 2D stress transformation matrix (m = cos, n = sin):
        #   [  m^2    n^2    2mn     ]
        #   [  n^2    m^2   -2mn     ]
        #   [ -mn     mn     m^2-n^2 ]
        # The original set T12 = cos^2, T22 = sin^2 and T31 = +mn, which is
        # not a valid transformation (e.g. at 90 deg it left sigma_x unchanged).
        T11 = math.cos(angleRad) ** 2
        T12 = math.sin(angleRad) ** 2
        T13 = 2 * math.cos(angleRad) * math.sin(angleRad)
        T21 = T12
        T22 = math.cos(angleRad) ** 2
        T23 = -T13
        T31 = -math.cos(angleRad) * math.sin(angleRad)
        T32 = -T31
        T33 = (math.cos(angleRad) ** 2) - (math.sin(angleRad) ** 2)
T11inv = T11
T12inv = T12
T13inv = -T13
T21inv = T12inv
T22inv = T22
T23inv = T13
T31inv = -T31
T32inv = -T31inv
T33inv = T33
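        # Sanity check (with the corrected entries): at 45 deg,
        # T11 = T12 = T21 = T22 = 0.5, T13 = 1.0, T31 = -0.5 and T33 = 0.0;
        # multiplying [T] by the inverse assembled above gives the identity.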
#Display transformation matrix to screen
#format output for 3 digits after decimal place
self.ids.T11.text = '{0:.3f}'.format(T11)
self.ids.T12.text = '{0:.3f}'.format(T12)
self.ids.T13.text = '{0:.3f}'.format(T13)
self.ids.T21.text = '{0:.3f}'.format(T21)
self.ids.T22.text = '{0:.3f}'.format(T22)
self.ids.T23.text = '{0:.3f}'.format(T23)
self.ids.T31.text = '{0:.3f}'.format(T31)
self.ids.T32.text = '{0:.3f}'.format(T32)
self.ids.T33.text = '{0:.3f}'.format(T33)
#Display inverse transformation matrix to screen
self.ids.T11inv.text = '{0:.3f}'.format(T11inv)
self.ids.T12inv.text = '{0:.3f}'.format(T12inv)
self.ids.T13inv.text = '{0:.3f}'.format(T13inv)
self.ids.T21inv.text = '{0:.3f}'.format(T21inv)
self.ids.T22inv.text = '{0:.3f}'.format(T22inv)
self.ids.T23inv.text = '{0:.3f}'.format(T23inv)
self.ids.T31inv.text = '{0:.3f}'.format(T31inv)
self.ids.T32inv.text = '{0:.3f}'.format(T32inv)
self.ids.T33inv.text = '{0:.3f}'.format(T33inv)
def AngleDown(self):
angle = float(self.ids.braidAngle.text)
angleNew = angle - 1.0
self.ids.braidAngle.text = '{0:.1f}'.format(angleNew)
def AngleUp(self):
angle = float(self.ids.braidAngle.text)
angleNew = angle + 1.0
self.ids.braidAngle.text = '{0:.1f}'.format(angleNew)
#Calculation of braid manufacturing parameters using input braid geometry and braid machine kinematics
class BraidManufacturingAbout(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
class BraidManufacturing(BoxLayout):
def dismiss_popup(self):
self._popup.dismiss()
def AboutManufacturing(self):
content = BraidManufacturingAbout(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Braid Manufacturing About", content=content, size_hint=(0.9, 0.9))
self._popup.open()
def load(self):
pass
def CalculateManufacture(self):
R = float(self.ids.radius.text)
mandrelVelocity = float(self.ids.mandrelVelocity.text)
rotationalVelocity = float(self.ids.carrierSpeed.text)
Wy = float(self.ids.yarnWidth.text)
gamma = math.radians(float(self.ids.halfConeAngle.text))
N = float(self.ids.carriers.text)
#Calculate braid angle from mandrel/ carrier speed
rho = 2 * math.pi * R * (mandrelVelocity / rotationalVelocity)
angle = math.atan(rho) * (180 / math.pi)
self.ids.braidAngle.text = '{0:.1f}'.format(angle)
#Calculate Braid Jam Angle
numerator = Wy * math.sin(gamma)
denominator = 2 * R * math.sin(2 * math.pi * math.sin(gamma) / N)
thetaJammed = (math.acos(numerator / denominator)) * (180 / math.pi)
self.ids.braidJamAngle.text = '{0:.1f}'.format(thetaJammed)
#Calculate Yarn Undulation and Shift angle
#angle = float(self.ids.braidAngle.text)
beta = 2 * math.pi / N
Angle = math.radians(angle)
Lund = R*beta / math.sin(Angle)
self.ids.yarnUndulation.text = '{0:.3f}'.format(Lund)
self.ids.shiftAngle.text = '{0:.3f}'.format(beta)
def RadiusDown(self):
radius = float(self.ids.radius.text)
radiusNew = radius - 0.1
if radiusNew > 0.0:
self.ids.radius.text = '{0:.1f}'.format(radiusNew)
def RadiusUp(self):
radius = float(self.ids.radius.text)
radiusNew = radius + 0.1
self.ids.radius.text = '{0:.1f}'.format(radiusNew)
def yarnWidthDown(self):
yarnWidth = float(self.ids.yarnWidth.text)
yarnWidthNew = yarnWidth - 0.1
if yarnWidthNew > 0.0:
self.ids.yarnWidth.text = '{0:.1f}'.format(yarnWidthNew)
def yarnWidthUp(self):
yarnWidth = float(self.ids.yarnWidth.text)
        yarnWidthNew = yarnWidth + 0.1
self.ids.yarnWidth.text = '{0:.1f}'.format(yarnWidthNew)
def CarriersDown(self):
carriers = int(self.ids.carriers.text)
carriersNew = carriers - 1
if carriersNew > 0:
self.ids.carriers.text = '{0:d}'.format(carriersNew)
def CarriersUp(self):
carriers = int(self.ids.carriers.text)
        carriersNew = carriers + 1
self.ids.carriers.text = '{0:d}'.format(carriersNew)
def mandrelVelocityDown(self):
mandrelVelocity = float(self.ids.mandrelVelocity.text)
mandrelVelocityNew = mandrelVelocity - 1.0
if mandrelVelocityNew > 0.0:
self.ids.mandrelVelocity.text = '{0:.1f}'.format(mandrelVelocityNew)
def mandrelVelocityUp(self):
mandrelVelocity = float(self.ids.mandrelVelocity.text)
        mandrelVelocityNew = mandrelVelocity + 1.0
self.ids.mandrelVelocity.text = '{0:.1f}'.format(mandrelVelocityNew)
def carrierSpeedDown(self):
carrierSpeed = float(self.ids.carrierSpeed.text)
carrierSpeedNew = carrierSpeed - 1.0
if carrierSpeedNew > 0.0:
self.ids.carrierSpeed.text = '{0:.1f}'.format(carrierSpeedNew)
def carrierSpeedUp(self):
carrierSpeed = float(self.ids.carrierSpeed.text)
carrierSpeedNew = carrierSpeed + 1.0
self.ids.carrierSpeed.text = '{0:.1f}'.format(carrierSpeedNew)
def halfConeAngleDown(self):
halfConeAngle = float(self.ids.halfConeAngle.text)
halfConeAngleNew = halfConeAngle - 1.0
if halfConeAngleNew > 0.0:
self.ids.halfConeAngle.text = '{0:.1f}'.format(halfConeAngleNew)
def halfConeAngleUp(self):
halfConeAngle = float(self.ids.halfConeAngle.text)
halfConeAngleNew = halfConeAngle + 1.0
self.ids.halfConeAngle.text = '{0:.1f}'.format(halfConeAngleNew)
class ScreenMenu(Spinner):
pass
class MainBar(BoxLayout):
pass
class MachineSetupAbout(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
#Visual guide for the setup of a braiding machine to produce different braiding patterns
class MachineSetup(BoxLayout):
def dismiss_popup(self):
self._popup.dismiss()
def AboutMachineSetup(self):
content = MachineSetupAbout(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Braid Machine Setup About", content=content, size_hint=(0.9, 0.9))
self._popup.open()
def load(self):
pass
fileNames = 'None'
    def patternSelect(self):
        # Declare the globals up front; the original declared them at the
        # very end of this method, after the assignments, which Python
        # flags as "name assigned to before global declaration".
        global fileNames, startImg
        braidPattern = self.ids.braidPatternSpinner.text
        curdir = os.path.dirname(os.path.realpath(__file__))
        braidFileType = "*.jpg"
        startImg = 0
        # The original repeated the same loading code in five branches with
        # unused leftover variables; a folder lookup keeps a single copy.
        patternFolders = {
            "Diamond Full": "Diamond_FullLoad",
            "Diamond Half": "Diamond_HalfLoad",
            "Regular Full": "RegularFullLoad",
            "Regular One-Third": "RegularThirdLoad",
            "Hercules": "HerculesHalfLoad",
        }
        if braidPattern in patternFolders:
            pathName = os.path.join(curdir, patternFolders[braidPattern])
            names = os.path.join(pathName, braidFileType)
            fileNames = sorted(glob.glob(names))
            self.ids.patternImage.source = fileNames[startImg]
            numImg = len(fileNames)
            self.ids.imageProgressBar.max = numImg - 1
            self.ids.imageProgressBar.value = startImg
def backButton(self):
braidPatternVal = self.ids.braidPatternSpinner.text
if braidPatternVal != 'Select Braid Pattern':
global fileNames, startImg
if startImg > 0:
startImg = startImg - 1
self.ids.patternImage.source = fileNames[startImg]
self.ids.imageProgressBar.value = startImg
    def forwardButton(self):
        braidPatternVal = self.ids.braidPatternSpinner.text
        if braidPatternVal != 'Select Braid Pattern':
            global fileNames, startImg
            numImg = len(fileNames)
            # Advance first, then display, mirroring backButton; the original
            # displayed the current image before incrementing, so the first
            # press appeared to do nothing and the index could run one past
            # the last image.
            if startImg < numImg - 1:
                startImg = startImg + 1
                self.ids.patternImage.source = fileNames[startImg]
                self.ids.imageProgressBar.value = startImg
class VolumeFractionAbout(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
#Volume Fraction and Cover Factor Calculation
class VolumeFraction(BoxLayout):
def dismiss_popup(self):
self._popup.dismiss()
def AboutVolumeFraction(self):
content = VolumeFractionAbout(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Volume Fraction About", content=content, size_hint=(0.9, 0.9))
self._popup.open()
def load(self):
pass
def CalculateVF(self, data):
braidVal = self.ids.braidType.text
yarnShapeVal = self.ids.braidCrossSection.text
if braidVal != 'Braid Type' and yarnShapeVal != "Select Yarn Shape":
#get input values from user
r0 = float(self.ids.braidRadius.text)
yarnWidth = float(self.ids.yarnWidth.text)
yarnThickness = float(self.ids.yarnThickness.text)
numberYarns = float(self.ids.numberYarns.text)
thetaDeg = float(self.ids.theta.text)
theta = math.radians(thetaDeg)
braidType = self.ids.braidType.text
braidCrossSection = self.ids.braidCrossSection.text
#print braidCrossSection
if yarnThickness > 0 and yarnWidth > 0 and numberYarns > 0 and theta > 0 and r0 > 0:
t = 2* yarnThickness
#calculate yarn cross sectional shape
if braidCrossSection == "Ellipse":
yarnArea = math.pi * yarnWidth * 0.5 * yarnThickness * 0.5
elif braidCrossSection == "Circle":
yarnArea = math.pi * (math.pow(yarnWidth * 0.5, 2))
elif braidCrossSection == "Rectangle":
yarnArea = yarnWidth * yarnThickness
if braidType == "Diamond":
jammed = yarnArea * 4 * numberYarns / (2 * math.pi * r0 * t*math.cos(theta))
if jammed <= 1:
Vf = yarnArea * 4 * numberYarns / (2 * math.pi * r0 * t*math.cos(theta))
elif jammed >=1:
Vf = 1
coverJammed = yarnWidth * numberYarns / (math.pi * r0 * math.cos(theta))
if coverJammed <= 1:
CF = coverJammed
elif coverJammed >1:
CF = 1
self.ids.volumeFraction.text = '{0:.3f}'.format(Vf)
self.ids.coverFactor.text = '{0:.3f}'.format(CF)
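                # Worked example (assumed inputs): an elliptical yarn 2.0 wide
                # and 0.4 thick has area pi * 1.0 * 0.2 ~ 0.628; with 24 yarns,
                # r0 = 10, theta = 45 deg and t = 0.8 the diamond Vf works out
                # to ~1.70, so it is clamped to 1 (the jammed condition).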
def ShowBraidPattern(self):
braidPattern = self.ids.braidType.text
if braidPattern == "Diamond":
self.ids.braidPatternImage.source = 'DiamondBraid_45deg.jpg'
elif braidPattern == "Regular":
self.ids.braidPatternImage.source = 'RegularBraid_45deg.jpg'
elif braidPattern == "Hercules":
self.ids.braidPatternImage.source = 'HerculesBraid_45deg.jpg'
#This is the angle finder widget that allows drawing two straight lines and calculates the minor angle between the two.
class Angle(Widget):
#This function initializes the widget with a touch count of zero
def __init__(self, **kwargs):
super(Angle, self).__init__(**kwargs)
self.touch_down_count = 0
#This function defines the actions that take place when a touch event occurs
def on_touch_down(self, touch):
#when touch count = 2, the canvas is cleared, getting rid of the lines and angle
if self.touch_down_count == 2:
self.canvas.clear()
return
#when touch count is greater than 2, we reset the count to zero to allow for new lines to be drawn and measured
if self.touch_down_count > 2:
self.touch_down_count = 0
#Record the touch coordinates in x and y as variables
x1 = touch.x
y1 = touch.y
#create a label on touch and store it in the user dictionary to be accessed later by an update function
touch.ud['label'] = Label(size_hint=(None, None))
#when the touch count is 0 or 1, we will record the touch coordinates and draw a crosshair at the touch location
if self.touch_down_count <= 1:
#add a label widget
self.add_widget(touch.ud['label'])
with self.canvas:
#save the touch points to the user dictionary
touch.ud['x1'] = x1
touch.ud['y1'] = y1
#set parameters for crosshair display
Color(1, 0, 0)
l = dp(40)
w = dp(3)
#draw crosshair
Rectangle(pos=(touch.ud['x1'] - w / 2, touch.ud['y1'] - l / 2), size=(w, l))
Rectangle(pos=(touch.ud['x1'] - l / 2, touch.ud['y1'] - w / 2), size=(l, w))
#Function to define what happens on a drag action
def on_touch_move(self, touch):
#Record the touch coordinates to variables
x2 = touch.x
y2 = touch.y
#Save touch coordinates to the user dictionary
touch.ud['x2'] = x2
touch.ud['y2'] = y2
ud = touch.ud
#define a group, g, that will be assigned to the line drawn to allow the line to be redrawn as movements occur, leaving only one line on the screen
ud['group'] = g = str(touch.uid)
self.canvas.remove_group(g)
#When the touch count is zero (first touch), we define a vector v1 based on the touch positions in ud
if self.touch_down_count == 0:
v1 = (touch.ud['x2'] - touch.ud['x1'], touch.ud['y2'] - touch.ud['y1'])
self.v1 = v1
#When the touch count is 1 (second touch), we define a vector v2 based on the touch positions in ud. The angle between vectors v1 and v2 is then calculated.
if self.touch_down_count == 1:
v2 = (touch.ud['x2'] - touch.ud['x1'], touch.ud['y2'] - touch.ud['y1'])
self.v2 = v2
angle = Vector(self.v1).angle(self.v2)
absoluteAngle = abs(angle)
#The following if statement is used to ensure the minor angle is always calculated
if absoluteAngle > 90:
absoluteAngle = 180 - absoluteAngle
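                # e.g. for v1 = (1, 0) and v2 = (-1, 1) the raw angle is
                # 135 deg, so the reported minor angle is 180 - 135 = 45 deg.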
#The next two lines are used to update the angle label value as the lines are moved around
touch.ud['angle'] = absoluteAngle
self.update_touch_label(touch.ud['label'], touch)
#If the touch count is greater than 1 (third touch), then this function will end and the canvas will clear as in the previous function
if self.touch_down_count > 1:
return
#This defines the line and crosshair that is drawn between the initial touch point and where the finger has been dragged
with self.canvas:
Color(1, 0, 0)
l = dp(40)
w = dp(3)
Line(points=[touch.ud['x1'], touch.ud['y1'], x2, y2], width=dp(1.5), group=g)
Rectangle(pos=(touch.ud['x2'] - w / 2, touch.ud['y2'] - l / 2), size=(w, l), group=g)
Rectangle(pos=(touch.ud['x2'] - l / 2, touch.ud['y2'] - w / 2), size=(l, w), group=g)
#this function defines what to do when a touch is released. The touch count is simply incremented
def on_touch_up(self, touch):
self.touch_down_count += 1
#This function defines how the angle label is to be updated. It indicates the number of digits to show, the label size and position, color, and font type
def update_touch_label(self, label, touch):
label.text = '%.3f deg' % (touch.ud['angle'])
label.pos = (self.center_x, self.height - dp(60))
label.font_size = '25 dp'
label.size = 1, 1
label.color = 0, 0, 0, 1
label.bold = 1
class BraidedCompositeDesignApp(App):
def build(self):
self.screen = None
self.root = GridLayout(rows = 2, cols = 1)
self.screen_layout = BoxLayout()
self.menu = ScreenMenu()
self.root.add_widget(self.menu)
self.root.add_widget(self.screen_layout)
self.menu.bind(text=self.select_screen)
self.show('Main')
#control window size for screen shots
Window.size= (360,640)
return self.root
def select_screen(self, *args):
self.show(self.menu.text)
    def ensure_dir(self, f):
        # Create the target directory on first run and seed it with the
        # bundled test images; the original copied the five images on every
        # call, even when the directory already existed.
        d = os.path.dirname(f)
        if not os.path.exists(d):
            os.makedirs(d)
            curdir = os.path.dirname(os.path.realpath(__file__))
            print 'current dir', curdir
            for img in ("pic1.jpg", "pic2.jpg", "pic3.jpg", "pic4.jpg", "pic5.jpg"):
                shutil.copy(os.path.join(curdir, "Test Images", img), d)
def on_pause(self):
return True
def on_resume(self):
pass
def show(self, name='Main'):
if self.screen is not None:
self.screen_layout.remove_widget(self.screen)
self.screen = None
if name == 'Main':
screen = MainScreen()
elif name == 'Micromechanics':
screen = MicroMechanics()
elif name == 'Lamina Strength':
screen = LaminaStrength()
elif name == 'CS Transform':
screen = CoordinateTransform()
elif name == 'Braid Manufacturing':
screen = BraidManufacturing()
#screen = braidManufacture()
elif name == 'Volume Fraction':
screen = VolumeFraction()
elif name == 'Angle':
#check to see if directory is available, if not create new directory and load test images
#into this directory
filename = "/sdcard/Pictures/BraidedCompositeApp/TestImages/"
self.ensure_dir(filename)
screen = AngleLayout()
elif name == 'Braid Machine Setup':
screen = MachineSetup()
elif name == 'About':
screen = About_Screen()
else:
raise Exception('Invalid screen name')
self.screen = screen
self.screen_layout.add_widget(screen)
if __name__ == "__main__":
BraidedCompositeDesignApp().run()
| mit | 7,503,996,245,676,990,000 | 37.383667 | 164 | 0.571615 | false |
indianajohn/ycmd | ycmd/tests/server_utils_test.py | 1 | 1633 | # Copyright (C) 2016 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from hamcrest import raises, assert_that, calling
from nose.tools import ok_
from ycmd.server_utils import ( PathToNearestThirdPartyFolder,
AddNearestThirdPartyFoldersToSysPath )
import os.path
def PathToNearestThirdPartyFolder_Success_test():
ok_( PathToNearestThirdPartyFolder( os.path.abspath( __file__ ) ) )
def PathToNearestThirdPartyFolder_Failure_test():
ok_( not PathToNearestThirdPartyFolder( os.path.expanduser( '~' ) ) )
def AddNearestThirdPartyFoldersToSysPath_Failure_test():
assert_that(
calling( AddNearestThirdPartyFoldersToSysPath ).with_args(
os.path.expanduser( '~' ) ),
raises( RuntimeError, '.*third_party folder.*' ) )
| gpl-3.0 | 749,113,430,439,658,800 | 35.288889 | 71 | 0.746479 | false |
rpetrenko/test-reporter | server/api/common.py | 1 | 1323 | # This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
import logging
from flask_restplus import Api
from server import settings
from bson import json_util
import json
log = logging.getLogger(__name__)
api = Api(version='1.0',
title='Test Reporter API',
description='API for test reporter')
@api.errorhandler
def default_error_handler(e):
# message = 'An unhandled exception occurred.'
log.exception(e)
if not settings.FLASK_DEBUG:
return {'message': str(e)}, 500
def db_response_to_json(x):
json_str = json.dumps(x, default=json_util.default)
return json.loads(json_str)
def jenkins_response_to_json(x):
return json.loads(x)
def insert_creds_to_jenkins_url(username, api_key, uri):
parts = uri.split("://")
assert len(parts) == 2
if api_key:
insert_part = "{}:{}@".format(username, api_key)
elif username:
insert_part = "{}@".format(username)
else:
insert_part = ""
uri = "{}://{}{}".format(parts[0], insert_part, parts[1])
return uri
def create_jenkins_uri(username, api_key, uri):
uri = insert_creds_to_jenkins_url(username, api_key, uri)
if not uri.endswith('/'):
uri = uri + '/'
return "{}api/json".format(uri)
| apache-2.0 | -776,886,979,894,500,600 | 23.962264 | 68 | 0.643991 | false |
duanyp1991/purchase-workflow | purchase_requisition_multicurrency/__openerp__.py | 1 | 1212 | # -*- coding: utf-8 -*-
#
#
# Author: Yannick Vaucher
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{"name": "Purchase Requisition Multicurrency",
"version": "0.1",
"author": "Camptocamp",
"license": "AGPL-3",
"category": "Purchase Management",
"complexity": "normal",
"images": [],
"depends": ["purchase_requisition",
],
"demo": [],
"data": ["view/purchase_order.xml",
"view/purchase_requisition.xml",
],
"auto_install": False,
"test": [],
"installable": True,
"certificate": "",
}
| agpl-3.0 | 4,557,405,406,151,952,400 | 30.894737 | 77 | 0.665017 | false |
bolkedebruin/airflow | airflow/hooks/docker_hook.py | 1 | 1156 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.docker.hooks.docker`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.docker.hooks.docker import DockerHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.docker.hooks.docker`.",
DeprecationWarning, stacklevel=2
)
| apache-2.0 | 7,498,838,845,523,876,000 | 38.862069 | 85 | 0.762976 | false |
pplonski/keras2cpp | dump_to_simple_cpp.py | 1 | 3089 | import numpy as np
np.random.seed(1337)
from keras.models import Sequential, model_from_json
import json
import argparse
np.set_printoptions(threshold=np.inf)
parser = argparse.ArgumentParser(description='This is a simple script to dump Keras model into simple format suitable for porting into pure C++ model')
parser.add_argument('-a', '--architecture', help="JSON with model architecture", required=True)
parser.add_argument('-w', '--weights', help="Model weights in HDF5 format", required=True)
parser.add_argument('-o', '--output', help="Ouput file name", required=True)
parser.add_argument('-v', '--verbose', help="Verbose", required=False)
args = parser.parse_args()
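# Example invocation (assumed file names):
#   python dump_to_simple_cpp.py -a model.json -w weights.h5 -o dumped.nnet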
print 'Read architecture from', args.architecture
print 'Read weights from', args.weights
print 'Writing to', args.output
arch = open(args.architecture).read()
model = model_from_json(arch)
model.load_weights(args.weights)
model.compile(loss='categorical_crossentropy', optimizer='adadelta')
arch = json.loads(arch)
with open(args.output, 'w') as fout:
fout.write('layers ' + str(len(model.layers)) + '\n')
layers = []
for ind, l in enumerate(arch["config"]):
if args.verbose:
print ind, l
fout.write('layer ' + str(ind) + ' ' + l['class_name'] + '\n')
if args.verbose:
print str(ind), l['class_name']
layers += [l['class_name']]
if l['class_name'] == 'Convolution2D':
#fout.write(str(l['config']['nb_filter']) + ' ' + str(l['config']['nb_col']) + ' ' + str(l['config']['nb_row']) + ' ')
#if 'batch_input_shape' in l['config']:
# fout.write(str(l['config']['batch_input_shape'][1]) + ' ' + str(l['config']['batch_input_shape'][2]) + ' ' + str(l['config']['batch_input_shape'][3]))
#fout.write('\n')
W = model.layers[ind].get_weights()[0]
if args.verbose:
print W.shape
fout.write(str(W.shape[0]) + ' ' + str(W.shape[1]) + ' ' + str(W.shape[2]) + ' ' + str(W.shape[3]) + ' ' + l['config']['border_mode'] + '\n')
for i in range(W.shape[0]):
for j in range(W.shape[1]):
for k in range(W.shape[2]):
fout.write(str(W[i,j,k]) + '\n')
fout.write(str(model.layers[ind].get_weights()[1]) + '\n')
if l['class_name'] == 'Activation':
fout.write(l['config']['activation'] + '\n')
if l['class_name'] == 'MaxPooling2D':
fout.write(str(l['config']['pool_size'][0]) + ' ' + str(l['config']['pool_size'][1]) + '\n')
#if l['class_name'] == 'Flatten':
# print l['config']['name']
if l['class_name'] == 'Dense':
#fout.write(str(l['config']['output_dim']) + '\n')
W = model.layers[ind].get_weights()[0]
if args.verbose:
print W.shape
fout.write(str(W.shape[0]) + ' ' + str(W.shape[1]) + '\n')
for w in W:
fout.write(str(w) + '\n')
fout.write(str(model.layers[ind].get_weights()[1]) + '\n')
| mit | 5,338,512,930,857,991,000 | 41.902778 | 167 | 0.557462 | false |
SpatialMetabolomics/maori-upload | webapp/app.py | 1 | 7885 | import os
from os.path import dirname, exists, isdir, join, splitext
import base64
import hmac
import hashlib
import json
import boto3
import tempfile
import re
import tornado.ioloop
import tornado.web
from tornado.options import define, options
from datetime import datetime as dt
import yaml
from notify import post_to_slack, post_job_to_queue
TMP_STORAGE_PATH = "/tmp"
METADATA_FILE_NAME = "meta.json"
CONFIG_FILE_NAME = "config.json"
# Resolving Power defined at m/z 200. Compromise values based on the average resolving power @m/z 500 of Orbitrap and FTICR instruments. #todo replace this with full instrument model
RESOL_POWER_PARAMS = {
'70K': {'sigma': 0.00247585727028, 'fwhm': 0.00583019832869, 'pts_per_mz': 2019},
'100K': {'sigma': 0.0017331000892, 'fwhm': 0.00408113883008, 'pts_per_mz': 2885},
'140K': {'sigma': 0.00123792863514, 'fwhm': 0.00291509916435, 'pts_per_mz': 4039},
'200K': {'sigma': 0.000866550044598, 'fwhm': 0.00204056941504, 'pts_per_mz': 5770},
'250K': {'sigma': 0.000693240035678, 'fwhm': 0.00163245553203, 'pts_per_mz': 7212},
'280K': {'sigma': 0.00061896431757, 'fwhm': 0.00145754958217, 'pts_per_mz': 8078},
'500K': {'sigma': 0.000346620017839, 'fwhm': 0.000816227766017, 'pts_per_mz': 14425},
'750K': {'sigma': 0.000231080011893, 'fwhm': 0.000544151844011, 'pts_per_mz': 21637},
'1000K': {'sigma': 0.00017331000892, 'fwhm': 0.000408113883008, 'pts_per_mz': 28850},
}
def create_config(meta_json):
polarity_dict = {'Positive': '+', 'Negative': '-'}
polarity = polarity_dict[meta_json['MS_Analysis']['Polarity']]
instrument = meta_json['MS_Analysis']['Analyzer']
rp = meta_json['MS_Analysis']['Detector_Resolving_Power']
rp_mz = float(rp['mz'])
rp_resolution = float(rp['Resolving_Power'])
# TODO: use pyMSpec once 'instrument_model' branch is merged into master
if instrument == 'FTICR':
rp200 = rp_resolution * rp_mz / 200.0
elif instrument == 'Orbitrap':
rp200 = rp_resolution * (rp_mz / 200.0) ** 0.5
else:
rp200 = rp_resolution
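    # Worked example (assumed figures): an Orbitrap quoted as 70,000
    # resolving power at m/z 400 gives rp200 = 70000 * (400 / 200.0) ** 0.5
    # ~ 98,995, which selects the '100K' parameter set below.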
if rp200 < 85000:
params = RESOL_POWER_PARAMS['70K']
elif rp200 < 120000:
params = RESOL_POWER_PARAMS['100K']
elif rp200 < 195000:
params = RESOL_POWER_PARAMS['140K']
elif rp200 < 265000:
params = RESOL_POWER_PARAMS['250K']
elif rp200 < 390000:
params = RESOL_POWER_PARAMS['280K']
elif rp200 < 625000:
params = RESOL_POWER_PARAMS['500K']
elif rp200 < 875000:
params = RESOL_POWER_PARAMS['750K']
else:
params = RESOL_POWER_PARAMS['1000K']
return {
"database": {
"name": meta_json['metaspace_options']['Metabolite_Database']
},
"isotope_generation": {
"adducts": {'+': ['+H', '+K', '+Na'], '-': ['-H', '+Cl']}[polarity],
"charge": {
"polarity": polarity,
"n_charges": 1
},
"isocalc_sigma": round(params['sigma'], 6),
"isocalc_pts_per_mz": params['pts_per_mz']
},
"image_generation": {
"ppm": 3.0,
"nlevels": 30,
"q": 99,
"do_preprocessing": False
}
}
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render('static/index.html')
class SubmitHandler(tornado.web.RequestHandler):
def initialize(self):
self.config = yaml.load(open(options.config))
self.s3 = boto3.resource('s3', self.config['aws']['region'])
def upload_to_s3(self, doc, bucket, key):
with tempfile.NamedTemporaryFile() as f:
json.dump(doc, f, indent=4)
f.flush()
obj = self.s3.Object(bucket, key)
obj.upload_file(f.name)
def post(self):
if self.request.headers["Content-Type"].startswith("application/json"):
data = json.loads(self.request.body)
session_id = data['session_id']
metadata = data['formData']
self.upload_to_s3(metadata, self.config['aws']['s3_bucket'], join(session_id, METADATA_FILE_NAME))
ds_config = create_config(metadata)
self.upload_to_s3(ds_config, self.config['aws']['s3_bucket'], join(session_id, CONFIG_FILE_NAME))
self.set_header("Content-Type", "text/plain")
self.write("Uploaded to S3: {}".format(data['formData']))
else:
print(self.request.headers["Content-Type"])
self.write("Error: Content-Type has to be 'application/json'")
class MessageHandler(tornado.web.RequestHandler):
def initialize(self):
self.config = yaml.load(open(options.config))
def post(self):
if self.request.headers["Content-Type"].startswith("application/json"):
data = json.loads(self.request.body)
session_id = data['session_id']
metadata = data['formData']
ds_name = u'{}//{}'.format(metadata['Submitted_By']['Institution'],
metadata['metaspace_options']['Dataset_Name'])
ds_name = re.sub(',', '_', ds_name)
msg = {
'ds_id': dt.now().strftime("%Y-%m-%d_%Hh%Mm%Ss"),
'ds_name': ds_name,
'input_path': 's3a://{}/{}'.format(self.config['aws']['s3_bucket'], session_id),
'user_email': metadata['Submitted_By']['Submitter']['Email'].lower()
}
if self.config['slack']['webhook_url']:
post_to_slack('email', " [v] Sent: {}".format(json.dumps(msg)))
if self.config['rabbitmq']['host']:
post_job_to_queue(msg)
else:
print(self.request.headers["Content-Type"])
self.write("Error: Content-Type has to be 'application/json'")
class UploadHandler(tornado.web.RequestHandler):
def initialize(self):
self.config = yaml.load(open(options.config))
def sign_policy(self, policy):
""" Sign and return the policy document for a simple upload.
http://aws.amazon.com/articles/1434/#signyours3postform """
signed_policy = base64.b64encode(policy)
signature = base64.b64encode(hmac.new(
self.config['aws']['secret_access_key'], signed_policy, hashlib.sha1).
digest())
return {'policy': signed_policy, 'signature': signature}
def sign_headers(self, headers):
""" Sign and return the headers for a chunked upload. """
return {
'signature': base64.b64encode(hmac.new(
self.config['aws']['secret_access_key'], headers, hashlib.sha1).
digest())
}
def post(self):
""" Route for signing the policy document or REST headers. """
request_payload = json.loads(self.request.body)
if request_payload.get('headers'):
response_data = self.sign_headers(request_payload['headers'])
else:
response_data = self.sign_policy(self.request.body)
return self.write(response_data)
class WebConfigHandler(tornado.web.RequestHandler):
def get(self):
self.write(json.load(open(options.web_config)))
def make_app():
define('config', type=str)
define('web_config', type=str)
options.parse_command_line()
return tornado.web.Application([
(r"/", MainHandler),
(r'/s3/sign', UploadHandler),
(r"/submit", SubmitHandler),
(r"/config.json", WebConfigHandler),
(r"/send_msg", MessageHandler)
],
static_path=join(dirname(__file__), "static"),
static_url_prefix='/static/',
debug=False,
compress_response=True
)
if __name__ == "__main__":
if not isdir(TMP_STORAGE_PATH):
os.mkdir(TMP_STORAGE_PATH)
app = make_app()
app.listen(9777)
tornado.ioloop.IOLoop.current().start()
| apache-2.0 | -8,586,711,336,116,844,000 | 34.518018 | 182 | 0.592137 | false |
Valloric/ycmd | ycmd/completers/language_server/generic_lsp_completer.py | 1 | 1922 | # Copyright (C) 2019 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from ycmd import responses, utils
from ycmd.completers.language_server.simple_language_server_completer import (
SimpleLSPCompleter )
class GenericLSPCompleter( SimpleLSPCompleter ):
def __init__( self, user_options, server_settings ):
self._name = server_settings[ 'name' ]
self._supported_filetypes = server_settings[ 'filetypes' ]
super( GenericLSPCompleter, self ).__init__( user_options )
self._command_line = server_settings[ 'cmdline' ]
self._command_line[ 0 ] = utils.FindExecutable( self._command_line[ 0 ] )
def Language( self ):
return self._name
def GetServerName( self ):
return self._name + 'Completer'
def GetCommandLine( self ):
return self._command_line
def GetCustomSubcommands( self ):
return { 'GetHover': lambda self, request_data, args:
responses.BuildDisplayMessageResponse(
self.GetHoverResponse( request_data ) ) }
def SupportedFiletypes( self ):
return self._supported_filetypes
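# Example server_settings consumed by the constructor (hypothetical values):
#   { 'name': 'rust',
#     'filetypes': [ 'rust' ],
#     'cmdline': [ '/path/to/language/server', '--stdio' ] }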
| gpl-3.0 | 7,591,909,427,674,534,000 | 32.137931 | 78 | 0.727888 | false |
bsmithyman/galoshes | galoshes/meta.py | 1 | 7934 | '''
Low-level programming constructs for key-value stores
Originally developed as part of Zephyr
https://zephyr.space/
'''
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
from builtins import object
import warnings
import numpy as np
from functools import reduce
from future.utils import with_metaclass
class ClassProperty(property):
'''
Class decorator to enable property behaviour in classes
'''
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
class AMMetaClass(type):
'''
Meta class that enables AttributeMapper functionality, including inheritance
in the dictionary 'initMap'.
'''
def __new__(mcs, name, bases, attrs):
'Build a new subclass of AttributeMapper'
baseMaps = [getattr(base, 'initMap', {}) for base in bases][::-1]
baseMaps.append(attrs.get('initMap', {}))
initMap = {}
for baseMap in baseMaps:
initMap.update(baseMap)
for key in initMap:
if initMap[key] is None:
del(initMap[key])
attrs['initMap'] = initMap
baseMasks = reduce(set.union, (getattr(base, 'maskKeys', set()) for base in bases))
maskKeys = set.union(baseMasks, attrs.get('maskKeys', set()))
if maskKeys:
attrs['maskKeys'] = maskKeys
return type.__new__(mcs, name, bases, attrs)
def __call__(cls, *args, **kwargs):
'Instantiate a subsclass of AttributeMapper'
if not args:
raise TypeError('__init__() takes at least 2 arguments (1 given)')
systemConfig = args[0]
obj = cls.__new__(cls)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for key in obj.initMap:
if (key not in systemConfig) and obj.initMap[key][0]:
raise ValueError('Class {0!s} requires parameter \'{1!s}\''.format(cls.__name__, key))
if key in systemConfig:
if obj.initMap[key][2] is None:
def typer (x):
return x
else:
def typer(x):
newtype = obj.initMap[key][2]
try:
return obj.initMap[key][2](x)
except TypeError:
if np.iscomplex(x) and issubclass(newtype, np.floating):
return typer(x.real)
raise
if obj.initMap[key][1] is None:
setattr(obj, key, typer(systemConfig[key]))
else:
setattr(obj, obj.initMap[key][1], typer(systemConfig[key]))
obj.__init__(*args, **kwargs)
return obj
class AttributeMapper(with_metaclass(AMMetaClass, object)):
'''
An AttributeMapper subclass defines a dictionary initMap, which
includes keys for mappable inputs expected from the systemConfig
parameter. The dictionary takes the form:
initMap = {
# Argument Required Rename as ... Store as type
'c': (True, '_c', np.complex128),
'rho': (False, '_rho', np.float64),
'freq': (True, None, np.complex128),
'dx': (False, '_dx', np.float64),
'dz': (False, '_dz', np.float64),
'nx': (True, None, np.int64),
'nz': (True, None, np.int64),
'freeSurf': (False, '_freeSurf', list),
}
Each value in the dictionary is a tuple, which is interpreted by
the metaclass (i.e., AMMetaClass) to determine how to process the
value corresponding to the same key in systemConfig.
An exception will be raised if the first element in the tuple
is set to true, but the corresponding key does not exist in the
systemConfig parameter.
If the second element in the tuple is set to None, the key will be
defined in the subclass's attribute dictionary as it stands, whereas
if the second element is a string then that overrides the key.
If the third element in the tuple is set to None, the input argument
will be set in the subclass dictionary unmodified; however, if the
third element is a callable then it will be applied to the element
(e.g., to allow copying and/or typecasting of inputs).
NB: Complex numpy arguments are handled specially: the real part of
the value is kept and the imaginary part is discarded when they are
typecast to a float.
'''
def __init__(self, systemConfig):
'''
AttributeMapper(systemConfig)
Args:
systemConfig (dict): A set of setup keys
'''
pass
@ClassProperty
@classmethod
def required(cls):
'Property to return required fields in initMap'
return {key for key in cls.initMap if cls.initMap[key][0]}
@ClassProperty
@classmethod
def optional(cls):
'Property to return optional fields in initMap'
return {key for key in cls.initMap if not cls.initMap[key][0]}
class SCFilter(object):
'''
A SCFilter class is initialized with a list of classes as arguments.
For any of those classes that are AttributeMapper subclasses, SCFilter
determines the required fields in their initMap trees, and the optional
fields. When called, the SCFilter discards any key in the passed dictionary
that does not match one of those fields, and raises an error if any of the
required fields are not present.
'''
def __init__(self, clslist):
'''
SCFilter(clslist)
Args:
clslist (list): List of classes from which to build the filter
Returns:
new SCFilter instance
'''
if not hasattr(clslist, '__contains__'):
clslist = [clslist]
self.required = reduce(set.union, (cls.required for cls in clslist if issubclass(cls, AttributeMapper)))
self.optional = reduce(set.union, (cls.optional for cls in clslist if issubclass(cls, AttributeMapper)))
self.optional.symmetric_difference_update(self.required)
def __call__(self, systemConfig):
'''
Args:
systemConfig (dict): A systemConfig dictionary to filter
Returns:
dict: Filtered dictionary
Raises:
ValueError: If a required key is not in the systemConfig
'''
        for key in self.required:
            if key not in systemConfig:
                # The original interpolated an undefined name `cls` here,
                # which would raise NameError instead of the intended message.
                raise ValueError("SCFilter requires parameter '{0!s}'".format(key))
return {key: systemConfig[key] for key in set.union(self.required, self.optional) if key in systemConfig}
class BaseSCCache(AttributeMapper):
'''
Subclass of AttributeMapper that caches (a filtered version of) the
systemConfig object used to initialize it.
'''
maskKeys = set()
cacheItems = []
def __init__(self, systemConfig):
super(BaseSCCache, self).__init__(systemConfig)
self.systemConfig = {key: systemConfig[key] for key in systemConfig if key not in self.maskKeys}
@property
def systemConfig(self):
return self._systemConfig
@systemConfig.setter
def systemConfig(self, value):
self._systemConfig = value
self.clearCache()
def clearCache(self):
'Clears cached items (e.g., when model is reset).'
for attr in self.cacheItems:
if hasattr(self, attr):
delattr(self, attr)
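# Usage sketch (hypothetical subclass, not part of this module): the
# metaclass consumes initMap at instantiation, validating required keys and
# applying the rename/typecast rules documented above.
#
#     class Grid(BaseSCCache):
#         initMap = {
#             #   Argument   Required  Rename as ...  Store as type
#             'nx':          (True,    None,          int),
#             'dx':          (False,   '_dx',         float),
#         }
#
#     g = Grid({'nx': 100, 'dx': 0.5})
#     assert g.nx == 100 and g._dx == 0.5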
| mit | -61,783,569,254,275,260 | 32.761702 | 113 | 0.592135 | false |
anisotropi4/goldfinch | bin/dump-tree.py | 1 | 1613 | #!/usr/bin/python3
import xml.etree.cElementTree as ET
import sys
import argparse
parser = argparse.ArgumentParser(description='Strip namespace and a list of xml-tags in a tsv format')
parser.add_argument('--path', dest='path', type=str, default='',
help='output directory file')
parser.add_argument('inputfile', type=str, nargs='?', help='name of xml-file to parse')
parser.add_argument('outputfile', type=str, nargs='?', help='name of output file')
args = parser.parse_args()
path = args.path
if path != '':
path = path + '/'
fin = sys.stdin
if args.inputfile:
fin = open(args.inputfile, 'r')
fout = sys.stdout
if args.outputfile:
fout = open(path + args.outputfile, 'w')
def strip_ns(tag, namespaces):
for nk, nv in namespaces.items():
if tag.startswith(nk+':'):
return tag[len(nk)+1:]
if tag.startswith('{'+nv+'}'):
return tag[len(nv)+2:]
return tag
namespaces = {}
ns_stack = []  # stack of open prefixes, so 'end-ns' can remove the right one
document = ET.iterparse(fin, events=('start', 'end', 'start-ns', 'end-ns'))
s = []
n = 0
for event, e in document:
    if event == 'start-ns':
        (nk, nv) = e
        namespaces[nk] = nv
        ns_stack.append(nk)
        continue
    if event == 'end-ns':
        # 'end-ns' events carry no payload; the original popped the literal
        # string 'key', so expired namespaces were never actually removed.
        if ns_stack:
            namespaces.pop(ns_stack.pop(), None)
        continue
if event == 'start':
tag = strip_ns(e.tag, namespaces)
s.append(tag)
n = n + 1
r = "\t".join(s + [str(n)])
fout.write(r)
fout.write('\n')
e.clear()
if event == 'end':
s.pop()
n = n - 1
if fout is not sys.stdout:
fout.close()
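# Example invocation (assumed file names):
#   ./dump-tree.py --path out input.xml tags.tsv
# Each emitted row is the tab-separated stack of open tags plus the depth.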
| mit | 1,958,677,056,999,847,400 | 22.376812 | 102 | 0.555487 | false |
biolink/ontobio | tests/test_enrich.py | 1 | 1478 | from ontobio.ontol_factory import OntologyFactory
from ontobio.assoc_factory import AssociationSetFactory
from ontobio.assocmodel import AssociationSet
import logging
import random
CVP = 'MP:0004084' # cardiomyopathy
MUS = 'NCBITaxon:10090'
def test_construct():
"""
enrichment test
build a gene set from MP term for cardiomyopathy;
test for enrichment against GO
"""
ofactory = OntologyFactory()
afactory = AssociationSetFactory()
logging.info("Creating mp handle")
mp = ofactory.create('obo:mp')
logging.info("Getting pheno assocs")
aset_phen = afactory.create(ontology=mp,
subject_category='gene',
object_category='phenotype',
taxon=MUS)
logging.info("Creating go handle")
ont = ofactory.create('go')
logging.info("Getting go assocs")
aset = afactory.create(ontology=ont,
subject_category='gene',
object_category='function',
taxon=MUS)
logging.info("Getting sample")
sample = aset_phen.query([CVP],[])
logging.info("sample = {}".format(len(sample)))
rs = aset.enrichment_test(sample, threshold=1e-2, labels=True, direction='less')
for r in rs:
print("UNDER: "+str(r))
rs = aset.enrichment_test(sample, threshold=0.05, labels=True)
for r in rs:
print(str(r))
#test_construct()
| bsd-3-clause | 8,935,766,011,242,972,000 | 29.163265 | 84 | 0.608254 | false |
pydio/pydio-sync | src/pydio/res/i18n/html_strings.py | 1 | 9431 | def _(a_string): return a_string
var_1=_('How can I find my server URL?')
var_2=_('The server URL is the adress that you can see in your browser when accessing Pydio via the web.')
var_3=_('It starts with http or https depending on your server configuration.')
var_4=_('If you are logged in Pydio and you see the last part of the URL starting with "ws-", remove this part and only keep the beginning (see image below).')
var_5=_('Got it!')
var_6=_('Connecting ...')
var_7=_('Configure Connection')
var_8=_('Error while trying to connect to %1 :')
var_9=_('%1')
var_10=_('Connect to the server with the same URL as the one you would use to access through a web browser, e.g. http://mydomain.com/pydio')
var_11=_('Required')
var_12=_('Required')
var_13=_('Required')
var_14=_('Tips')
var_15=_('where can I find the server URL?')
var_16=_('Connect')
var_17=_('Trust SSL certificate')
var_18=_('1/3 Select a workspace')
var_19=_('Welcome %1!')
var_20=_('You are connecting to %1')
var_21=_('change')
var_22=_('change')
var_23=_('Remote workspace')
var_24=_('This workspace is read only!')
var_25=_('Synchronise only a subfolder of this workspace')
var_26=_('loading')
var_27=_('Whole workspace')
var_28=_('loading')
var_29=_('Workspace')
var_30=_('Folder')
var_31=_('Change')
var_32=_('Next')
var_33=_('Advanced Parameters')
var_34=_('Save changes')
var_35=_('2/3 Select a destination')
var_36=_('By default a local folder will be created on your computer')
var_37=_('Change')
var_38=_('3/3 Optional Parameters')
var_39=_('Server')
var_40=_('Workspace')
var_41=_('Folder')
var_42=_('Whole workspace')
var_43=_('change')
var_44=_('Local folder')
var_45=_('change')
var_46=_('Name this synchro')
var_47=_('Advanced Parameters')
var_48=_('Previous')
var_49=_('Next')
var_50=_('Previous')
var_51=_('Next')
var_52=_('SYNC NAME')
var_53=_('DATA SIZE')
var_54=_('ESTIMATED TIME')
var_55=_('Ready for ignition!')
var_56=_('Are you ready to launch the synchronization?')
var_57=_('Your data will be in orbit in no time!')
var_58=_('A sync task with similar parameters exists.')
var_59=_('Please')
var_60=_('change parameters')
var_61=_('A sync task with similar parameters exists.')
var_62=_('You may want to')
var_63=_('change parameters')
var_64=_('FIRE THE ROCKET!')
var_65=_('change parameters')
var_66=_('Synchronization running...')
var_67=_('Liftoff! First sync can take some time...')
var_68=_('CREATE NEW SYNCHRO')
var_69=_('DONE')
var_70=_('Sync Direction')
var_71=_('Modifications are sent to the server but the client does not download anything.')
var_72=_('Modifications from both sides (local/remote) are automatically reflected on the other side.')
var_73=_('Modifications from the server are downloaded buth nothing is sent to the server.')
var_74=_('Upload Only')
var_75=_('computer to server')
var_76=_('Bi-directional')
var_77=_('default when possible')
var_78=_('Download Only')
var_79=_('server to computer')
var_80=_('Sync Frequency')
var_81=_('By default, the application will synchronize automatically')
var_82=_('Automatically')
var_83=_('Manually')
var_84=_('Given Time')
var_85=_('Run the sync every day at')
var_86=_('Conflicts')
var_87=_('When files were modified on both the server and your computer at the same time, a conflict is detected.')
var_88=_('Automatic')
var_89=_('Solve conflicts manually')
var_90=_('With the default keep-both behavior conflicting files will be copied on your local sync. Which version is to be kept?')
var_91=_('Keep both')
var_92=_('Prefer local')
var_93=_('Prefer remote')
var_94=_('Connection settings')
var_95=_('Increase the timeout in case of slow responsive server')
var_96=_('Timeout in seconds')
var_97=_('You can increase or reduce the number of concurrent connections. More means a faster sync but requires a server with more resources. (Default: 4)')
var_98=_('Concurrent connections')
var_99=_('Include/Exclude from Sync')
var_100=_('Syncronise, use a list of files or patterns to include in the sync')
var_101=_('Do not synchronise, list of files or patterns to exclude from sync')
var_102=_('SYNC %1 parameters')
var_103=_('Server')
var_104=_('Workspace')
var_105=_('Folder')
var_106=_('Resync task')
var_107=_('This operation will make sure that your server and local folder are correctly synchronized. Beware, this can take a while, and may be resource intensive.')
var_108=_('Cancel')
var_109=_('Proceed')
var_110=_('Trigger a full re-indexation')
var_111=_('Label')
var_112=_('Server Connexion')
var_113=_('Login')
var_114=_('Password')
var_115=_('Local Folder')
var_116=_('Choose')
var_117=_('Remote workspace')
var_118=_('Workspace')
var_119=_('Folder')
var_120=_('Change')
var_121=_('This workspace is read only!')
var_122=_('Synchronise only a subfolder of this workspace')
var_123=_('Whole workspace')
var_124=_('[loading...]')
var_125=_('Advanced parameters')
var_126=_('Delete Sync')
var_127=_('Save Parameters')
var_128=_('Help us! ')
var_129=_('Give us your feedback to improve PydioSync.')
var_130=_('Please wait...')
var_131=_('PydioSync Feedback Form')
var_132=_('You have the power to help us improve PydioSync by submitting anonymously this simple form.')
var_133=_('Include the number of synced files;')
var_134=_('Include the last sequence number;')
var_135=_('Include server info;')
var_136=_('Include errors;')
var_137=_('Include the number of errors;')
var_138=_('Comments (Appreciated)')
var_139=_('About')
var_140=_('General configurations page')
var_141=_('Update settings')
var_142=_('Enable / Disable update here.')
var_143=_('Set the update check frequency (here 1 means update check only once a day, default value 0 means it check for update each time agent establishes a new connection with UI) ')
var_144=_('Update check frequency in days')
var_145=_('Date on which last update check happened')
var_146=_('Last update check was on: ')
var_147=_('Proxy settings')
var_148=_('Enable / Disable Proxy.')
var_149=_('If you want the network connections to pass through proxy, fill the parameters below.')
var_150=_('Log settings')
var_151=_('You can change the name of log file here.')
var_152=_('File Name')
var_153=_('Limit the number of log files to be stored locally.')
var_154=_('Number of log files')
var_155=_('Set restrictions on log file size here.')
var_156=_('Enhance the log details as you need them.')
var_157=_('Info')
var_158=_('Debug')
var_159=_('Warning')
var_160=_('Other settings')
var_161=_('Max wait time for local db access')
var_162=_('If you encounter database locked error try increasing this value')
var_163=_('Timeout in seconds')
var_164=_('Set Language')
var_165=_('Language ')
var_166=_('Update Settings')
var_167=_('Ooops, cannot contact agent! Make sure it is running correctly, process will try to reconnect in 20s')
var_168=_('Select a workspace')
var_169=_('Full path to the local folder')
var_170=_('Are you sure you want to delete this synchro? No data will be deleted')
var_171=_('computing...')
var_172=_('Status')
var_173=_('syncing')
var_174=_('Size')
var_175=_('Estimated time')
var_176=_('Status')
var_177=_('syncing')
var_178=_('Status')
var_179=_('syncing')
var_180=_('Last sync')
var_181=_('ERROR')
var_182=_('Status')
var_183=_('idle')
var_184=_('[DISABLED]')
var_185=_('Conflicts')
var_186=_('Solve Conflict')
var_187=_('Solved')
var_188=_('Last files synced')
var_189=_('Open File')
var_190=_('Transfers in progress')
var_191=_('An element named %1 was modified on both the server and on your local computer. Select how you want to solve this conflicting case:')
var_192=_('Apply to all conflicts')
var_193=_('Mine')
var_194=_('Both Versions')
var_195=_('Theirs')
var_196=_('Create a new synchronization')
var_197=_('Create a new synchronization')
var_198=_('Share %1 via Pydio')
var_199=_('Share %1 via Pydio')
var_200=_('Description')
var_201=_('Path')
var_202=_('Share item')
var_203=_('by creating a public link that can easily be sent to your contacts.')
var_204=_('You can customize the link parameters using the forms below.')
var_205=_('Secure Link Access')
var_206=_('Optional Password')
var_207=_('Password')
var_208=_('Expires After')
var_209=_('Days')
var_210=_('Downloads')
var_211=_('Security')
var_212=_('Password')
var_213=_('Expires after')
var_214=_('Days')
var_215=_('Downloads')
var_216=_('Advanced parameters')
var_217=_('Link Handle')
var_218=_('If you want a durable and pretty link (like https://.../my-share-link), you can use this field. Link handle will be generated if left empty.')
var_219=_('Description')
var_220=_('This will be displayed to the shared users.')
var_221=_('Access Rights')
var_222=_('By default, the item will be previewed and downloadable')
var_223=_('Preview')
var_224=_('Download')
var_225=_('Upload')
var_226=_('Generate Link')
var_227=_('Generate Link')
var_228=_('Share %1 via Pydio')
var_229=_('Shared Link')
var_230=_('Shared link to the selected item already exists. Below is the link to the selected item')
var_231=_('New shared link to the selected item has been generated')
var_232=_('Shared Link to the selected item has not been generated')
var_233=_('Copy to Clipboard')
var_234=_('UnShare Link')
var_235=_('Text has been copied to clipboard.')
var_236=_('Successfully unshared.')
var_237=_('Please wait ...')
var_238=_('Welcome to the Pydio Sync')
var_239=_('The easiest way to keep your data in control')
var_240=_('Loading...')
var_241=_('Get Started')
var_242=_('Required')
var_243=_('Proxy settings')
var_244=_('Get Started')
| gpl-3.0 | -2,384,637,885,232,840,700 | 37.493878 | 184 | 0.693564 | false |
Neurita/boyle | boyle/files/utils.py | 1 | 1412 | # coding=utf-8
"""
Utilities for file management.
"""
# ------------------------------------------------------------------------------
# Author: Alexandre Manhaes Savio <[email protected]>
#
# 2015, Alexandre Manhaes Savio
# Use this at your own risk!
# ------------------------------------------------------------------------------
import os.path as op
import shutil
from .names import remove_ext, get_extension
def copy_w_ext(srcfile, destdir, basename):
""" Copy `srcfile` in `destdir` with name `basename + get_extension(srcfile)`.
Add pluses to the destination path basename if a file with the same name already
exists in `destdir`.
Parameters
----------
srcfile: str
destdir: str
basename:str
Returns
-------
dstpath: str
"""
ext = get_extension(op.basename(srcfile))
dstpath = op.join(destdir, basename + ext)
return copy_w_plus(srcfile, dstpath)
def copy_w_plus(src, dst):
"""Copy file from `src` path to `dst` path. If `dst` already exists, will add '+' characters
to the end of the basename without extension.
Parameters
----------
src: str
dst: str
Returns
-------
dstpath: str
"""
dst_ext = get_extension(dst)
    dst_pre = remove_ext(dst)
while op.exists(dst_pre + dst_ext):
dst_pre += '+'
shutil.copy(src, dst_pre + dst_ext)
return dst_pre + dst_ext
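# Example (assumed paths): if '/tmp/a.txt' already exists,
# copy_w_plus('src.txt', '/tmp/a.txt') copies to '/tmp/a+.txt' and
# returns that path.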
| bsd-3-clause | 4,066,195,155,547,318,300 | 20.723077 | 96 | 0.550992 | false |
hguemar/cinder | cinder/api/contrib/admin_actions.py | 1 | 11497 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.utils import strutils
import webob
from webob import exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import backup
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import rpc
from cinder import volume
LOG = logging.getLogger(__name__)
class AdminController(wsgi.Controller):
"""Abstract base class for AdminControllers."""
collection = None # api collection to extend
# FIXME(clayg): this will be hard to keep up-to-date
# Concrete classes can expand or over-ride
valid_status = set(['creating',
'available',
'deleting',
'error',
'error_deleting', ])
def __init__(self, *args, **kwargs):
super(AdminController, self).__init__(*args, **kwargs)
# singular name of the resource
self.resource_name = self.collection.rstrip('s')
self.volume_api = volume.API()
self.backup_api = backup.API()
def _update(self, *args, **kwargs):
raise NotImplementedError()
def _get(self, *args, **kwargs):
raise NotImplementedError()
def _delete(self, *args, **kwargs):
raise NotImplementedError()
def validate_update(self, body):
update = {}
try:
update['status'] = body['status'].lower()
except (TypeError, KeyError):
raise exc.HTTPBadRequest(explanation=_("Must specify 'status'"))
if update['status'] not in self.valid_status:
raise exc.HTTPBadRequest(
explanation=_("Must specify a valid status"))
return update
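    # Illustrative sketch (hypothetical body): validate_update lower-cases the
    # status and accepts it only if it is in valid_status, e.g.
    #     self.validate_update({'status': 'Error_Deleting'})
    #     # -> {'status': 'error_deleting'}
    # while a missing or unknown status raises HTTPBadRequest.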
def authorize(self, context, action_name):
# e.g. "snapshot_admin_actions:reset_status"
action = '%s_admin_actions:%s' % (self.resource_name, action_name)
extensions.extension_authorizer('volume', action)(context)
@wsgi.action('os-reset_status')
def _reset_status(self, req, id, body):
"""Reset status on the resource."""
context = req.environ['cinder.context']
self.authorize(context, 'reset_status')
update = self.validate_update(body['os-reset_status'])
msg = _("Updating %(resource)s '%(id)s' with '%(update)r'")
LOG.debug(msg, {'resource': self.resource_name, 'id': id,
'update': update})
notifier_info = dict(id=id, update=update)
notifier = rpc.get_notifier('volumeStatusUpdate')
notifier.info(context, self.collection + '.reset_status.start',
notifier_info)
try:
self._update(context, id, update)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
notifier.info(context, self.collection + '.reset_status.end',
notifier_info)
return webob.Response(status_int=202)
@wsgi.action('os-force_delete')
def _force_delete(self, req, id, body):
"""Delete a resource, bypassing the check that it must be available."""
context = req.environ['cinder.context']
self.authorize(context, 'force_delete')
try:
resource = self._get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
self._delete(context, resource, force=True)
return webob.Response(status_int=202)
class VolumeAdminController(AdminController):
"""AdminController for Volumes."""
collection = 'volumes'
# FIXME(jdg): We're appending additional valid status
# entries to the set we declare in the parent class
# this doesn't make a ton of sense, we should probably
# look at the structure of this whole process again
# Perhaps we don't even want any definitions in the abstract
# parent class?
valid_status = AdminController.valid_status.union(
set(['attaching', 'in-use', 'detaching']))
valid_attach_status = set(['detached', 'attached', ])
valid_migration_status = set(['migrating', 'error',
'completing', 'none',
'starting', ])
def _update(self, *args, **kwargs):
db.volume_update(*args, **kwargs)
def _get(self, *args, **kwargs):
return self.volume_api.get(*args, **kwargs)
def _delete(self, *args, **kwargs):
return self.volume_api.delete(*args, **kwargs)
def validate_update(self, body):
update = {}
status = body.get('status', None)
attach_status = body.get('attach_status', None)
migration_status = body.get('migration_status', None)
valid = False
if status:
valid = True
update = super(VolumeAdminController, self).validate_update(body)
if attach_status:
valid = True
update['attach_status'] = attach_status.lower()
if update['attach_status'] not in self.valid_attach_status:
raise exc.HTTPBadRequest(
explanation=_("Must specify a valid attach status"))
if migration_status:
valid = True
update['migration_status'] = migration_status.lower()
if update['migration_status'] not in self.valid_migration_status:
raise exc.HTTPBadRequest(
explanation=_("Must specify a valid migration status"))
if update['migration_status'] == 'none':
update['migration_status'] = None
if not valid:
raise exc.HTTPBadRequest(
explanation=_("Must specify 'status', 'attach_status' "
"or 'migration_status' for update."))
return update
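    # Illustrative sketch (hypothetical body): any combination of the three
    # keys is accepted, and a 'none' migration status is mapped to None, e.g.
    #     self.validate_update({'attach_status': 'Detached',
    #                           'migration_status': 'None'})
    #     # -> {'attach_status': 'detached', 'migration_status': None}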
@wsgi.action('os-force_detach')
def _force_detach(self, req, id, body):
"""Roll back a bad detach after the volume been disconnected."""
context = req.environ['cinder.context']
self.authorize(context, 'force_detach')
try:
volume = self._get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
self.volume_api.terminate_connection(context, volume,
{}, force=True)
self.volume_api.detach(context, volume)
return webob.Response(status_int=202)
@wsgi.action('os-migrate_volume')
def _migrate_volume(self, req, id, body):
"""Migrate a volume to the specified host."""
context = req.environ['cinder.context']
self.authorize(context, 'migrate_volume')
try:
volume = self._get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
params = body['os-migrate_volume']
try:
host = params['host']
except KeyError:
raise exc.HTTPBadRequest(explanation=_("Must specify 'host'"))
force_host_copy = params.get('force_host_copy', False)
if isinstance(force_host_copy, basestring):
try:
force_host_copy = strutils.bool_from_string(force_host_copy,
strict=True)
except ValueError:
raise exc.HTTPBadRequest(
explanation=_("Bad value for 'force_host_copy'"))
elif not isinstance(force_host_copy, bool):
raise exc.HTTPBadRequest(
explanation=_("'force_host_copy' not string or bool"))
self.volume_api.migrate_volume(context, volume, host, force_host_copy)
return webob.Response(status_int=202)
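    # Illustrative sketch of the expected payload (the host string below is a
    # made-up example; its exact format depends on the deployment):
    #     {'os-migrate_volume': {'host': 'node1@lvm#pool',
    #                            'force_host_copy': 'true'}}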
@wsgi.action('os-migrate_volume_completion')
def _migrate_volume_completion(self, req, id, body):
"""Complete an in-progress migration."""
context = req.environ['cinder.context']
self.authorize(context, 'migrate_volume_completion')
try:
volume = self._get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
params = body['os-migrate_volume_completion']
try:
new_volume_id = params['new_volume']
except KeyError:
raise exc.HTTPBadRequest(
explanation=_("Must specify 'new_volume'"))
try:
new_volume = self._get(context, new_volume_id)
except exception.NotFound:
raise exc.HTTPNotFound()
error = params.get('error', False)
ret = self.volume_api.migrate_volume_completion(context, volume,
new_volume, error)
return {'save_volume_id': ret}
class SnapshotAdminController(AdminController):
"""AdminController for Snapshots."""
collection = 'snapshots'
def _update(self, *args, **kwargs):
db.snapshot_update(*args, **kwargs)
def _get(self, *args, **kwargs):
return self.volume_api.get_snapshot(*args, **kwargs)
def _delete(self, *args, **kwargs):
return self.volume_api.delete_snapshot(*args, **kwargs)
class BackupAdminController(AdminController):
"""AdminController for Backups."""
collection = 'backups'
valid_status = set(['available',
'error'
])
@wsgi.action('os-reset_status')
def _reset_status(self, req, id, body):
"""Reset status on the resource."""
context = req.environ['cinder.context']
self.authorize(context, 'reset_status')
update = self.validate_update(body['os-reset_status'])
msg = "Updating %(resource)s '%(id)s' with '%(update)r'"
LOG.debug(msg, {'resource': self.resource_name, 'id': id,
'update': update})
notifier_info = {'id': id, 'update': update}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, self.collection + '.reset_status.start',
notifier_info)
try:
self.backup_api.reset_status(context=context, backup_id=id,
status=update['status'])
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
return webob.Response(status_int=202)
class Admin_actions(extensions.ExtensionDescriptor):
"""Enable admin actions."""
name = "AdminActions"
alias = "os-admin-actions"
namespace = "http://docs.openstack.org/volume/ext/admin-actions/api/v1.1"
updated = "2012-08-25T00:00:00+00:00"
def get_controller_extensions(self):
exts = []
for class_ in (VolumeAdminController, SnapshotAdminController,
BackupAdminController):
controller = class_()
extension = extensions.ControllerExtension(
self, class_.collection, controller)
exts.append(extension)
return exts
| apache-2.0 | -87,235,681,852,157,700 | 36.449511 | 79 | 0.596677 | false |
litedesk/litedesk-webserver-provision | src/provisioning/models.py | 1 | 20144 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import datetime
from urlparse import urlparse
from autoslug import AutoSlugField
from django.conf import settings
from django.core.mail import send_mail
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.template.loader import render_to_string
from litedesk.lib import airwatch
from model_utils import Choices
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel, TimeFramedModel, StatusModel
from qrcode.image.pure import PymagingImage
import qrcode
from audit.models import Trackable
from contrib.models import PropertyTable
from tenants.models import Tenant, TenantService, User
from signals import item_provisioned, item_deprovisioned
import okta
log = logging.getLogger(__name__)
class Provisionable(object):
def activate(self, user, **kw):
raise NotImplementedError
def deprovision(self, service, user, *args, **kw):
raise NotImplementedError
def provision(self, service, user, *args, **kw):
raise NotImplementedError
class UserProvisionable(TimeStampedModel):
user = models.ForeignKey(User)
service = models.ForeignKey(TenantService)
item_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
item = GenericForeignKey('item_type', 'object_id')
@property
def tenant(self):
return self.user.tenant
def __unicode__(self):
return '%s provision for user %s on %s' % (
self.item, self.user, self.service)
class Meta:
unique_together = ('user', 'service', 'item_type', 'object_id')
class UserProvisionHistory(Trackable, TimeFramedModel):
user = models.ForeignKey(User)
service = models.ForeignKey(TenantService)
item_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
item = GenericForeignKey('item_type', 'object_id')
@staticmethod
def on_provision(*args, **kw):
user = kw.get('user')
provisioned_item = kw.get('instance')
item_type = ContentType.objects.get_for_model(provisioned_item)
entry = UserProvisionHistory(
user=user,
service=kw.get('service'),
item_type=item_type,
object_id=provisioned_item.id,
start=datetime.datetime.now()
)
entry.save(editor=kw.get('editor'))
@staticmethod
def on_deprovision(*args, **kw):
user = kw.get('user')
provisioned_item = kw.get('instance')
item_type = ContentType.objects.get_for_model(provisioned_item)
for entry in user.userprovisionhistory_set.filter(
item_type=item_type,
object_id=provisioned_item.id,
service=kw.get('service'),
end__isnull=True
):
entry.end = datetime.datetime.now()
entry.save(editor=kw.get('editor'))
class Asset(TimeStampedModel, Provisionable):
objects = InheritanceManager()
name = models.CharField(max_length=1000)
slug = AutoSlugField(populate_from='name', unique=False, default='')
description = models.TextField(null=True, blank=True)
web = models.BooleanField(default=True)
mobile = models.BooleanField(default=False)
desktop = models.BooleanField(default=False)
@property
def __subclassed__(self):
return Asset.objects.get_subclass(id=self.id)
@property
def supported_platforms(self):
return [p for p in ['web', 'mobile', 'desktop'] if getattr(self, p)]
def provision(self, service, user, editor=None):
if self.can_be_managed_by(service):
UserProvisionable.objects.create(
service=service,
user=user,
item_type=ContentType.objects.get_for_model(self),
object_id=self.id
)
item_provisioned.send(
sender=self.__class__,
editor=editor,
instance=self,
service=service,
user=user
)
def deprovision(self, service, user, editor=None):
UserProvisionable.objects.filter(
service=service,
user=user,
item_type=ContentType.objects.get_for_model(self),
object_id=self.id
).delete()
item_deprovisioned.send(
sender=self.__class__,
editor=editor,
instance=self,
service=service,
user=user
)
def can_be_managed_by(self, service):
return service.type in self.supported_platforms
def __unicode__(self):
return self.name
class Software(Asset):
EXPENSE_CATEGORY = 'software'
def provision(self, service, user, editor=None):
service.assign(self, user)
super(Software, self).provision(service, user, editor=editor)
def deprovision(self, service, user, editor=None):
service.unassign(self, user)
super(Software, self).deprovision(service, user, editor=editor)
class Device(Asset):
EXPENSE_CATEGORY = 'devices'
image = models.ImageField(null=True, blank=True)
@property
def __subclassed__(self):
if 'chrome' in self.name.lower():
self.__class__ = ChromeDevice
return self
def _get_email_template_parameters(self, service, user):
device = self.__subclassed__
if isinstance(device, ChromeDevice):
return {
'user': user,
'service': service,
'site': settings.SITE,
'device': device,
'title': '%s - Welcome to Google' % settings.SITE.get('name'),
'include_additional_information_message': 'true'
}
return None
def _get_email_template(self, service, format='html'):
extension = {
'text': 'txt',
'html': 'html'
}.get(format, format)
template_name = None
if isinstance(self.__subclassed__, ChromeDevice):
template_name = 'activation_chromebook'
return template_name and 'provisioning/mail/%s/%s.tmpl.%s' % (
format, template_name, extension
)
def provision(self, service, user, editor=None):
super(Device, self).provision(service, user, editor=editor)
html_template = self._get_email_template(service, format='html')
text_template = self._get_email_template(service, format='text')
if not (html_template or text_template):
return
template_parameters = self._get_email_template_parameters(service, user)
text_msg = render_to_string(text_template, template_parameters)
html_msg = render_to_string(html_template, template_parameters)
send_mail(
template_parameters['title'],
text_msg,
settings.DEFAULT_FROM_EMAIL,
[user.email],
html_message=html_msg
)
def activate(self, user, *args, **kw):
pass
class MobileDataPlan(Asset):
pass
class ChromeDevice(Device):
def can_be_managed_by(self, service):
return service.type == TenantService.PLATFORM_TYPE_CHOICES.web
class Meta:
proxy = True
class TenantAsset(PropertyTable):
tenant = models.ForeignKey(Tenant)
asset = models.ForeignKey(Asset)
class Meta:
unique_together = ('tenant', 'asset')
class InventoryEntry(Trackable, StatusModel):
STATUS = Choices('handed_out', 'returned')
user = models.ForeignKey(User)
tenant_asset = models.ForeignKey(TenantAsset)
serial_number = models.CharField(max_length=100, null=False, default='N/A')
@property
def tenant(self):
return self.user.tenant
def save(self, *args, **kwargs):
super(InventoryEntry, self).save(
editor=self.user.tenant.primary_contact, *args, **kwargs)
    # TODO: if the inventory item is a Google device, make a call to the
    # Google API to save the username in the annotated-user field.
def __unicode__(self):
return '%s (%s)' % (self.user.username, self.serial_number)
class Okta(TenantService, Provisionable):
PLATFORM_TYPE = TenantService.PLATFORM_TYPE_CHOICES.web
ACTIVE_DIRECTORY_CONTROLLER = True
DEACTIVATION_EXCEPTION = okta.UserNotActiveError
domain = models.CharField(max_length=200)
@property
def portal_url(self):
return 'https://%s.okta.com' % self.domain
@property
def portal_help_url(self):
return '%s/help/login' % self.portal_url
def get_client(self):
return okta.Client(self.domain, self.api_token)
def get_service_user(self, user):
client = self.get_client()
return client.get(okta.User, user.tenant_email)
def get_users(self):
client = self.get_client()
return client.get_users()
def register(self, user):
client = self.get_client()
try:
client.add_user(user, activate=False)
except okta.UserAlreadyExistsError:
pass
return self.get_service_user(user)
def activate(self, user, editor=None):
client = self.get_client()
try:
service_user = self.get_service_user(user)
except okta.ResourceDoesNotExistError:
service_user = self.register(user)
status_before = getattr(service_user, 'status', 'STAGED')
activation_url = None
try:
activation_response = client.activate_user(service_user,
send_email=False)
except okta.UserAlreadyActivatedError:
pass
else:
if status_before == 'STAGED':
activation_url = activation_response.get('activationUrl')
password = user.get_remote().set_one_time_password()
template_parameters = {
'user': user,
'service': self,
'site': settings.SITE,
'activation_url': activation_url,
'password': password
}
text_msg = render_to_string(
'provisioning/mail/text/activation_okta.tmpl.txt',
template_parameters
)
html_msg = render_to_string(
'provisioning/mail/html/activation_okta.tmpl.html',
template_parameters
)
send_mail(
'%s - Welcome to %s' % (settings.SITE.get('name'), self.name),
text_msg,
settings.DEFAULT_FROM_EMAIL,
[user.email],
html_message=html_msg
)
super(Okta, self).activate(user, editor)
def assign(self, asset, user):
log.debug('Assigning %s to %s on Okta' % (asset, user))
metadata, _ = self.tenantserviceasset_set.get_or_create(asset=asset)
client = self.get_client()
service_user = self.get_service_user(user)
service_application = client.get(okta.Application,
metadata.get('application_id'))
try:
service_application.assign(service_user,
profile=metadata.get('profile'))
except Exception, why:
log.warn('Error when assigning %s to %s: %s' % (asset, user, why))
def unassign(self, asset, user):
log.debug('Removing %s from %s on Okta' % (asset, user))
metadata, _ = self.tenantserviceasset_set.get_or_create(asset=asset)
client = self.get_client()
service_user = self.get_service_user(user)
service_application = client.get(okta.Application,
metadata.get('application_id'))
try:
service_application.unassign(service_user)
except okta.UserApplicationNotFound, e:
log.info('Failed to unassign %s from %s: %s' % (asset, user, e))
except Exception, why:
log.warn('Error when unassigning %s to %s: %s' % (asset, user, why))
@classmethod
def get_serializer_data(cls, **data):
return {
'domain': data.get('domain')
}
class Meta:
verbose_name = 'Okta'
class AirWatch(TenantService, Provisionable):
PLATFORM_TYPE = 'mobile'
QRCODE_ROOT_DIR = os.path.join(settings.MEDIA_ROOT, 'airwatch_qrcodes')
QRCODE_ROOT_URL = settings.SITE.get(
'host_url') + settings.MEDIA_URL + 'airwatch_qrcodes/'
QRCODE_TEMPLATE = 'https://awagent.com?serverurl={0}&gid={1}'
DEACTIVATION_EXCEPTION = airwatch.user.UserNotActiveError
username = models.CharField(max_length=80)
password = models.CharField(max_length=1000)
server_url = models.URLField()
group_id = models.CharField(max_length=80)
@property
def portal_domain(self):
portal_domain = urlparse(self.server_url).netloc
if portal_domain.startswith('as'):
portal_domain = portal_domain.replace('as', 'ds', 1)
return portal_domain
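    # Illustrative sketch: given the replacement above, a hypothetical
    # server_url of 'https://as123.awmdm.com' yields the device-services
    # domain 'ds123.awmdm.com'.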
def get_client(self):
return airwatch.client.Client(
self.server_url, self.username, self.password, self.api_token
)
def get_service_user(self, user):
client = self.get_client()
service_user = airwatch.user.User.get_remote(client, user.username)
if service_user is None:
service_user = airwatch.user.User.create(client, user.username)
return service_user
def get_usergroup(self, group_name):
client = self.get_client()
return airwatch.group.UserGroupHacked.get_remote(client, group_name)
def get_smartgroup(self, smartgroup_id):
client = self.get_client()
return airwatch.group.SmartGroup.get_remote(client, smartgroup_id)
def register(self, user):
client = self.get_client()
try:
return airwatch.user.User.create(client, user.username)
except airwatch.user.UserAlreadyRegisteredError:
return self.get_service_user(user)
@property
def qrcode(self):
server_domain = self.portal_domain
image_dir = os.path.join(self.QRCODE_ROOT_DIR, server_domain)
image_file_name = '{0}.png'.format(self.group_id)
image_file_path = os.path.join(image_dir, image_file_name)
if not os.path.exists(image_file_path):
if not os.path.exists(image_dir):
os.makedirs(image_dir)
data = self.QRCODE_TEMPLATE.format(server_domain, self.group_id)
image = qrcode.make(data, image_factory=PymagingImage, box_size=5)
with open(image_file_path, 'w') as image_file:
image.save(image_file)
image_url = self.QRCODE_ROOT_URL + server_domain + '/' + image_file_name
return image_url
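    # Illustrative sketch (hypothetical values): with group_id='GRP1' and a
    # portal domain of 'ds123.awmdm.com', the generated QR code encodes
    # 'https://awagent.com?serverurl=ds123.awmdm.com&gid=GRP1', which the
    # AirWatch agent scans to enroll a device.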
def activate(self, user, editor=None):
service_user = self.get_service_user(user)
if service_user is None:
service_user = self.register(user)
try:
title = '%s - Welcome to AirWatch' % settings.SITE.get('name')
service_user.activate()
template_parameters = {
'user': user,
'service': self,
'site': settings.SITE,
'qr_code': self.qrcode
}
text_msg = render_to_string(
'provisioning/mail/text/activation_airwatch.tmpl.txt',
template_parameters
)
html_msg = render_to_string(
'provisioning/mail/html/activation_airwatch.tmpl.html',
template_parameters
)
send_mail(
title,
text_msg,
settings.DEFAULT_FROM_EMAIL,
[user.email],
html_message=html_msg
)
except airwatch.user.UserAlreadyActivatedError:
pass
else:
super(AirWatch, self).activate(user, editor)
def deactivate(self, user, editor=None):
super(AirWatch, self).deactivate(user, editor)
self.get_service_user(user).delete()
def __group_and_aw_user(self, software, user):
metadata, _ = self.tenantserviceasset_set.get_or_create(asset=software)
group = self.get_usergroup(metadata.get('group_name'))
service_user = self.get_service_user(user)
return group, service_user
def assign(self, software, user):
if self.type not in software.supported_platforms:
return
log.debug('Assigning %s to %s on Airwatch' % (software, user))
group, aw_user = self.__group_and_aw_user(software, user)
try:
group.add_member(aw_user)
except airwatch.user.UserAlreadyEnrolledError:
pass
def unassign(self, software, user):
if self.type not in software.supported_platforms:
return
log.debug('Removing %s from %s on Airwatch' % (software, user))
group, aw_user = self.__group_and_aw_user(software, user)
try:
group.remove_member(aw_user)
except airwatch.user.UserNotEnrolledError:
pass
def get_all_devices(self):
endpoint = 'mdm/devices/search'
response = self.get_client().call_api(
'GET', endpoint)
response.raise_for_status()
if response.status_code == 200:
            devices = [{'model': d['Model'],
                        'username': d['UserName'],
                        'serial_number': d['SerialNumber']}
                       for d in response.json().get('Devices')]
return devices
def get_available_devices(self):
return [d for d in self.get_all_devices()
if d['username'] == '' or d['username'] == 'staging']
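    # Illustrative sketch of the shape returned by the two methods above
    # (field values are made up):
    #     [{'model': 'iPad Air', 'username': '', 'serial_number': 'ABC123'}]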
@classmethod
def get_serializer_data(cls, **data):
return {
'username': data.get('username'),
'password': data.get('password'),
'server_url': data.get('server_url'),
'group_id': data.get('group_id')
}
class Meta:
verbose_name = 'AirWatch'
class MobileIron(TenantService, Provisionable):
PLATFORM_TYPE = 'mobile'
class TenantServiceAsset(PropertyTable):
service = models.ForeignKey(TenantService)
asset = models.ForeignKey(Asset)
@property
def tenant(self):
return self.service.tenant
@property
def platform(self):
return self.service.type
def __unicode__(self):
return 'Asset %s on %s' % (self.asset, self.service)
class Meta:
unique_together = ('service', 'asset')
class LastSeenEvent(TimeStampedModel):
user = models.ForeignKey(User)
item_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
item = GenericForeignKey('item_type', 'object_id')
last_seen = models.DateTimeField()
item_provisioned.connect(UserProvisionHistory.on_provision,
dispatch_uid='provision')
item_deprovisioned.connect(UserProvisionHistory.on_deprovision,
dispatch_uid='deprovision')
if not getattr(settings, 'PROVISIONABLE_SERVICES'):
settings.PROVISIONABLE_SERVICES = [
'.'.join([__name__, k.__name__]) for k in [Okta, AirWatch, MobileIron]
]
if not getattr(settings, 'ASSET_CLASSES', []):
settings.ASSET_CLASSES = [
'.'.join([__name__, k.__name__]) for k in
[Software, Device, MobileDataPlan]
]
| apache-2.0 | 4,245,768,454,504,376,300 | 32.186161 | 90 | 0.610653 | false |
davidh-ssec/pyresample | pyresample/test/test_kd_tree.py | 1 | 44711 | from __future__ import with_statement
import os
import sys
import six
import numpy as np
from pyresample import geometry, kd_tree, utils
from pyresample.test.utils import catch_warnings
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.area_def = geometry.AreaDefinition('areaD',
'Europe (3km, HRV, VTC)',
'areaD',
{'a': '6378144.0',
'b': '6356759.0',
'lat_0': '50.00',
'lat_ts': '50.00',
'lon_0': '8.00',
'proj': 'stere'},
800,
800,
[-1370912.72,
-909968.64000000001,
1029087.28,
1490031.3600000001])
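        # The extent above is [x_min, y_min, x_max, y_max] in projection
        # metres (the usual pyresample area_extent convention) for the
        # stereographic grid used throughout these tests.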
cls.tdata = np.array([1, 2, 3])
cls.tlons = np.array([11.280789, 12.649354, 12.080402])
cls.tlats = np.array([56.011037, 55.629675, 55.641535])
cls.tswath = geometry.SwathDefinition(lons=cls.tlons, lats=cls.tlats)
cls.tgrid = geometry.CoordinateDefinition(
lons=np.array([12.562036]), lats=np.array([55.715613]))
def test_nearest_base(self):
res = kd_tree.resample_nearest(self.tswath,
self.tdata.ravel(), self.tgrid,
100000, reduce_data=False, segments=1)
self.assertTrue(res[0] == 2)
def test_gauss_base(self):
with catch_warnings(UserWarning) as w:
res = kd_tree.resample_gauss(self.tswath,
self.tdata.ravel(), self.tgrid,
50000, 25000, reduce_data=False, segments=1)
self.assertFalse(len(w) != 1)
self.assertFalse(('Searching' not in str(w[0].message)))
self.assertAlmostEqual(res[0], 2.2020729, 5)
def test_custom_base(self):
def wf(dist):
return 1 - dist / 100000.0
with catch_warnings(UserWarning) as w:
res = kd_tree.resample_custom(self.tswath,
self.tdata.ravel(), self.tgrid,
50000, wf, reduce_data=False, segments=1)
self.assertFalse(len(w) != 1)
self.assertFalse(('Searching' not in str(w[0].message)))
self.assertAlmostEqual(res[0], 2.4356757, 5)
def test_gauss_uncert(self):
sigma = utils.fwhm2sigma(41627.730557884883)
with catch_warnings(UserWarning) as w:
res, stddev, count = kd_tree.resample_gauss(self.tswath, self.tdata,
self.tgrid, 100000, sigma,
with_uncert=True)
self.assertTrue(len(w) > 0)
self.assertTrue((any('Searching' in str(_w.message) for _w in w)))
expected_res = 2.20206560694
expected_stddev = 0.707115076173
expected_count = 3
self.assertAlmostEqual(res[0], expected_res, 5)
self.assertAlmostEqual(stddev[0], expected_stddev, 5)
self.assertEqual(count[0], expected_count)
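    # Background sketch for fwhm2sigma above: assuming the usual Gaussian
    # relation sigma = fwhm / (2 * sqrt(2 * ln 2)) ~ fwhm / 2.3548, the
    # ~41.6 km FWHM corresponds to a sigma of roughly 17.7 km.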
def test_custom_uncert(self):
def wf(dist):
return 1 - dist / 100000.0
with catch_warnings(UserWarning) as w:
res, stddev, counts = kd_tree.resample_custom(self.tswath,
self.tdata, self.tgrid,
100000, wf, with_uncert=True)
self.assertTrue(len(w) > 0)
self.assertTrue((any('Searching' in str(_w.message) for _w in w)))
self.assertAlmostEqual(res[0], 2.32193149, 5)
self.assertAlmostEqual(stddev[0], 0.81817972, 5)
self.assertEqual(counts[0], 3)
def test_nearest(self):
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_nearest(swath_def, data.ravel(),
self.area_def, 50000, segments=1)
cross_sum = res.sum()
expected = 15874591.0
self.assertEqual(cross_sum, expected)
def test_nearest_masked_swath_target(self):
"""Test that a masked array works as a target."""
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
mask = np.ones_like(lons, dtype=np.bool)
mask[::2, ::2] = False
swath_def = geometry.SwathDefinition(
lons=np.ma.masked_array(lons, mask=mask),
lats=np.ma.masked_array(lats, mask=False)
)
res = kd_tree.resample_nearest(swath_def, data.ravel(),
swath_def, 50000, segments=3)
cross_sum = res.sum()
# expected = 12716 # if masks aren't respected
expected = 12000
self.assertEqual(cross_sum, expected)
def test_nearest_1d(self):
data = np.fromfunction(lambda x, y: x * y, (800, 800))
lons = np.fromfunction(lambda x: 3 + x / 100., (500,))
lats = np.fromfunction(lambda x: 75 - x / 10., (500,))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_nearest(self.area_def, data.ravel(),
swath_def, 50000, segments=1)
cross_sum = res.sum()
expected = 35821299.0
self.assertEqual(res.shape, (500,))
self.assertEqual(cross_sum, expected)
def test_nearest_empty(self):
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 165 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_nearest(swath_def, data.ravel(),
self.area_def, 50000, segments=1)
cross_sum = res.sum()
expected = 0
self.assertEqual(cross_sum, expected)
def test_nearest_empty_multi(self):
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 165 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
data_multi = np.column_stack((data.ravel(), data.ravel(),
data.ravel()))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_nearest(swath_def, data_multi,
self.area_def, 50000, segments=1)
self.assertEqual(res.shape, (800, 800, 3),
msg='Swath resampling nearest empty multi failed')
def test_nearest_empty_multi_masked(self):
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 165 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
data_multi = np.column_stack((data.ravel(), data.ravel(),
data.ravel()))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_nearest(swath_def, data_multi,
self.area_def, 50000, segments=1,
fill_value=None)
self.assertEqual(res.shape, (800, 800, 3))
def test_nearest_empty_masked(self):
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 165 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_nearest(swath_def, data.ravel(),
self.area_def, 50000, segments=1,
fill_value=None)
cross_sum = res.mask.sum()
expected = res.size
self.assertTrue(cross_sum == expected)
def test_nearest_segments(self):
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_nearest(swath_def, data.ravel(),
self.area_def, 50000, segments=2)
cross_sum = res.sum()
expected = 15874591.0
self.assertEqual(cross_sum, expected)
def test_nearest_remap(self):
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_nearest(swath_def, data.ravel(),
self.area_def, 50000, segments=1)
remap = kd_tree.resample_nearest(self.area_def, res.ravel(),
swath_def, 5000, segments=1)
cross_sum = remap.sum()
expected = 22275.0
self.assertEqual(cross_sum, expected)
def test_nearest_mp(self):
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_nearest(swath_def, data.ravel(),
self.area_def, 50000, nprocs=2, segments=1)
cross_sum = res.sum()
expected = 15874591.0
self.assertEqual(cross_sum, expected)
def test_nearest_multi(self):
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
data_multi = np.column_stack((data.ravel(), data.ravel(),
data.ravel()))
res = kd_tree.resample_nearest(swath_def, data_multi,
self.area_def, 50000, segments=1)
cross_sum = res.sum()
expected = 3 * 15874591.0
self.assertEqual(cross_sum, expected)
def test_nearest_multi_unraveled(self):
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
data_multi = np.dstack((data, data, data))
res = kd_tree.resample_nearest(swath_def, data_multi,
self.area_def, 50000, segments=1)
cross_sum = res.sum()
expected = 3 * 15874591.0
self.assertEqual(cross_sum, expected)
def test_gauss_sparse(self):
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_gauss(swath_def, data.ravel(),
self.area_def, 50000, 25000, fill_value=-1, segments=1)
cross_sum = res.sum()
expected = 15387753.9852
self.assertAlmostEqual(cross_sum, expected, places=3)
def test_gauss(self):
data = np.fromfunction(lambda y, x: (y + x) * 10 ** -5, (5000, 100))
lons = np.fromfunction(
lambda y, x: 3 + (10.0 / 100) * x, (5000, 100))
lats = np.fromfunction(
lambda y, x: 75 - (50.0 / 5000) * y, (5000, 100))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
with catch_warnings(UserWarning) as w:
res = kd_tree.resample_gauss(swath_def, data.ravel(),
self.area_def, 50000, 25000, segments=1)
self.assertFalse(len(w) != 1)
self.assertFalse(('Possible more' not in str(w[0].message)))
cross_sum = res.sum()
expected = 4872.8100353517921
self.assertAlmostEqual(cross_sum, expected)
def test_gauss_fwhm(self):
data = np.fromfunction(lambda y, x: (y + x) * 10 ** -5, (5000, 100))
lons = np.fromfunction(
lambda y, x: 3 + (10.0 / 100) * x, (5000, 100))
lats = np.fromfunction(
lambda y, x: 75 - (50.0 / 5000) * y, (5000, 100))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
with catch_warnings(UserWarning) as w:
res = kd_tree.resample_gauss(swath_def, data.ravel(),
self.area_def, 50000, utils.fwhm2sigma(41627.730557884883), segments=1)
self.assertFalse(len(w) != 1)
self.assertFalse(('Possible more' not in str(w[0].message)))
cross_sum = res.sum()
expected = 4872.8100353517921
self.assertAlmostEqual(cross_sum, expected)
def test_gauss_multi(self):
data = np.fromfunction(lambda y, x: (y + x) * 10 ** -6, (5000, 100))
lons = np.fromfunction(
lambda y, x: 3 + (10.0 / 100) * x, (5000, 100))
lats = np.fromfunction(
lambda y, x: 75 - (50.0 / 5000) * y, (5000, 100))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
data_multi = np.column_stack((data.ravel(), data.ravel(),
data.ravel()))
with catch_warnings(UserWarning) as w:
res = kd_tree.resample_gauss(swath_def, data_multi,
self.area_def, 50000, [25000, 15000, 10000], segments=1)
self.assertFalse(len(w) != 1)
self.assertFalse(('Possible more' not in str(w[0].message)))
cross_sum = res.sum()
expected = 1461.8429990248171
self.assertAlmostEqual(cross_sum, expected)
def test_gauss_multi_uncert(self):
data = np.fromfunction(lambda y, x: (y + x) * 10 ** -6, (5000, 100))
lons = np.fromfunction(
lambda y, x: 3 + (10.0 / 100) * x, (5000, 100))
lats = np.fromfunction(
lambda y, x: 75 - (50.0 / 5000) * y, (5000, 100))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
data_multi = np.column_stack((data.ravel(), data.ravel(),
data.ravel()))
with catch_warnings(UserWarning) as w:
            # The assertions below check that at least one warning was raised
            # and that one of them contains a specific message from pyresample.
            # On Python 2.7.9+ the resample_gauss method raises multiple
            # deprecation warnings that would make a strict single-warning
            # check fail, so we ignore the unrelated warnings.
res, stddev, counts = kd_tree.resample_gauss(swath_def, data_multi,
self.area_def, 50000, [
25000, 15000, 10000],
segments=1, with_uncert=True)
self.assertTrue(len(w) >= 1)
self.assertTrue(
any(['Possible more' in str(x.message) for x in w]))
cross_sum = res.sum()
cross_sum_counts = counts.sum()
expected = 1461.8429990248171
expected_stddev = [0.44621800779801657, 0.44363137712896705,
0.43861019464274459]
expected_counts = 4934802.0
self.assertTrue(res.shape == stddev.shape and stddev.shape ==
counts.shape and counts.shape == (800, 800, 3))
self.assertAlmostEqual(cross_sum, expected)
for i, e_stddev in enumerate(expected_stddev):
cross_sum_stddev = stddev[:, :, i].sum()
self.assertAlmostEqual(cross_sum_stddev, e_stddev)
self.assertAlmostEqual(cross_sum_counts, expected_counts)
def test_gauss_multi_mp(self):
data = np.fromfunction(lambda y, x: (y + x) * 10 ** -6, (5000, 100))
lons = np.fromfunction(
lambda y, x: 3 + (10.0 / 100) * x, (5000, 100))
lats = np.fromfunction(
lambda y, x: 75 - (50.0 / 5000) * y, (5000, 100))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
data_multi = np.column_stack((data.ravel(), data.ravel(),
data.ravel()))
with catch_warnings(UserWarning) as w:
res = kd_tree.resample_gauss(swath_def, data_multi,
self.area_def, 50000, [
25000, 15000, 10000],
nprocs=2, segments=1)
self.assertFalse(len(w) != 1)
self.assertFalse(('Possible more' not in str(w[0].message)))
cross_sum = res.sum()
expected = 1461.8429990248171
self.assertAlmostEqual(cross_sum, expected)
def test_gauss_multi_mp_segments(self):
data = np.fromfunction(lambda y, x: (y + x) * 10 ** -6, (5000, 100))
lons = np.fromfunction(
lambda y, x: 3 + (10.0 / 100) * x, (5000, 100))
lats = np.fromfunction(
lambda y, x: 75 - (50.0 / 5000) * y, (5000, 100))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
data_multi = np.column_stack((data.ravel(), data.ravel(),
data.ravel()))
with catch_warnings(UserWarning) as w:
res = kd_tree.resample_gauss(swath_def, data_multi,
self.area_def, 50000, [
25000, 15000, 10000],
nprocs=2, segments=1)
self.assertFalse(len(w) != 1)
self.assertFalse('Possible more' not in str(w[0].message))
cross_sum = res.sum()
expected = 1461.8429990248171
self.assertAlmostEqual(cross_sum, expected)
def test_gauss_multi_mp_segments_empty(self):
data = np.fromfunction(lambda y, x: (y + x) * 10 ** -6, (5000, 100))
lons = np.fromfunction(
lambda y, x: 165 + (10.0 / 100) * x, (5000, 100))
lats = np.fromfunction(
lambda y, x: 75 - (50.0 / 5000) * y, (5000, 100))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
data_multi = np.column_stack((data.ravel(), data.ravel(),
data.ravel()))
res = kd_tree.resample_gauss(swath_def, data_multi,
self.area_def, 50000, [
25000, 15000, 10000],
nprocs=2, segments=1)
cross_sum = res.sum()
self.assertTrue(cross_sum == 0)
def test_custom(self):
def wf(dist):
return 1 - dist / 100000.0
data = np.fromfunction(lambda y, x: (y + x) * 10 ** -5, (5000, 100))
lons = np.fromfunction(
lambda y, x: 3 + (10.0 / 100) * x, (5000, 100))
lats = np.fromfunction(
lambda y, x: 75 - (50.0 / 5000) * y, (5000, 100))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
with catch_warnings(UserWarning) as w:
res = kd_tree.resample_custom(swath_def, data.ravel(),
self.area_def, 50000, wf, segments=1)
self.assertFalse(len(w) != 1)
self.assertFalse(('Possible more' not in str(w[0].message)))
cross_sum = res.sum()
expected = 4872.8100347930776
self.assertAlmostEqual(cross_sum, expected)
def test_custom_multi(self):
def wf1(dist):
return 1 - dist / 100000.0
def wf2(dist):
return 1
def wf3(dist):
return np.cos(dist) ** 2
data = np.fromfunction(lambda y, x: (y + x) * 10 ** -6, (5000, 100))
lons = np.fromfunction(
lambda y, x: 3 + (10.0 / 100) * x, (5000, 100))
lats = np.fromfunction(
lambda y, x: 75 - (50.0 / 5000) * y, (5000, 100))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
data_multi = np.column_stack((data.ravel(), data.ravel(),
data.ravel()))
with catch_warnings(UserWarning) as w:
res = kd_tree.resample_custom(swath_def, data_multi,
self.area_def, 50000, [wf1, wf2, wf3], segments=1)
self.assertFalse(len(w) != 1)
self.assertFalse('Possible more' not in str(w[0].message))
cross_sum = res.sum()
expected = 1461.8428378742638
self.assertAlmostEqual(cross_sum, expected)
def test_masked_nearest(self):
data = np.ones((50, 10))
data[:, 5:] = 2
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
mask = np.ones((50, 10))
mask[:, :5] = 0
masked_data = np.ma.array(data, mask=mask)
res = kd_tree.resample_nearest(swath_def, masked_data.ravel(),
self.area_def, 50000, segments=1)
expected_mask = np.fromfile(os.path.join(os.path.dirname(__file__),
'test_files',
'mask_test_nearest_mask.dat'),
sep=' ').reshape((800, 800))
expected_data = np.fromfile(os.path.join(os.path.dirname(__file__),
'test_files',
'mask_test_nearest_data.dat'),
sep=' ').reshape((800, 800))
self.assertTrue(np.array_equal(expected_mask, res.mask))
self.assertTrue(np.array_equal(expected_data, res.data))
def test_masked_nearest_1d(self):
data = np.ones((800, 800))
data[:400, :] = 2
lons = np.fromfunction(lambda x: 3 + x / 100., (500,))
lats = np.fromfunction(lambda x: 75 - x / 10., (500,))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
mask = np.ones((800, 800))
mask[400:, :] = 0
masked_data = np.ma.array(data, mask=mask)
res = kd_tree.resample_nearest(self.area_def, masked_data.ravel(),
swath_def, 50000, segments=1)
self.assertEqual(res.mask.sum(), 112)
def test_masked_gauss(self):
data = np.ones((50, 10))
data[:, 5:] = 2
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
mask = np.ones((50, 10))
mask[:, :5] = 0
masked_data = np.ma.array(data, mask=mask)
res = kd_tree.resample_gauss(swath_def, masked_data.ravel(),
self.area_def, 50000, 25000, segments=1)
expected_mask = np.fromfile(os.path.join(os.path.dirname(__file__),
'test_files',
'mask_test_mask.dat'),
sep=' ').reshape((800, 800))
expected_data = np.fromfile(os.path.join(os.path.dirname(__file__),
'test_files',
'mask_test_data.dat'),
sep=' ').reshape((800, 800))
expected = expected_data.sum()
cross_sum = res.data.sum()
self.assertTrue(np.array_equal(expected_mask, res.mask))
self.assertAlmostEqual(cross_sum, expected, places=3)
def test_masked_fill_float(self):
data = np.ones((50, 10))
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_nearest(swath_def, data.ravel(),
self.area_def, 50000, fill_value=None, segments=1)
expected_fill_mask = np.fromfile(os.path.join(os.path.dirname(__file__),
'test_files',
'mask_test_fill_value.dat'),
sep=' ').reshape((800, 800))
fill_mask = res.mask
self.assertTrue(np.array_equal(fill_mask, expected_fill_mask))
def test_masked_fill_int(self):
data = np.ones((50, 10)).astype('int')
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_nearest(swath_def, data.ravel(),
self.area_def, 50000, fill_value=None, segments=1)
expected_fill_mask = np.fromfile(os.path.join(os.path.dirname(__file__),
'test_files',
'mask_test_fill_value.dat'),
sep=' ').reshape((800, 800))
fill_mask = res.mask
self.assertTrue(np.array_equal(fill_mask, expected_fill_mask))
def test_masked_full(self):
data = np.ones((50, 10))
data[:, 5:] = 2
mask = np.ones((50, 10))
mask[:, :5] = 0
masked_data = np.ma.array(data, mask=mask)
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
        res = kd_tree.resample_nearest(swath_def, masked_data.ravel(),
                                       self.area_def, 50000,
                                       fill_value=None, segments=1)
expected_fill_mask = np.fromfile(os.path.join(os.path.dirname(__file__),
'test_files',
'mask_test_full_fill.dat'),
sep=' ').reshape((800, 800))
fill_mask = res.mask
self.assertTrue(np.array_equal(fill_mask, expected_fill_mask))
def test_masked_full_multi(self):
data = np.ones((50, 10))
data[:, 5:] = 2
mask1 = np.ones((50, 10))
mask1[:, :5] = 0
mask2 = np.ones((50, 10))
mask2[:, 5:] = 0
mask3 = np.ones((50, 10))
mask3[:25, :] = 0
data_multi = np.column_stack(
(data.ravel(), data.ravel(), data.ravel()))
mask_multi = np.column_stack(
(mask1.ravel(), mask2.ravel(), mask3.ravel()))
masked_data = np.ma.array(data_multi, mask=mask_multi)
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
res = kd_tree.resample_nearest(swath_def,
masked_data, self.area_def, 50000,
fill_value=None, segments=1)
expected_fill_mask = np.fromfile(os.path.join(os.path.dirname(__file__),
'test_files',
'mask_test_full_fill_multi.dat'),
sep=' ').reshape((800, 800, 3))
fill_mask = res.mask
cross_sum = res.sum()
expected = 357140.0
self.assertAlmostEqual(cross_sum, expected)
self.assertTrue(np.array_equal(fill_mask, expected_fill_mask))
def test_dtype(self):
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
grid_def = geometry.GridDefinition(lons, lats)
lons = np.asarray(lons, dtype='f4')
lats = np.asarray(lats, dtype='f4')
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
valid_input_index, valid_output_index, index_array, distance_array = \
kd_tree.get_neighbour_info(swath_def,
grid_def,
50000, neighbours=1, segments=1)
def test_nearest_from_sample(self):
data = np.fromfunction(lambda y, x: y * x, (50, 10))
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
valid_input_index, valid_output_index, index_array, distance_array = \
kd_tree.get_neighbour_info(swath_def,
self.area_def,
50000, neighbours=1, segments=1)
res = kd_tree.get_sample_from_neighbour_info('nn', (800, 800), data.ravel(),
valid_input_index, valid_output_index,
index_array)
cross_sum = res.sum()
expected = 15874591.0
self.assertEqual(cross_sum, expected)
def test_custom_multi_from_sample(self):
def wf1(dist):
return 1 - dist / 100000.0
def wf2(dist):
return 1
def wf3(dist):
return np.cos(dist) ** 2
data = np.fromfunction(lambda y, x: (y + x) * 10 ** -6, (5000, 100))
lons = np.fromfunction(
lambda y, x: 3 + (10.0 / 100) * x, (5000, 100))
lats = np.fromfunction(
lambda y, x: 75 - (50.0 / 5000) * y, (5000, 100))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
data_multi = np.column_stack((data.ravel(), data.ravel(),
data.ravel()))
with catch_warnings(UserWarning) as w:
valid_input_index, valid_output_index, index_array, distance_array = \
kd_tree.get_neighbour_info(swath_def,
self.area_def,
50000, segments=1)
self.assertFalse(len(w) != 1)
self.assertFalse(('Possible more' not in str(w[0].message)))
res = kd_tree.get_sample_from_neighbour_info('custom', (800, 800),
data_multi,
valid_input_index, valid_output_index,
index_array, distance_array,
weight_funcs=[wf1, wf2, wf3])
cross_sum = res.sum()
expected = 1461.8428378742638
self.assertAlmostEqual(cross_sum, expected)
res = kd_tree.get_sample_from_neighbour_info('custom', (800, 800),
data_multi,
valid_input_index, valid_output_index,
index_array, distance_array,
weight_funcs=[wf1, wf2, wf3])
# Look for error where input data has been manipulated
cross_sum = res.sum()
expected = 1461.8428378742638
self.assertAlmostEqual(cross_sum, expected)
def test_masked_multi_from_sample(self):
data = np.ones((50, 10))
data[:, 5:] = 2
mask1 = np.ones((50, 10))
mask1[:, :5] = 0
mask2 = np.ones((50, 10))
mask2[:, 5:] = 0
mask3 = np.ones((50, 10))
mask3[:25, :] = 0
data_multi = np.column_stack(
(data.ravel(), data.ravel(), data.ravel()))
mask_multi = np.column_stack(
(mask1.ravel(), mask2.ravel(), mask3.ravel()))
masked_data = np.ma.array(data_multi, mask=mask_multi)
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
valid_input_index, valid_output_index, index_array, distance_array = \
kd_tree.get_neighbour_info(swath_def,
self.area_def,
50000, neighbours=1, segments=1)
res = kd_tree.get_sample_from_neighbour_info('nn', (800, 800),
masked_data,
valid_input_index,
valid_output_index, index_array,
fill_value=None)
expected_fill_mask = np.fromfile(os.path.join(os.path.dirname(__file__),
'test_files',
'mask_test_full_fill_multi.dat'),
sep=' ').reshape((800, 800, 3))
fill_mask = res.mask
self.assertTrue(np.array_equal(fill_mask, expected_fill_mask))
class TestXArrayResamplerNN(unittest.TestCase):
"""Test the XArrayResamplerNN class."""
@classmethod
def setUpClass(cls):
import xarray as xr
import dask.array as da
cls.area_def = geometry.AreaDefinition('areaD',
'Europe (3km, HRV, VTC)',
'areaD',
{'a': '6378144.0',
'b': '6356759.0',
'lat_0': '50.00',
'lat_ts': '50.00',
'lon_0': '8.00',
'proj': 'stere'},
800,
800,
[-1370912.72,
-909968.64000000001,
1029087.28,
1490031.3600000001])
dfa = da.from_array # shortcut
cls.chunks = chunks = 5
cls.tgrid = geometry.CoordinateDefinition(
lons=dfa(np.array([
[11.5, 12.562036, 12.9],
[11.5, 12.562036, 12.9],
[11.5, 12.562036, 12.9],
[11.5, 12.562036, 12.9],
]), chunks=chunks),
lats=dfa(np.array([
[55.715613, 55.715613, 55.715613],
[55.715613, 55.715613, 55.715613],
[55.715613, np.nan, 55.715613],
[55.715613, 55.715613, 55.715613],
]), chunks=chunks))
cls.tdata_1d = xr.DataArray(
dfa(np.array([1., 2., 3.]), chunks=chunks), dims=('my_dim1',))
cls.tlons_1d = xr.DataArray(
dfa(np.array([11.280789, 12.649354, 12.080402]), chunks=chunks),
dims=('my_dim1',))
cls.tlats_1d = xr.DataArray(
dfa(np.array([56.011037, 55.629675, 55.641535]), chunks=chunks),
dims=('my_dim1',))
cls.tswath_1d = geometry.SwathDefinition(lons=cls.tlons_1d,
lats=cls.tlats_1d)
cls.data_2d = xr.DataArray(
da.from_array(np.fromfunction(lambda y, x: y * x, (50, 10)),
chunks=5),
dims=('my_dim_y', 'my_dim_x'))
cls.data_3d = xr.DataArray(
da.from_array(np.fromfunction(lambda y, x, b: y * x * b, (50, 10, 3)),
chunks=5),
dims=('my_dim_y', 'my_dim_x', 'bands'),
coords={'bands': ['r', 'g', 'b']})
cls.lons_2d = xr.DataArray(
da.from_array(np.fromfunction(lambda y, x: 3 + x, (50, 10)),
chunks=5),
dims=('my_dim_y', 'my_dim_x'))
cls.lats_2d = xr.DataArray(
da.from_array(np.fromfunction(lambda y, x: 75 - y, (50, 10)),
chunks=5),
dims=('my_dim_y', 'my_dim_x'))
cls.swath_def_2d = geometry.SwathDefinition(lons=cls.lons_2d,
lats=cls.lats_2d)
cls.src_area_2d = geometry.AreaDefinition(
'areaD_src', 'Europe (3km, HRV, VTC)', 'areaD',
{'a': '6378144.0', 'b': '6356759.0', 'lat_0': '52.00',
'lat_ts': '52.00', 'lon_0': '5.00', 'proj': 'stere'}, 50, 10,
[-1370912.72, -909968.64000000001, 1029087.28,
1490031.3600000001])
def test_nearest_swath_1d_mask_to_grid_1n(self):
"""Test 1D swath definition to 2D grid definition; 1 neighbor."""
from pyresample.kd_tree import XArrayResamplerNN
import xarray as xr
import dask.array as da
resampler = XArrayResamplerNN(self.tswath_1d, self.tgrid,
radius_of_influence=100000,
neighbours=1)
data = self.tdata_1d
ninfo = resampler.get_neighbour_info(mask=data.isnull())
for val in ninfo[:3]:
# vii, ia, voi
self.assertIsInstance(val, da.Array)
res = resampler.get_sample_from_neighbour_info(data)
self.assertIsInstance(res, xr.DataArray)
self.assertIsInstance(res.data, da.Array)
actual = res.values
expected = np.array([
[1., 2., 2.],
[1., 2., 2.],
[1., np.nan, 2.],
[1., 2., 2.],
])
np.testing.assert_allclose(actual, expected)
def test_nearest_type_preserve(self):
"""Test 1D swath definition to 2D grid definition; 1 neighbor."""
from pyresample.kd_tree import XArrayResamplerNN
import xarray as xr
import dask.array as da
resampler = XArrayResamplerNN(self.tswath_1d, self.tgrid,
radius_of_influence=100000,
neighbours=1)
data = self.tdata_1d
data = xr.DataArray(da.from_array(np.array([1, 2, 3]),
chunks=5),
dims=('my_dim1',))
ninfo = resampler.get_neighbour_info()
for val in ninfo[:3]:
# vii, ia, voi
self.assertIsInstance(val, da.Array)
res = resampler.get_sample_from_neighbour_info(data, fill_value=255)
self.assertIsInstance(res, xr.DataArray)
self.assertIsInstance(res.data, da.Array)
actual = res.values
expected = np.array([
[1, 2, 2],
[1, 2, 2],
[1, 255, 2],
[1, 2, 2],
])
np.testing.assert_equal(actual, expected)
def test_nearest_swath_2d_mask_to_area_1n(self):
"""Test 2D swath definition to 2D area definition; 1 neighbor."""
from pyresample.kd_tree import XArrayResamplerNN
import xarray as xr
import dask.array as da
swath_def = self.swath_def_2d
data = self.data_2d
resampler = XArrayResamplerNN(swath_def, self.area_def,
radius_of_influence=50000,
neighbours=1)
ninfo = resampler.get_neighbour_info(mask=data.isnull())
for val in ninfo[:3]:
# vii, ia, voi
self.assertIsInstance(val, da.Array)
res = resampler.get_sample_from_neighbour_info(data)
self.assertIsInstance(res, xr.DataArray)
self.assertIsInstance(res.data, da.Array)
res = res.values
cross_sum = np.nansum(res)
expected = 15874591.0
self.assertEqual(cross_sum, expected)
def test_nearest_area_2d_to_area_1n(self):
"""Test 2D area definition to 2D area definition; 1 neighbor."""
from pyresample.kd_tree import XArrayResamplerNN
import xarray as xr
import dask.array as da
data = self.data_2d
resampler = XArrayResamplerNN(self.src_area_2d, self.area_def,
radius_of_influence=50000,
neighbours=1)
ninfo = resampler.get_neighbour_info()
for val in ninfo[:3]:
# vii, ia, voi
self.assertIsInstance(val, da.Array)
self.assertRaises(AssertionError,
resampler.get_sample_from_neighbour_info, data)
# rename data dimensions to match the expected area dimensions
data = data.rename({'my_dim_y': 'y', 'my_dim_x': 'x'})
res = resampler.get_sample_from_neighbour_info(data)
self.assertIsInstance(res, xr.DataArray)
self.assertIsInstance(res.data, da.Array)
res = res.values
cross_sum = np.nansum(res)
expected = 27706753.0
self.assertEqual(cross_sum, expected)
def test_nearest_area_2d_to_area_1n_3d_data(self):
"""Test 2D area definition to 2D area definition; 1 neighbor, 3d data."""
from pyresample.kd_tree import XArrayResamplerNN
import xarray as xr
import dask.array as da
data = self.data_3d
resampler = XArrayResamplerNN(self.src_area_2d, self.area_def,
radius_of_influence=50000,
neighbours=1)
ninfo = resampler.get_neighbour_info()
for val in ninfo[:3]:
# vii, ia, voi
self.assertIsInstance(val, da.Array)
self.assertRaises(AssertionError,
resampler.get_sample_from_neighbour_info, data)
# rename data dimensions to match the expected area dimensions
data = data.rename({'my_dim_y': 'y', 'my_dim_x': 'x'})
res = resampler.get_sample_from_neighbour_info(data)
self.assertIsInstance(res, xr.DataArray)
self.assertIsInstance(res.data, da.Array)
six.assertCountEqual(self, res.coords['bands'], ['r', 'g', 'b'])
res = res.values
cross_sum = np.nansum(res)
expected = 83120259.0
self.assertEqual(cross_sum, expected)
@unittest.skipIf(True, "Multiple neighbors not supported yet")
def test_nearest_swath_1d_mask_to_grid_8n(self):
"""Test 1D swath definition to 2D grid definition; 8 neighbors."""
from pyresample.kd_tree import XArrayResamplerNN
import xarray as xr
import dask.array as da
resampler = XArrayResamplerNN(self.tswath_1d, self.tgrid,
radius_of_influence=100000,
neighbours=8)
data = self.tdata_1d
ninfo = resampler.get_neighbour_info(mask=data.isnull())
for val in ninfo[:3]:
# vii, ia, voi
self.assertIsInstance(val, da.Array)
res = resampler.get_sample_from_neighbour_info(data)
self.assertIsInstance(res, xr.DataArray)
self.assertIsInstance(res.data, da.Array)
# actual = res.values
# expected = TODO
# np.testing.assert_allclose(actual, expected)
def suite():
"""The test suite."""
loader = unittest.TestLoader()
mysuite = unittest.TestSuite()
mysuite.addTest(loader.loadTestsFromTestCase(Test))
mysuite.addTest(loader.loadTestsFromTestCase(TestXArrayResamplerNN))
return mysuite
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | 5,617,309,494,279,428,000 | 46.768162 | 112 | 0.501666 | false |
bender-bot/bender | bender/_tests/test_main.py | 1 | 2381 | from io import StringIO
import pkg_resources
import pytest
import threading
import bender._main
from bender.backbones.console import BenderConsole
from bender.decorators import backbone_start
from bender.testing import VolatileBrain, DumbMessage
@pytest.mark.timeout(3.0)
def test_main(mock):
stdout = StringIO()
stdin = StringIO()
stdin.write(u'hey\nquit\n')
stdin.seek(0)
timer = threading.Timer(1.0, stdin.close)
timer.start()
console = BenderConsole(stdout=stdout, stdin=stdin)
mock.patch.object(bender._main, 'get_console', return_value=console)
mock.patch.object(bender._main, 'get_brain', return_value=VolatileBrain())
assert bender._main.main([]) == 0
assert 'Hey, my name is Bender' in stdout.getvalue()
@pytest.mark.timeout(3.0)
def test_backbone_selection(mock):
"""
Test that we can select backbones from the command line.
"""
quitter = install_quitter_backbone(mock)
mock.patch.object(bender._main, 'get_brain', return_value=VolatileBrain())
assert bender._main.main(['', '--backbone', 'quitter']) == 0
assert quitter.started
def install_quitter_backbone(mock):
"""
installs a "quitter" backbone: a backbone that immediately quits right
after starting.
It is installed as a distutils entry point by mocking the relevant
methods, as close to distutils as possible to ensure all our code is
tested.
This can be moved into a fixture, or even make QuitterBackbone
available in bender.testing.
"""
class QuitterBackbone(object):
def __init__(self):
self.on_message_received = None
self.started = False
@backbone_start
def start(self):
self.on_message_received(DumbMessage('quit', 'user'))
self.started = True
quitter = QuitterBackbone()
factory = lambda: quitter
class EntryPoint(object):
pass
quitter_entry_point = EntryPoint()
quitter_entry_point.name = 'quitter'
quitter_entry_point.load = lambda: factory
original_entry_points = pkg_resources.iter_entry_points
def iter_entry_points(name):
if name == 'bender_backbone':
return [quitter_entry_point]
else:
return original_entry_points(name)
mock.patch.object(pkg_resources, 'iter_entry_points', iter_entry_points)
return quitter
| lgpl-3.0 | -8,346,947,514,074,849,000 | 28.395062 | 78 | 0.677866 | false |
aaltay/beam | sdks/python/apache_beam/runners/portability/spark_runner_test.py | 1 | 6533 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import argparse
import logging
import shlex
import unittest
from shutil import rmtree
from tempfile import mkdtemp
import pytest
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.runners.portability import job_server
from apache_beam.runners.portability import portable_runner
from apache_beam.runners.portability import portable_runner_test
# Run as
#
# pytest spark_runner_test.py[::TestClass::test_case] \
# --test-pipeline-options="--environment_type=LOOPBACK"
_LOGGER = logging.getLogger(__name__)
class SparkRunnerTest(portable_runner_test.PortableRunnerTest):
_use_grpc = True
_use_subprocesses = True
expansion_port = None
spark_job_server_jar = None
@pytest.fixture(autouse=True)
def parse_options(self, request):
if not request.config.option.test_pipeline_options:
raise unittest.SkipTest(
'Skipping because --test-pipeline-options is not specified.')
test_pipeline_options = request.config.option.test_pipeline_options
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument(
'--spark_job_server_jar',
help='Job server jar to submit jobs.',
action='store')
parser.add_argument(
'--environment_type',
default='LOOPBACK',
choices=['DOCKER', 'PROCESS', 'LOOPBACK'],
help='Set the environment type for running user code. DOCKER runs '
'user code in a container. PROCESS runs user code in '
'automatically started processes. LOOPBACK runs user code on '
'the same process that originally submitted the job.')
parser.add_argument(
'--environment_option',
'--environment_options',
dest='environment_options',
action='append',
default=None,
help=(
'Environment configuration for running the user code. '
'Recognized options depend on --environment_type.\n '
'For DOCKER: docker_container_image (optional)\n '
'For PROCESS: process_command (required), process_variables '
'(optional, comma-separated)\n '
'For EXTERNAL: external_service_address (required)'))
known_args, unknown_args = parser.parse_known_args(
shlex.split(test_pipeline_options))
if unknown_args:
_LOGGER.warning('Discarding unrecognized arguments %s' % unknown_args)
self.set_spark_job_server_jar(
known_args.spark_job_server_jar or
job_server.JavaJarJobServer.path_to_beam_jar(
':runners:spark:2:job-server:shadowJar'))
self.environment_type = known_args.environment_type
self.environment_options = known_args.environment_options
@classmethod
def _subprocess_command(cls, job_port, expansion_port):
# will be cleaned up at the end of this method, and recreated and used by
# the job server
tmp_dir = mkdtemp(prefix='sparktest')
cls.expansion_port = expansion_port
try:
return [
'java',
'-Dbeam.spark.test.reuseSparkContext=true',
'-jar',
cls.spark_job_server_jar,
'--spark-master-url',
'local',
'--artifacts-dir',
tmp_dir,
'--job-port',
str(job_port),
'--artifact-port',
'0',
'--expansion-port',
str(expansion_port),
]
finally:
rmtree(tmp_dir)
@classmethod
def get_runner(cls):
return portable_runner.PortableRunner()
@classmethod
def get_expansion_service(cls):
    # TODO: Move the expansion service address into PipelineOptions
return 'localhost:%s' % cls.expansion_port
@classmethod
def set_spark_job_server_jar(cls, spark_job_server_jar):
cls.spark_job_server_jar = spark_job_server_jar
def create_options(self):
options = super(SparkRunnerTest, self).create_options()
options.view_as(PortableOptions).environment_type = self.environment_type
options.view_as(
PortableOptions).environment_options = self.environment_options
return options
def test_metrics(self):
# Skip until Spark runner supports metrics.
raise unittest.SkipTest("BEAM-7219")
def test_sdf(self):
# Skip until Spark runner supports SDF.
raise unittest.SkipTest("BEAM-7222")
def test_sdf_with_watermark_tracking(self):
# Skip until Spark runner supports SDF.
raise unittest.SkipTest("BEAM-7222")
def test_sdf_with_sdf_initiated_checkpointing(self):
# Skip until Spark runner supports SDF.
raise unittest.SkipTest("BEAM-7222")
def test_sdf_synthetic_source(self):
# Skip until Spark runner supports SDF.
raise unittest.SkipTest("BEAM-7222")
def test_callbacks_with_exception(self):
# Skip until Spark runner supports bundle finalization.
raise unittest.SkipTest("BEAM-7233")
def test_register_finalizations(self):
# Skip until Spark runner supports bundle finalization.
raise unittest.SkipTest("BEAM-7233")
def test_sdf_with_dofn_as_watermark_estimator(self):
# Skip until Spark runner supports SDF and self-checkpoint.
raise unittest.SkipTest("BEAM-7222")
def test_pardo_dynamic_timer(self):
raise unittest.SkipTest("BEAM-9912")
def test_flattened_side_input(self):
# Blocked on support for transcoding
# https://jira.apache.org/jira/browse/BEAM-7236
super(SparkRunnerTest,
self).test_flattened_side_input(with_transcoding=False)
def test_custom_merging_window(self):
raise unittest.SkipTest("BEAM-11004")
# Inherits all other tests from PortableRunnerTest.
if __name__ == '__main__':
# Run the tests.
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 | 7,796,007,208,722,939,000 | 32.849741 | 77 | 0.69325 | false |
gaccardo/buxfer_api | api/reporter.py | 1 | 16970 | import os
import math
from reportlab.pdfgen import canvas
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.charts.piecharts import Pie
from reportlab.graphics.charts.legends import Legend
from reportlab.lib.colors import black, red, purple, green, \
maroon, brown, pink, white, HexColor
from reportlab.graphics import renderPDF
from reportlab.platypus import Table, TableStyle
from reportlab.lib.units import cm
from reportlab.platypus import PageBreak
import datetime
import settings
from currency_calculator import CurrencyCalculator
class Reporter( object ):
def __init__(self, data):
self.accounts = data['accounts']
self.transactions = data['transactions']
self.budgets = data['budgets']
self.reminders = data['reminders']
self.c = None
self.l = 800
cc = CurrencyCalculator()
self.dolar = None
try:
self.dolar = cc.get_dolar()['real']
        except Exception:
self.dolar = settings.DOLAR
self.pdf_chart_colors = [
HexColor("#0000e5"),
HexColor("#1f1feb"),
HexColor("#5757f0"),
HexColor("#8f8ff5"),
HexColor("#c7c7fa"),
HexColor("#f5c2c2"),
HexColor("#eb8585"),
HexColor("#e04747"),
HexColor("#d60a0a"),
HexColor("#cc0000"),
HexColor("#ff0000"),
]
def __prepare_document(self):
file_path = os.path.join(settings.REPORT_TMP,
settings.REPORT_NAME)
self.c = canvas.Canvas(file_path)
def __generate_header(self):
self.c.setFont('Helvetica', 28)
self.c.drawString(30, self.l, 'Estado general de cuentas')
self.c.setFont('Courier', 11)
hoy = datetime.datetime.now()
hoy = hoy.strftime('%d/%m/%Y')
self.c.drawString(30, 780, 'Fecha: %s' % hoy)
self.c.drawString(495, 780, 'Dolar: $%.2f' % self.dolar)
self.c.line(20,775,580,775)
def __get_totals_by_currency(self):
totals = dict()
for acc in self.accounts:
if acc.currency not in totals:
totals[acc.currency] = acc.balance
else:
totals[acc.currency] += acc.balance
return totals
def __accounts_amount(self):
self.c.setFont('Courier', 14)
self.c.drawString(30, 750, 'Cuentas')
data = [['Cuenta', 'Moneda', 'Saldo']]
self.l = 630
for acc in self.accounts:
data.append([acc.name, acc.currency,
'$%.2f' % acc.balance])
t = Table(data)
t.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier')]))
t.wrapOn(self.c, 30, self.l)
t.drawOn(self.c, 30, self.l)
self.l -= 20
self.c.setFont('Courier', 14)
self.c.drawString(30, self.l, 'Totales por moneda')
self.l -= 63
data2 = [['Moneda', 'Saldo']]
totals = self.__get_totals_by_currency()
for currency, amount in totals.iteritems():
data2.append([currency, amount])
t2 = Table(data2)
t2.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier')]))
t2.wrapOn(self.c, 30, self.l)
t2.drawOn(self.c, 30, self.l)
def __translate_type(self, tipo):
types = dict()
types['income'] = 'ingreso'
types['expense'] = 'gasto'
types['transfer'] = 'tranferencia'
return types[tipo]
def __transactions(self):
self.l -= 20
self.c.setFont('Courier', 14)
self.c.drawString(30, self.l, 'Movimientos')
header = ['Fecha', 'Tipo', 'Cuenta', 'Monto', 'Description']
data = [header]
for tra in self.transactions:
tipo = self.__translate_type(tra.t_type)
data.append([tra.date, tipo.upper(), tra.account,
'$%.2f' % tra.amount, tra.description])
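        # Pagination sketch: chop the rows into chunks of roughly
        # 'registros' (24) rows so each table fits on a single PDF page;
        # every chunk after the first gets the header row re-attached.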
registros = 24
filas = len(data) / float(registros)
coheficiente = math.ceil(len(data) / filas)
look = 0
datas = list()
datas_new = list()
while look < len(data):
second = int(look+coheficiente)
datas.append(data[look:second])
look = int(look+coheficiente)
datas_new.append(datas[0])
for dd in datas[1:][::-1]:
datas_new.append([header] + dd)
data1 = datas_new[0]
self.l -= len(data1) * 19
t = Table(data1)
t.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier')]))
t.wrapOn(self.c, 30, self.l)
t.drawOn(self.c, 30, self.l)
for dd in datas_new[1:][::-1]:
p = PageBreak()
p.drawOn(self.c, 0, 1000)
self.c.showPage()
self.l = 800 - (len(dd) * 19)
t2 = Table(dd)
t2.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier')]))
t2.wrapOn(self.c, 30, self.l)
t2.drawOn(self.c, 30, self.l)
def __add_graph(self):
drawing = Drawing(200, 100)
data = list()
labels = list()
self.c.drawString(370, 730,
'Distribucion en pesos'.encode('utf-8'))
for acc in self.accounts:
balance = acc.balance
if acc.currency == 'USD':
balance = balance * self.dolar
data.append(balance)
labels.append(acc.name)
pie = Pie()
pie.x = 280
pie.y = 630
pie.height = 100
pie.width = 100
pie.data = data
pie.labels = labels
pie.simpleLabels = 1
pie.slices.strokeWidth = 1
pie.slices.strokeColor = black
pie.slices.label_visible = 0
legend = Legend()
legend.x = 400
legend.y = 680
legend.dx = 8
legend.dy = 8
legend.fontName = 'Helvetica'
legend.fontSize = 7
legend.boxAnchor = 'w'
legend.columnMaximum = 10
legend.strokeWidth = 1
legend.strokeColor = black
legend.deltax = 75
legend.deltay = 10
legend.autoXPadding = 5
legend.yGap = 0
legend.dxTextSpace = 5
legend.alignment = 'right'
legend.dividerLines = 1|2|4
legend.dividerOffsY = 4.5
legend.subCols.rpad = 30
n = len(pie.data)
self.__setItems(n,pie.slices,
'fillColor',self.pdf_chart_colors)
legend.colorNamePairs = [(pie.slices[i].fillColor,
(pie.labels[i][0:20],'$%0.2f' % pie.data[i])) for i in xrange(n)]
drawing.add(pie)
drawing.add(legend)
x, y = 0, 0
renderPDF.draw(drawing, self.c, x, y, showBoundary=False)
def __per_account_statistic(self):
for acc in self.accounts:
p = PageBreak()
p.drawOn(self.c, 0, 1000)
self.c.showPage()
self.l = 760
self.c.setFont('Courier', 14)
self.c.drawString(30, 800, 'Cuenta: %s' % \
acc.name)
header = ['Fecha', 'Tipo', 'Monto', 'Description']
data = [header]
g_data = list()
g_labe = list()
total = 0
for tra in self.transactions:
if tra.account == acc.name:
if tra.t_type in ['expense', 'transfer']:
tipo = self.__translate_type(tra.t_type)
data.append([tra.date, tipo.upper(),
                        '$%.2f' % tra.amount, tra.description])
total += tra.amount
g_data.append(tra.amount)
g_labe.append(tra.description.encode('utf-8'))
data.append(['TOTAL', '', '$%.2f' % total, ''])
if len(g_data) == 0 or len(g_labe) == 0:
self.c.setFont('Courier', 12)
self.c.drawString(30, 770, 'Sin movimientos negativos')
continue
from_title = 35
if len(data) != 2:
self.l -= ((len(data) * len(data)) + len(data)) + from_title
t = Table(data)
t.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier'),
('BACKGROUND', (0,-1), (-1,-1), red),
('TEXTCOLOR', (0,-1), (-1,-1), white)]))
t.wrapOn(self.c, 30, self.l)
t.drawOn(self.c, 30, self.l)
drawing = Drawing(200, 100)
pie = Pie()
pie.x = 30
pie.y = self.l - 300
pie.height = 200
pie.width = 200
pie.data = g_data
pie.labels = g_labe
pie.simpleLabels = 1
pie.slices.strokeWidth = 1
pie.slices.strokeColor = black
pie.slices.label_visible = 0
pie.slices.popout = 1
#pie.labels = map(str, pie.data)
legend = Legend()
legend.x = 250
legend.y = self.l - 250
legend.dx = 8
legend.dy = 8
legend.fontName = 'Helvetica'
legend.fontSize = 7
legend.boxAnchor = 'w'
legend.columnMaximum = 10
legend.strokeWidth = 1
legend.strokeColor = black
legend.deltax = 75
legend.deltay = 10
legend.autoXPadding = 5
legend.yGap = 0
legend.dxTextSpace = 5
legend.alignment = 'right'
legend.dividerLines = 1|2|4
legend.dividerOffsY = 4.5
legend.subCols.rpad = 30
n = len(pie.data)
self.__setItems(n,pie.slices,
'fillColor',self.pdf_chart_colors)
legend.colorNamePairs = [(pie.slices[i].fillColor,
(pie.labels[i][0:20],'$%0.2f' % pie.data[i])) for i in xrange(n)]
drawing.add(pie)
drawing.add(legend)
x, y = 0, 10
renderPDF.draw(drawing, self.c, x, y, showBoundary=False)
def __setItems(self, n, obj, attr, values):
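        # Spread the m available palette colors over the n pie slices by
        # stepping through the palette with a fixed stride.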
m = len(values)
i = m // n
for j in xrange(n):
setattr(obj[j],attr,values[j*i % m])
def __get_tags_statistics(self):
monto_categorias = dict()
for tra in self.transactions:
if len(tra.tags) > 0:
for tag in tra.tags:
                    if tag in monto_categorias:
monto_categorias[tag] += tra.amount
else:
monto_categorias[tag] = tra.amount
labels = [lab.encode('utf-8') for lab in monto_categorias.keys()]
data = monto_categorias.values()
p = PageBreak()
p.drawOn(self.c, 0, 1000)
self.c.showPage()
self.l = 600
self.c.setFont('Courier', 14)
self.c.drawString(30, 800, 'Categorias')
drawing = Drawing(200, 200)
pie = Pie()
pie.x = 30
pie.y = self.l - 130
pie.height = 300
pie.width = 300
pie.data = data
pie.labels = labels
pie.simpleLabels = 1
pie.slices.strokeWidth = 1
pie.slices.strokeColor = black
pie.slices.label_visible = 0
legend = Legend()
legend.x = 400
legend.y = self.l
legend.dx = 8
legend.dy = 8
legend.fontName = 'Helvetica'
legend.fontSize = 7
legend.boxAnchor = 'w'
legend.columnMaximum = 10
legend.strokeWidth = 1
legend.strokeColor = black
legend.deltax = 75
legend.deltay = 10
legend.autoXPadding = 5
legend.yGap = 0
legend.dxTextSpace = 5
legend.alignment = 'right'
legend.dividerLines = 1|2|4
legend.dividerOffsY = 4.5
legend.subCols.rpad = 30
n = len(pie.data)
self.__setItems(n,pie.slices,
'fillColor',self.pdf_chart_colors)
legend.colorNamePairs = [(pie.slices[i].fillColor,
(pie.labels[i][0:20],'$%0.2f' % pie.data[i])) for i in xrange(n)]
drawing.add(pie)
drawing.add(legend)
x, y = 0, 10
renderPDF.draw(drawing, self.c, x, y, showBoundary=False)
def __budgets_spent(self):
self.l = 800
p = PageBreak()
p.drawOn(self.c, 0, 1000)
self.c.showPage()
self.c.setFont('Courier', 14)
self.c.drawString(30, self.l, 'Budgets')
header = ['Nombre', 'Gastado', 'Balance', 'Limite']
data = [header]
for bud in self.budgets:
data.append([bud.name, bud.spent,
bud.balance, bud.limit])
self.l -= len(data) * 19
t = Table(data)
t.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier')]))
t.wrapOn(self.c, 30, self.l)
t.drawOn(self.c, 30, self.l)
def __reminders(self):
self.l = 800
p = PageBreak()
p.drawOn(self.c, 0, 1000)
self.c.showPage()
self.c.setFont('Courier', 14)
self.c.drawString(30, self.l, 'Recordatorio de pagos')
header = ['Fecha', 'Descripcion', 'Monto']
data = [header]
for rem in self.reminders:
data.append([rem.start_date, rem.description,
rem.amount])
self.l -= len(data) * 19
t = Table(data)
t.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier')]))
t.wrapOn(self.c, 30, self.l)
t.drawOn(self.c, 30, self.l)
def generate_report(self):
self.__prepare_document()
self.__generate_header()
self.__accounts_amount()
self.__add_graph()
self.__transactions()
self.__get_tags_statistics()
self.__per_account_statistic()
self.__budgets_spent()
self.__reminders()
self.c.showPage()
self.c.save()
| gpl-2.0 | -7,674,272,941,585,014,000 | 32.537549 | 81 | 0.478197 | false |
stephanie-wang/ray | python/ray/tune/suggest/variant_generator.py | 1 | 8452 | import copy
import logging
import numpy
import random
from ray.tune import TuneError
from ray.tune.sample import sample_from
logger = logging.getLogger(__name__)
def generate_variants(unresolved_spec):
"""Generates variants from a spec (dict) with unresolved values.
There are two types of unresolved values:
Grid search: These define a grid search over values. For example, the
following grid search values in a spec will produce six distinct
variants in combination:
"activation": grid_search(["relu", "tanh"])
"learning_rate": grid_search([1e-3, 1e-4, 1e-5])
Lambda functions: These are evaluated to produce a concrete value, and
can express dependencies or conditional distributions between values.
They can also be used to express random search (e.g., by calling
into the `random` or `np` module).
"cpu": lambda spec: spec.config.num_workers
"batch_size": lambda spec: random.uniform(1, 1000)
Finally, to support defining specs in plain JSON / YAML, grid search
and lambda functions can also be defined alternatively as follows:
"activation": {"grid_search": ["relu", "tanh"]}
"cpu": {"eval": "spec.config.num_workers"}
Use `format_vars` to format the returned dict of hyperparameters.
Yields:
(Dict of resolved variables, Spec object)
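    Example (illustrative):
        >>> list(generate_variants({"a": grid_search([1, 2])}))
        [({('a',): 1}, {'a': 1}), ({('a',): 2}, {'a': 2})]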
"""
for resolved_vars, spec in _generate_variants(unresolved_spec):
assert not _unresolved_values(spec)
yield resolved_vars, spec
def grid_search(values):
"""Convenience method for specifying grid search over a value.
Arguments:
values: An iterable whose parameters will be gridded.
"""
return {"grid_search": values}
_STANDARD_IMPORTS = {
"random": random,
"np": numpy,
}
_MAX_RESOLUTION_PASSES = 20
def resolve_nested_dict(nested_dict):
"""Flattens a nested dict by joining keys into tuple of paths.
Can then be passed into `format_vars`.
"""
res = {}
for k, v in nested_dict.items():
if isinstance(v, dict):
for k_, v_ in resolve_nested_dict(v).items():
res[(k, ) + k_] = v_
else:
res[(k, )] = v
return res
def format_vars(resolved_vars):
"""Formats the resolved variable dict into a single string."""
out = []
for path, value in sorted(resolved_vars.items()):
if path[0] in ["run", "env", "resources_per_trial"]:
continue # TrialRunner already has these in the experiment_tag
pieces = []
last_string = True
for k in path[::-1]:
if isinstance(k, int):
pieces.append(str(k))
elif last_string:
last_string = False
pieces.append(k)
pieces.reverse()
out.append(_clean_value("_".join(pieces)) + "=" + _clean_value(value))
return ",".join(out)
def flatten_resolved_vars(resolved_vars):
"""Formats the resolved variable dict into a mapping of (str -> value)."""
flattened_resolved_vars_dict = {}
for pieces, value in resolved_vars.items():
if pieces[0] == "config":
pieces = pieces[1:]
pieces = [str(piece) for piece in pieces]
flattened_resolved_vars_dict["/".join(pieces)] = value
return flattened_resolved_vars_dict
def _clean_value(value):
if isinstance(value, float):
return "{:.5}".format(value)
else:
return str(value).replace("/", "_")
def _generate_variants(spec):
spec = copy.deepcopy(spec)
unresolved = _unresolved_values(spec)
if not unresolved:
yield {}, spec
return
grid_vars = []
lambda_vars = []
for path, value in unresolved.items():
if callable(value):
lambda_vars.append((path, value))
else:
grid_vars.append((path, value))
grid_vars.sort()
grid_search = _grid_search_generator(spec, grid_vars)
for resolved_spec in grid_search:
resolved_vars = _resolve_lambda_vars(resolved_spec, lambda_vars)
for resolved, spec in _generate_variants(resolved_spec):
for path, value in grid_vars:
resolved_vars[path] = _get_value(spec, path)
for k, v in resolved.items():
if (k in resolved_vars and v != resolved_vars[k]
and _is_resolved(resolved_vars[k])):
raise ValueError(
"The variable `{}` could not be unambiguously "
"resolved to a single value. Consider simplifying "
"your configuration.".format(k))
resolved_vars[k] = v
yield resolved_vars, spec
def _assign_value(spec, path, value):
for k in path[:-1]:
spec = spec[k]
spec[path[-1]] = value
def _get_value(spec, path):
for k in path:
spec = spec[k]
return spec
def _resolve_lambda_vars(spec, lambda_vars):
resolved = {}
error = True
num_passes = 0
while error and num_passes < _MAX_RESOLUTION_PASSES:
num_passes += 1
error = False
for path, fn in lambda_vars:
try:
value = fn(_UnresolvedAccessGuard(spec))
except RecursiveDependencyError as e:
error = e
except Exception:
raise ValueError(
"Failed to evaluate expression: {}: {}".format(path, fn))
else:
_assign_value(spec, path, value)
resolved[path] = value
if error:
raise error
return resolved
def _grid_search_generator(unresolved_spec, grid_vars):
value_indices = [0] * len(grid_vars)
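    # value_indices works like an odometer over the grid values:
    # increment() advances the lowest position and carries over,
    # returning True once every combination has been produced.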
def increment(i):
value_indices[i] += 1
if value_indices[i] >= len(grid_vars[i][1]):
value_indices[i] = 0
if i + 1 < len(value_indices):
return increment(i + 1)
else:
return True
return False
if not grid_vars:
yield unresolved_spec
return
while value_indices[-1] < len(grid_vars[-1][1]):
spec = copy.deepcopy(unresolved_spec)
for i, (path, values) in enumerate(grid_vars):
_assign_value(spec, path, values[value_indices[i]])
yield spec
if grid_vars:
done = increment(0)
if done:
break
def _is_resolved(v):
resolved, _ = _try_resolve(v)
return resolved
def _try_resolve(v):
if isinstance(v, sample_from):
# Function to sample from
return False, v.func
elif isinstance(v, dict) and len(v) == 1 and "eval" in v:
# Lambda function in eval syntax
return False, lambda spec: eval(
v["eval"], _STANDARD_IMPORTS, {"spec": spec})
elif isinstance(v, dict) and len(v) == 1 and "grid_search" in v:
# Grid search values
grid_values = v["grid_search"]
if not isinstance(grid_values, list):
raise TuneError(
"Grid search expected list of values, got: {}".format(
grid_values))
return False, grid_values
return True, v
def _unresolved_values(spec):
found = {}
for k, v in spec.items():
resolved, v = _try_resolve(v)
if not resolved:
found[(k, )] = v
elif isinstance(v, dict):
# Recurse into a dict
for (path, value) in _unresolved_values(v).items():
found[(k, ) + path] = value
elif isinstance(v, list):
# Recurse into a list
for i, elem in enumerate(v):
for (path, value) in _unresolved_values({i: elem}).items():
found[(k, ) + path] = value
return found
class _UnresolvedAccessGuard(dict):
def __init__(self, *args, **kwds):
super(_UnresolvedAccessGuard, self).__init__(*args, **kwds)
self.__dict__ = self
def __getattribute__(self, item):
value = dict.__getattribute__(self, item)
if not _is_resolved(value):
raise RecursiveDependencyError(
"`{}` recursively depends on {}".format(item, value))
elif isinstance(value, dict):
return _UnresolvedAccessGuard(value)
else:
return value
class RecursiveDependencyError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
| apache-2.0 | 5,152,314,125,364,362,000 | 29.846715 | 78 | 0.57442 | false |
mattpap/sympy-polys | sympy/solvers/ode.py | 1 | 115902 | """
This module contains dsolve() and different helper functions that it
uses.
dsolve() solves ordinary differential equations. See the docstring on
the various functions for their uses. Note that partial differential
equations support is in pde.py. Note that ode_hint() functions have
docstrings describing their various methods, but they are intended for
internal use. Use dsolve(ode, func, hint=hint) to solve an ode using a
specific hint. See also the docstring on dsolve().
**Functions in this module**
These are the user functions in this module:
- dsolve() - Solves ODEs.
- classify_ode() - Classifies ODEs into possible hints for dsolve().
- checkodesol() - Checks if an equation is the solution to an ODE.
- ode_order() - Returns the order (degree) of an ODE.
- homogeneous_order() - Returns the homogeneous order of an
expression.
These are the non-solver helper functions that are for internal use.
The user should use the various options to dsolve() to obtain the
functionality provided by these functions:
- odesimp() - Does all forms of ODE simplification.
- ode_sol_simplicity() - A key function for comparing solutions by
simplicity.
- constantsimp() - Simplifies arbitrary constants.
- constant_renumber() - Renumber arbitrary constants
- _handle_Integral() - Evaluate unevaluated Integrals.
See also the docstrings of these functions.
**Solving methods currently implemented**
The following methods are implemented for solving ordinary differential
equations. See the docstrings of the various ode_hint() functions for
more information on each (run help(ode)):
- 1st order separable differential equations
- 1st order differential equations whose coefficients or dx and dy
are functions homogeneous of the same order.
- 1st order exact differential equations.
- 1st order linear differential equations
- 1st order Bernoulli differential equations.
- 2nd order Liouville differential equations.
- nth order linear homogeneous differential equation with constant
coefficients.
- nth order linear inhomogeneous differential equation with constant
coefficients using the method of undetermined coefficients.
- nth order linear inhomogeneous differential equation with constant
coefficients using the method of variation of parameters.
**Philosophy behind this module**
This module is designed to make it easy to add new ODE solving methods
without having to mess with the solving code for other methods. The
idea is that there is a classify_ode() function, which takes in an ODE
and tells you what hints, if any, will solve the ODE. It does this
without attempting to solve the ODE, so it is fast. Each solving method
is a hint, and it has its own function, named ode_hint. That function
takes in the ODE and any match expression gathered by classify_ode and
returns a solved result. If this result has any integrals in it, the
ode_hint function will return an unevaluated Integral class. dsolve(),
which is the user wrapper function around all of this, will then call
odesimp() on the result, which, among other things, will attempt to
solve the equation for the dependent variable (the function we are
solving for), simplify the arbitrary constants in the expression, and
evaluate any integrals, if the hint allows it.
**How to add new solution methods**
If you have an ODE that you want dsolve() to be able to solve, try to
avoid adding special case code here. Instead, try finding a general
method that will solve your ODE, as well as others. This way, the ode
module will become more robust, and unhindered by special case hacks.
WolframAlpha and Maple's DETools[odeadvisor] function are two resources
you can use to classify a specific ODE. It is also better for a method
to work with an nth order ODE instead of only with specific orders, if
possible.
To add a new method, there are a few things that you need to do. First,
you need a hint name for your method. Try to name your hint so that it
is unambiguous with all other methods, including ones that may not be
implemented yet. If your method uses integrals, also include a
"hint_Integral" hint. If there is more than one way to solve ODEs with
your method, include a hint for each one, as well as a "hint_best" hint.
Your ode_hint_best() function should choose the best using min with
ode_sol_simplicity as the key argument. See
ode_1st_homogeneous_coeff_best(), for example. The function that uses
your method will be called ode_hint(), so the hint must only use
characters that are allowed in a Python function name (alphanumeric
characters and the underscore '_' character). Include a function for
every hint, except for "_Integral" hints (dsolve() takes care of those
automatically). Hint names should be all lowercase, unless a word is
commonly capitalized (such as Integral or Bernoulli). If you have a hint
that you do not want to run with "all_Integral" that doesn't have an
"_Integral" counterpart (such as a best hint that would defeat the
purpose of "all_Integral"), you will need to remove it manually in the
dsolve() code. See also the classify_ode() docstring for guidelines on
writing a hint name.
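For instance, a hypothetical method that has two distinct solution routes
and requires integration might define the hints "my_method_route1",
"my_method_route2", "my_method_best", and the matching "_Integral"
variants (all names here are invented for illustration).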
Determine *in general* how the solutions returned by your method
compare with other methods that can potentially solve the same ODEs.
Then, put your hints in the allhints tuple in the order that they should
be called. The ordering of this tuple determines which hints are
default. Note that exceptions are ok, because it is easy for the user to
choose individual hints with dsolve(). In general, "_Integral" variants
should go at the end of the list, and "_best" variants should go before
the various hints they apply to. For example, the
"undetermined_coefficients" hint comes before the
"variation_of_parameters" hint because, even though variation of
parameters is more general than undetermined coefficients, undetermined
coefficients generally returns cleaner results for the ODEs that it can
solve than variation of parameters does, and it does not require
integration, so it is much faster.
Next, you need to have a match expression or a function that matches the
type of the ODE, which you should put in classify_ode() (if the match
function is more than just a few lines, like
_undetermined_coefficients_match(), it should go outside of
classify_ode()). It should match the ODE without solving for it as much
as possible, so that classify_ode() remains fast and is not hindered by
bugs in solving code. Be sure to consider corner cases. For example, if
your solution method involves dividing by something, make sure you
exclude the case where that division will be 0.
In most cases, the matching of the ODE will also give you the various
parts that you need to solve it. You should put that in a dictionary
(.match() will do this for you), and add that as matching_hints['hint']
= matchdict in the relevant part of classify_ode. classify_ode will
then send this to dsolve(), which will send it to your function as the
match argument. Your function should be named ode_hint(eq, func, order,
match). If you need to send more information, put it in the match
dictionary. For example, if you had to substitute in a dummy variable
in classify_ode to match the ODE, you will need to pass it to your
function using the match dict to access it. You can access the
independent variable using func.args[0], and the dependent variable (the
function you are trying to solve for) as func.func. If, while trying to
solve the ODE, you find that you cannot, raise NotImplementedError.
dsolve() will catch this error with the "all" meta-hint, rather than
causing the whole routine to fail.
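For illustration only, a hypothetical solver following these conventions
might look like the sketch below (the hint name "my_method" and the match
keys 'a' and 'b' are invented for this example):
    def ode_my_method(eq, func, order, match):
        x = func.args[0]
        f = func.func
        C1 = Symbol('C1')
        # 'a' and 'b' would have been stored in the match dict by
        # classify_ode(); return an Equality containing an unevaluated
        # Integral, so that the "_Integral" variant of the hint works.
        return Equality(f(x), C1 + C.Integral(match['b']/match['a'], x))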
Add a docstring to your function that describes the method employed.
Like with anything else in SymPy, you will need to add a doctest to the
docstring, in addition to real tests in test_ode.py. Try to maintain
consistency with the other hint functions' docstrings. Add your method
to the list at the top of this docstring. Also, add your method to
ode.txt in the docs/src directory, so that the Sphinx docs will pull its
docstring into the main SymPy documentation. Be sure to make the Sphinx
documentation by running "make html" from within the doc directory to
verify that the docstring formats correctly.
If your solution method involves integrating, use C.Integral() instead
of integrate(). This allows the user to bypass hard/slow integration by
using the "_Integral" variant of your hint. In most cases, calling
.doit() will integrate your solution. If this is not the case, you will
need to write special code in _handle_Integral(). Arbitrary constants
should be symbols named C1, C2, and so on. All solution methods should
return an equality instance. If you need an arbitrary number of
arbitrary constants, you can use constants =
numbered_symbols(prefix='C', function=Symbol, start=1). If it is
possible to solve for the dependent function in a general way, do so.
Otherwise, do as best as you can, but do not call solve in your
ode_hint() function. odesimp() will attempt to solve the solution for
you, so you do not need to do that. Lastly, if your ODE has a common
simplification that can be applied to your solutions, you can add a
special case in odesimp() for it. For example, solutions returned from
the "1st_homogeneous_coeff" hints often have many log() terms, so
odesimp() calls logcombine() on them (it also helps to write the
arbitrary constant as log(C1) instead of C1 in this case). Also
consider common ways that you can rearrange your solution to have
constantsimp() take better advantage of it. It is better to put
simplification in odesimp() than in your method, because it can then be
turned off with the simplify flag in dsolve(). If you have any
extraneous simplification in your function, be sure to only run it using
"if match.get('simplify', True):", especially if it can be slow or if it
can reduce the domain of the solution.
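For example, the constants generator mentioned above yields fresh arbitrary
constants on demand:
    constants = numbered_symbols(prefix='C', function=Symbol, start=1)
    constants.next()    # C1
    constants.next()    # C2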
Finally, as with every contribution to SymPy, your method will need to
be tested. Add a test for each method in test_ode.py. Follow the
conventions there, i.e., test the solver using dsolve(eq, f(x),
hint=your_hint), and also test the solution using checkodesol (you can
put these in a separate tests and skip/XFAIL if it runs too slow/doesn't
work). Be sure to call your hint specifically in dsolve, that way the
test won't be broken simply by the introduction of another matching
hint. If your method works for higher order (>1) ODEs, you will need to
run sol = constant_renumber(sol, 'C', 1, order), for each solution, where
order is the order of the ODE. This is because constant_renumber renumbers
the arbitrary constants by printing order, which is platform dependent.
Try to test every corner case of your solver, including a range of
orders if it is an nth order solver, but if your solver is slow, such as
if it involves hard integration, try to keep the test run time down.
Feel free to refactor existing hints to avoid duplicating code or
creating inconsistencies. If you can show that your method exactly
duplicates an existing method, including in the simplicity and speed of
obtaining the solutions, then you can remove the old, less general
method. The existing code is tested extensively in test_ode.py, so if
anything is broken, one of those tests will surely fail.
"""
from sympy.core import Add, Basic, C, S, Mul, Pow, oo
from sympy.core.function import Derivative, diff, expand_mul
from sympy.core.multidimensional import vectorize
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild
from sympy.core.sympify import sympify
from sympy.functions import cos, exp, im, log, re, sin, sign
from sympy.matrices import wronskian
from sympy.polys import RootsOf, discriminant, RootOf
from sympy.series import Order
from sympy.simplify import collect, logcombine, powsimp, separatevars, \
simplify, trigsimp
from sympy.solvers import solve
from sympy.utilities import numbered_symbols, all, any, make_list
from sympy.utilities.iterables import minkey
# This is a list of hints in the order that they should be applied. That means
# that, in general, hints earlier in the list should produce simpler results
# than those later for ODEs that fit both. This is just based on my own
# empirical observations, so if you find that *in general*, a hint later in
# the list is better than one before it, feel free to modify the list. Note
# however that you can easily override the hint used in dsolve() for a specific ODE
# (see the docstring). In general, "_Integral" hints should be grouped
# at the end of the list, unless there is a method that returns an unevaluatable
# integral most of the time (which should surely go near the end of the list
# anyway).
# "default", "all", "best", and "all_Integral" meta-hints should not be
# included in this list, but "_best" and "_Integral" hints should be included.
allhints = ("separable", "1st_exact", "1st_linear", "Bernoulli",
"1st_homogeneous_coeff_best", "1st_homogeneous_coeff_subs_indep_div_dep",
"1st_homogeneous_coeff_subs_dep_div_indep", "nth_linear_constant_coeff_homogeneous",
"nth_linear_constant_coeff_undetermined_coefficients",
"nth_linear_constant_coeff_variation_of_parameters",
"Liouville", "separable_Integral", "1st_exact_Integral", "1st_linear_Integral",
"Bernoulli_Integral", "1st_homogeneous_coeff_subs_indep_div_dep_Integral",
"1st_homogeneous_coeff_subs_dep_div_indep_Integral",
"nth_linear_constant_coeff_variation_of_parameters_Integral",
"Liouville_Integral")
def dsolve(eq, func, hint="default", simplify=True, **kwargs):
"""
Solves any (supported) kind of ordinary differential equation.
**Usage**
dsolve(eq, f(x), hint) -> Solve ordinary differential equation
eq for function f(x), using method hint.
**Details**
``eq`` can be any supported ordinary differential equation (see
the ode docstring for supported methods). This can either
be an Equality, or an expression, which is assumed to be
equal to 0.
``f(x)`` is a function of one variable whose derivatives in that
variable make up the ordinary differential equation eq.
``hint`` is the solving method that you want dsolve to use. Use
classify_ode(eq, f(x)) to get all of the possible hints for
an ODE. The default hint, 'default', will use whatever hint
is returned first by classify_ode(). See Hints below for
more options that you can use for hint.
``simplify`` enables simplification by odesimp(). See its
docstring for more information. Turn this off, for example,
to disable solving of solutions for func or simplification
of arbitrary constants. It will still integrate with this
hint. Note that the solution may contain more arbitrary
constants than the order of the ODE with this option
enabled.
**Hints**
Aside from the various solving methods, there are also some
meta-hints that you can pass to dsolve():
"default":
This uses whatever hint is returned first by
classify_ode(). This is the default argument to
dsolve().
"all":
To make dsolve apply all relevant classification hints,
use dsolve(ODE, func, hint="all"). This will return a
dictionary of hint:solution terms. If a hint causes
                dsolve to raise NotImplementedError, the value of that
hint's key will be the exception object raised. The
dictionary will also include some special keys:
- order: The order of the ODE. See also ode_order().
- best: The simplest hint; what would be returned by
"best" below.
- best_hint: The hint that would produce the solution
given by 'best'. If more than one hint produces the
best solution, the first one in the tuple returned by
classify_ode() is chosen.
- default: The solution that would be returned by
default. This is the one produced by the hint that
appears first in the tuple returned by classify_ode().
"all_Integral":
This is the same as "all", except if a hint also has a
corresponding "_Integral" hint, it only returns the
"_Integral" hint. This is useful if "all" causes
dsolve() to hang because of a difficult or impossible
integral. This meta-hint will also be much faster than
"all", because integrate() is an expensive routine.
"best":
To have dsolve() try all methods and return the simplest
one. This takes into account whether the solution is
solvable in the function, whether it contains any
Integral classes (i.e. unevaluatable integrals), and
which one is the shortest in size.
See also the classify_ode() docstring for more info on hints,
and the ode docstring for a list of all supported hints.
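        For instance, with the "all" meta-hint (a sketch, assuming an
        ODE eq in f(x) as in the examples below):
            sols = dsolve(eq, f(x), hint='all')  # dict of hint:solution
            sols['best']        # the same solution that hint='best' gives
            sols['best_hint']   # the hint that produced it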
**Tips**
- You can declare the derivative of an unknown function this way:
>>> from sympy import Function, Derivative
>>> from sympy.abc import x # x is the independent variable
>>> f = Function("f")(x) # f is a function of x
>>> # f_ will be the derivative of f with respect to x
>>> f_ = Derivative(f, x)
- See test_ode.py for many tests, which serves also as a set of
examples for how to use dsolve().
- dsolve always returns an Equality class (except for the case
when the hint is "all" or "all_Integral"). If possible, it
solves the solution explicitly for the function being solved
for. Otherwise, it returns an implicit solution.
- Arbitrary constants are symbols named C1, C2, and so on.
- Because all solutions should be mathematically equivalent,
some hints may return the exact same result for an ODE. Often,
though, two different hints will return the same solution
formatted differently. The two should be equivalent. Also
note that sometimes the values of the arbitrary constants in
two different solutions may not be the same, because one
constant may have "absorbed" other constants into it.
        - Do help(ode.ode_hintname) to get more information on a
specific hint, where hintname is the name of a hint without
"_Integral".
**Examples**
>>> from sympy import Function, dsolve, Eq, Derivative, sin, cos
>>> from sympy.abc import x
>>> f = Function('f')
>>> dsolve(Derivative(f(x),x,x)+9*f(x), f(x))
f(x) == C1*sin(3*x) + C2*cos(3*x)
>>> dsolve(sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x), f(x),
... hint='separable')
-log(1 - sin(f(x))**2)/2 == C1 + log(1 - sin(x)**2)/2
>>> dsolve(sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x), f(x),
... hint='1st_exact')
f(x) == acos(C1/cos(x))
>>> dsolve(sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x), f(x),
... hint='best')
f(x) == acos(C1/cos(x))
>>> # Note that even though separable is the default, 1st_exact produces
>>> # a simpler result in this case.
"""
# TODO: Implement initial conditions
# See issue 1621. We first need a way to represent things like f'(0).
if isinstance(eq, Equality):
if eq.rhs != 0:
return dsolve(eq.lhs-eq.rhs, func, hint=hint, simplify=simplify, **kwargs)
eq = eq.lhs
# Magic that should only be used internally. Prevents classify_ode from
# being called more than it needs to be by passing its results through
# recursive calls.
if kwargs.get('classify', True):
hints = classify_ode(eq, func, dict=True)
else:
# Here is what all this means:
#
# hint: The hint method given to dsolve() by the user.
# hints: The dictionary of hints that match the ODE, along with
# other information (including the internal pass-through magic).
# default: The default hint to return, the first hint from allhints
# that matches the hint. This is obtained from classify_ode().
# match: The hints dictionary contains a match dictionary for each hint
# (the parts of the ODE for solving). When going through the
# hints in "all", this holds the match string for the current
# hint.
# order: The order of the ODE, as determined by ode_order().
hints = kwargs.get('hint',
{'default': hint,
hint: kwargs['match'],
'order': kwargs['order']})
if hints['order'] == 0:
raise ValueError(str(eq) + " is not a differential equation in " + str(func))
if not hints['default']:
# classify_ode will set hints['default'] to None if no hints match.
raise NotImplementedError("dsolve: Cannot solve " + str(eq))
if hint == 'default':
return dsolve(eq, func, hint=hints['default'], simplify=simplify, classify=False,
order=hints['order'], match=hints[hints['default']])
elif hint in ('all', 'all_Integral', 'best'):
retdict = {}
failedhints = {}
gethints = set(hints) - set(['order', 'default', 'ordered_hints'])
if hint == 'all_Integral':
for i in hints:
if i[-9:] == '_Integral':
gethints.remove(i[:-9])
# special case
if "1st_homogeneous_coeff_best" in gethints:
gethints.remove("1st_homogeneous_coeff_best")
for i in gethints:
try:
sol = dsolve(eq, func, hint=i, simplify=simplify, classify=False,
order=hints['order'], match=hints[i])
except NotImplementedError, detail: # except NotImplementedError as detail:
failedhints[i] = detail
else:
retdict[i] = sol
retdict['best'] = minkey(retdict.values(), key=lambda x:
ode_sol_simplicity(x, func, trysolving=not simplify))
if hint == 'best':
return retdict['best']
for i in hints['ordered_hints']:
if retdict['best'] == retdict.get(i, None):
retdict['best_hint'] = i
break
retdict['default'] = hints['default']
retdict['order'] = sympify(hints['order'])
retdict.update(failedhints)
return retdict
elif hint not in allhints: # and hint not in ('default', 'ordered_hints'):
raise ValueError("Hint not recognized: " + hint)
elif hint not in hints:
raise ValueError("ODE " + str(eq) + " does not match hint " + hint)
elif hint[-9:] == '_Integral':
solvefunc = globals()['ode_' + hint[:-9]]
else:
solvefunc = globals()['ode_' + hint] # convert the string into a function
# odesimp() will attempt to integrate, if necessary, apply constantsimp(),
# attempt to solve for func, and apply any other hint specific simplifications
if simplify:
return odesimp(solvefunc(eq, func, order=hints['order'],
match=hints[hint]), func, hints['order'], hint)
else:
# We still want to integrate (you can disable it separately with the hint)
r = hints[hint]
r['simplify'] = False # Some hints can take advantage of this option
return _handle_Integral(solvefunc(eq, func, order=hints['order'],
match=hints[hint]), func, hints['order'], hint)
def classify_ode(eq, func, dict=False):
"""
Returns a tuple of possible dsolve() classifications for an ODE.
The tuple is ordered so that first item is the classification that
dsolve() uses to solve the ODE by default. In general,
classifications at the near the beginning of the list will produce
better solutions faster than those near the end, thought there are
always exceptions. To make dsolve use a different classification,
use dsolve(ODE, func, hint=<classification>). See also the dsolve()
docstring for different meta-hints you can use.
If dict is true, classify_ode() will return a dictionary of
hint:match expression terms. This is intended for internal use by
dsolve(). Note that because dictionaries are ordered arbitrarily,
this will most likely not be in the same order as the tuple.
You can get help on different hints by doing help(ode.ode_hintname),
where hintname is the name of the hint without "_Integral".
See sympy.ode.allhints or the sympy.ode docstring for a list of all
supported hints that can be returned from classify_ode.
**Notes on Hint Names**
*"_Integral"*
If a classification has "_Integral" at the end, it will return
the expression with an unevaluated Integral class in it. Note
that a hint may do this anyway if integrate() cannot do the
integral, though just using an "_Integral" will do so much
faster. Indeed, an "_Integral" hint will always be faster than
its corresponding hint without "_Integral" because integrate()
is an expensive routine. If dsolve() hangs, it is probably
because integrate() is hanging on a tough or impossible
integral. Try using an "_Integral" hint or "all_Integral" to
get it return something.
Note that some hints do not have "_Integral" counterparts. This
            is because integrate() is not used in solving the ODE for those
            methods. For example, nth order linear homogeneous ODEs with
            constant coefficients do not require integration to solve, so
            there is no "nth_linear_constant_coeff_homogeneous_Integral"
hint. You can easily evaluate any unevaluated Integrals in an
expression by doing expr.doit().
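            For example:
            >>> from sympy import Integral, Symbol
            >>> x = Symbol('x')
            >>> Integral(x, x).doit()
            x**2/2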
*Ordinals*
Some hints contain an ordinal such as "1st_linear". This is to
help differentiate them from other hints, as well as from other
methods that may not be implemented yet. If a hint has "nth" in
            it, such as the "nth_linear" hints, this means that the method
            used applies to ODEs of any order.
*"indep" and "dep"*
Some hints contain the words "indep" or "dep". These reference
the independent variable and the dependent function,
respectively. For example, if an ODE is in terms of f(x), then
"indep" will refer to x and "dep" will refer to f.
*"subs"*
            If a hint has the word "subs" in it, it means that the ODE is
solved by substituting the expression given after the word
"subs" for a single dummy variable. This is usually in terms of
"indep" and "dep" as above. The substituted expression will be
written only in characters allowed for names of Python objects,
meaning operators will be spelled out. For example, indep/dep
will be written as indep_div_dep.
*"coeff"*
The word "coeff" in a hint refers to the coefficients of
something in the ODE, usually of the derivative terms. See the
docstring for the individual methods for more info (help(ode)).
This is contrast to "coefficients", as in
"undetermined_coefficients", which refers to the common name of
a method.
*"_best"*
Methods that have more than one fundamental way to solve will
have a hint for each sub-method and a "_best"
meta-classification. This will evaluate all hints and return the
best, using the same considerations as the normal "best"
meta-hint.
**Examples**
>>> from sympy import Function, classify_ode, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> classify_ode(Eq(f(x).diff(x), 0), f(x))
('separable', '1st_linear', '1st_homogeneous_coeff_best',
'1st_homogeneous_coeff_subs_indep_div_dep',
'1st_homogeneous_coeff_subs_dep_div_indep',
'nth_linear_constant_coeff_homogeneous', 'separable_Integral',
'1st_linear_Integral',
'1st_homogeneous_coeff_subs_indep_div_dep_Integral',
'1st_homogeneous_coeff_subs_dep_div_indep_Integral')
>>> classify_ode(f(x).diff(x, 2) + 3*f(x).diff(x) + 2*f(x) - 4, f(x))
('nth_linear_constant_coeff_undetermined_coefficients',
'nth_linear_constant_coeff_variation_of_parameters',
'nth_linear_constant_coeff_variation_of_parameters_Integral')
"""
from sympy import expand
if len(func.args) != 1:
raise ValueError("dsolve() and classify_ode() only work with functions " + \
"of one variable")
x = func.args[0]
f = func.func
y = Symbol('y', dummy=True)
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_ode(eq.lhs-eq.rhs, func)
eq = eq.lhs
order = ode_order(eq, f(x))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {"order": order}
if not order:
if dict:
matching_hints["default"] = None
return matching_hints
else:
return ()
df = f(x).diff(x)
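    # Wild patterns used by the matchers below; 'exclude' prevents a
    # pattern from matching anything that contains the listed expressions.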
a = Wild('a', exclude=[f(x)])
b = Wild('b', exclude=[f(x)])
c = Wild('c', exclude=[f(x)])
d = Wild('d', exclude=[df, f(x).diff(x, 2)])
e = Wild('e', exclude=[df])
k = Wild('k', exclude=[df])
n = Wild('n', exclude=[f(x)])
c1 = Wild('c1', exclude=[x])
eq = expand(eq)
# Precondition to try remove f(x) from highest order derivative
reduced_eq = None
if eq.is_Add:
deriv_coef = eq.coeff(f(x).diff(x, order))
if deriv_coef != 1:
r = deriv_coef.match(a*f(x)**c1)
if r and r[c1]:
den = f(x)**r[c1]
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
# Linear case: a(x)*y'+b(x)*y+c(x) == 0
if eq.is_Add:
ind, dep = reduced_eq.as_independent(f)
else:
u = Symbol('u', dummy=True)
ind, dep = (reduced_eq + u).as_independent(f)
ind, dep = [tmp.subs(u, 0) for tmp in [ind, dep]]
r = {a: dep.coeff(df, expand=False) or S.Zero, # if we get None for coeff, take 0
b: dep.coeff(f(x), expand=False) or S.Zero, # ditto
c: ind}
# double check f[a] since the preconditioning may have failed
if not r[a].has(f) and (r[a]*df + r[b]*f(x) + r[c]).expand() - reduced_eq == 0:
r['a'] = a
r['b'] = b
r['c'] = c
matching_hints["1st_linear"] = r
matching_hints["1st_linear_Integral"] = r
# Bernoulli case: a(x)*y'+b(x)*y+c(x)*y**n == 0
r = collect(reduced_eq, f(x), exact = True).match(a*df + b*f(x) + c*f(x)**n)
if r and r[c] != 0 and r[n] != 1: # See issue 1577
r['a'] = a
r['b'] = b
r['c'] = c
r['n'] = n
matching_hints["Bernoulli"] = r
matching_hints["Bernoulli_Integral"] = r
# Exact Differential Equation: P(x,y)+Q(x,y)*y'=0 where dP/dy == dQ/dx
# WITH NON-REDUCED FORM OF EQUATION
r = collect(eq, df, exact = True).match(d + e * df)
if r:
r['d'] = d
r['e'] = e
r['y'] = y
r[d] = r[d].subs(f(x),y)
r[e] = r[e].subs(f(x),y)
if r[d] != 0 and simplify(r[d].diff(y)) == simplify(r[e].diff(x)):
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
# This match is used for several cases below; we now collect on
# f(x) so the matching works.
r = collect(reduced_eq, df, exact = True).match(d+e*df)
if r:
r['d'] = d
r['e'] = e
r['y'] = y
r[d] = r[d].subs(f(x),y)
r[e] = r[e].subs(f(x),y)
# Separable Case: y' == P(y)*Q(x)
r[d] = separatevars(r[d])
r[e] = separatevars(r[e])
# m1[coeff]*m1[x]*m1[y] + m2[coeff]*m2[x]*m2[y]*y'
m1 = separatevars(r[d], dict=True, symbols=(x, y))
m2 = separatevars(r[e], dict=True, symbols=(x, y))
if m1 and m2:
r1 = {'m1':m1, 'm2':m2, 'y':y}
matching_hints["separable"] = r1
matching_hints["separable_Integral"] = r1
# First order equation with homogeneous coefficients:
# dy/dx == F(y/x) or dy/dx == F(x/y)
ordera = homogeneous_order(r[d], x, y)
orderb = homogeneous_order(r[e], x, y)
if ordera == orderb and ordera != None:
# u1=y/x and u2=x/y
u1 = Symbol('u1', dummy=True)
u2 = Symbol('u2', dummy=True)
if simplify((r[d]+u1*r[e]).subs({x:1, y:u1})) != 0:
matching_hints["1st_homogeneous_coeff_subs_dep_div_indep"] = r
matching_hints["1st_homogeneous_coeff_subs_dep_div_indep_Integral"] = r
if simplify((r[e]+u2*r[d]).subs({x:u2, y:1})) != 0:
matching_hints["1st_homogeneous_coeff_subs_indep_div_dep"] = r
matching_hints["1st_homogeneous_coeff_subs_indep_div_dep_Integral"] = r
            if "1st_homogeneous_coeff_subs_dep_div_indep" in matching_hints \
                and "1st_homogeneous_coeff_subs_indep_div_dep" in matching_hints:
matching_hints["1st_homogeneous_coeff_best"] = r
if order == 2:
# Liouville ODE f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x)
# See Goldstein and Braun, "Advanced Methods for the Solution of
# Differential Equations", pg. 98
s = d*f(x).diff(x, 2) + e*df**2 + k*df
r = reduced_eq.match(s)
if r and r[d] != 0:
y = Symbol('y', dummy=True)
g = simplify(r[e]/r[d]).subs(f(x), y)
h = simplify(r[k]/r[d])
if h.has(f(x)) or g.has(x):
pass
else:
r = {'g':g, 'h':h, 'y':y}
matching_hints["Liouville"] = r
matching_hints["Liouville_Integral"] = r
if order > 0:
# nth order linear ODE
# a_n(x)y^(n) + ... + a_1(x)y' + a_0(x)y = F(x) = b
r = _nth_linear_match(reduced_eq, func, order)
# Constant coefficient case (a_i is constant for all i)
if r and not any(r[i].has(x) for i in r if i >= 0):
# Inhomogeneous case: F(x) is not identically 0
if r[-1]:
undetcoeff = _undetermined_coefficients_match(r[-1], x)
matching_hints["nth_linear_constant_coeff_variation_of_parameters"] = r
matching_hints["nth_linear_constant_coeff_variation_of_parameters" + \
"_Integral"] = r
if undetcoeff['test']:
r['trialset'] = undetcoeff['trialset']
matching_hints["nth_linear_constant_coeff_undetermined_" + \
"coefficients"] = r
# Homogeneous case: F(x) is identically 0
else:
matching_hints["nth_linear_constant_coeff_homogeneous"] = r
# Order keys based on allhints.
retlist = []
for i in allhints:
if i in matching_hints:
retlist.append(i)
if dict:
# Dictionaries are ordered arbitrarily, so we need to make note of which
# hint would come first for dsolve(). In Python 3, this should be replaced
# with an ordered dictionary.
matching_hints["default"] = None
matching_hints["ordered_hints"] = tuple(retlist)
for i in allhints:
if i in matching_hints:
matching_hints["default"] = i
break
return matching_hints
else:
return tuple(retlist)
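# An illustrative sketch (comment only, not executed; assumes the enclosing
# classifier is SymPy's classify_ode and that f = Function('f'), x = Symbol('x')):
#     hints = classify_ode(f(x).diff(x) + f(x) - x, f(x), dict=True)
#     hints['default']        # the hint dsolve() would try first
#     hints['ordered_hints']  # all matching hints, in allhints order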
@vectorize(0)
def odesimp(eq, func, order, hint):
r"""
Simplifies ODEs, including trying to solve for func and running
constantsimp().
    It may use knowledge of the type of solution that the given hint returns
to apply additional simplifications.
It also attempts to integrate any Integrals in the expression, if
the hint is not an "_Integral" hint.
This function should have no effect on expressions returned by
dsolve(), as dsolve already calls odesimp(), but the individual hint
functions do not call odesimp (because the dsolve() wrapper does).
    Therefore, this function is designed mainly for internal use.
**Example**
>>> from sympy import sin, symbols, dsolve, pprint, Function
>>> from sympy.solvers.ode import odesimp
    >>> x, u2, C1 = symbols('x u2 C1')
>>> f = Function('f')
>>> eq = dsolve(x*f(x).diff(x) - f(x) - x*sin(f(x)/x), f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral',
... simplify=False)
>>> pprint(eq)
x
----
f(x)
/
|
| /1 \
| 1 + u2*sin|--|
| \u2/ /f(x)\
- | -------------------------- d(u2) + log|----| = 0
| / /1 \\ \ C1 /
| - |1 + u2*sin|--||*u2 + u2
| \ \u2//
|
/
<BLANKLINE>
>> pprint(odesimp(eq, f(x), 1,
... hint='1st_homogeneous_coeff_subs_indep_div_dep'
... )) # (this is slow, so we skip)
x
--------- = C1
/f(x)\
tan|----|
\2*x /
"""
x = func.args[0]
f = func.func
C1 = Symbol('C1')
# First, integrate, if the hint allows it.
eq = _handle_Integral(eq, func, order, hint)
# Second, clean up the arbitrary constants.
# Right now, nth linear hints can put as many as 2*order constants in an
# expression. If that number grows with another hint, the third argument
# here should be raised accordingly, or constantsimp() rewritten to handle
# an arbitrary number of constants.
eq = constantsimp(eq, x, 2*order)
# Lastly, now that we have cleaned up the expression, try solving for func.
# When RootOf is implemented in solve(), we will want to return a RootOf
    # every time instead of an Equality.
"""
if hint[:21] == "1st_homogeneous_coeff":
eq = logcombine(eq, assume_pos_real=True)
if eq.lhs.is_Function and eq.lhs.func is log and eq.rhs == 0:
eq = Eq(eq.lhs.args[0]/C1,C1)
"""
if eq.lhs == func and not eq.rhs.has(func):
# The solution is already solved
pass
elif eq.rhs == func and not eq.lhs.has(func):
# The solution is solved, but in reverse, so switch it
eq = Eq(eq.rhs, eq.lhs)
else:
# The solution is not solved, so try to solve it
try:
eqsol = solve(eq, func)
if eqsol == []:
raise NotImplementedError
except NotImplementedError:
eq = [eq]
else:
eq = [Eq(f(x), t) for t in eqsol]
# Special handling for certain hints that we know will usually take a
# certain form
if hint[:21] == "1st_homogeneous_coeff":
neweq = []
for i in eq:
# Solutions from this hint can almost always be logcombined
newi = logcombine(i, assume_pos_real=True)
if newi.lhs.is_Function and newi.lhs.func is log and newi.rhs == 0:
# log(C1*stuff) == 0 --> stuff == C1
# Note that this is a form of constant simplification.
# And also, the division of C1 relies on constantsimp()
# making it C1*stuff.
newi = Eq(newi.lhs.args[0]/C1,C1)
neweq.append(newi)
eq = neweq
if len(eq) == 1:
eq = eq[0] # We only want a list if there are multiple solutions
if hint[:25] == "nth_linear_constant_coeff":
# Collect terms to make the solution look nice.
# This is also necessary for constantsimp to remove unnecessary terms
# from the particular solution from variation of parameters
global collectterms
sol = eq.rhs
sol = expand_mul(sol)
for i, reroot, imroot in collectterms:
sol = collect(sol, x**i*exp(reroot*x)*sin(abs(imroot)*x))
sol = collect(sol, x**i*exp(reroot*x)*cos(imroot*x))
for i, reroot, imroot in collectterms:
sol = collect(sol, x**i*exp(reroot*x))
del collectterms
eq = Eq(f(x), sol)
    # We cleaned up the constants before solving to help the solve engine with
# a simpler expression, but the solved expression could have introduced
# things like -C1, so rerun constantsimp() one last time before returning.
eq = constant_renumber(constantsimp(eq, x, 2*order), 'C', 1, 2*order)
return eq
@vectorize(2)
def checkodesol(ode, func, sol, order='auto', solve_for_func=True):
"""
Substitutes sol for func in ode and checks that the result is 0.
This only works when func is one function, like f(x). sol can be a
single solution or a list of solutions. Either way, each solution
must be an Equality instance (e.g., Eq(f(x), C1*cos(x) +
C2*sin(x))). If it is a list of solutions, it will return a list of
the checkodesol() result for each solution.
It tries the following methods, in order, until it finds zero
equivalence:
1. Substitute the solution for f in the original equation. This
only works if the ode is solved for f. It will attempt to solve
it first unless solve_for_func == False
2. Take n derivatives of the solution, where n is the order of
    ode, and check to see if that is equal to the ode. This
only works on exact odes.
3. Take the 1st, 2nd, ..., nth derivatives of the solution, each
time solving for the derivative of f of that order (this will
always be possible because f is a linear operator). Then back
substitute each derivative into ode in reverse order.
This function returns a tuple. The first item in the tuple is True
if the substitution results in 0, and False otherwise. The second
item in the tuple is what the substitution results in. It should
always be 0 if the first item is True. Note that sometimes this
    function will return False with an expression that is identically
equal to 0, instead of returning True. This is because simplify()
cannot reduce the expression to 0. If an expression returned by
this function vanishes identically, then sol really is a solution to
ode.
If this function seems to hang, it is probably because of a hard
simplification.
To use this function to test, test the first item of the tuple.
**Examples**
>>> from sympy import Eq, Function, checkodesol, symbols
>>> x, C1 = symbols('x C1')
>>> f = Function('f')
>>> checkodesol(f(x).diff(x), f(x), Eq(f(x), C1))
(True, 0)
>>> assert checkodesol(f(x).diff(x), f(x), Eq(f(x), C1))[0]
>>> assert not checkodesol(f(x).diff(x), f(x), Eq(f(x), x))[0]
>>> checkodesol(f(x).diff(x, 2), f(x), Eq(f(x), x**2))
(False, 2)
"""
if not func.is_Function or len(func.args) != 1:
raise ValueError("func must be a function of one variable, not " + str(func))
x = func.args[0]
s = True
testnum = 0
if not isinstance(ode, Equality):
ode = Eq(ode, 0)
if not isinstance(sol, Equality):
raise ValueError("sol must be an Equality, got " + str(sol))
if order == 'auto':
order = ode_order(ode, func)
if solve_for_func and not (sol.lhs == func and not sol.rhs.has(func)) and not \
(sol.rhs == func and not sol.lhs.has(func)):
try:
solved = solve(sol, func)
if solved == []:
raise NotImplementedError
except NotImplementedError:
pass
else:
if len(solved) == 1:
result = checkodesol(ode, func, Eq(func, solved[0]), \
order=order, solve_for_func=False)
else:
result = checkodesol(ode, func, [Eq(func, t) for t in solved],
order=order, solve_for_func=False)
return result
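    # Each pass of this loop applies a progressively more expensive
    # zero-equivalence test; testnum selects the test and s holds the
    # (simplified) residual.  s starts as True, so if no test manages
    # to run at all, the NotImplementedError below is raised.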
while s:
if testnum == 0:
# First pass, try substituting a solved solution directly into the ode
# This has the highest chance of succeeding.
ode_diff = ode.lhs - ode.rhs
if sol.lhs == func:
s = ode_diff.subs(func, sol.rhs)
elif sol.rhs == func:
s = ode_diff.subs(func, sol.lhs)
else:
testnum += 1
continue
ss = simplify(s)
if ss:
# with the new numer_denom in power.py, if we do a simple
# expansion then testnum == 0 verifies all solutions.
s = ss.expand()
else:
s = 0
testnum += 1
elif testnum == 1:
# Second pass. If we cannot substitute f, try seeing if the nth
# derivative is equal, this will only work for odes that are exact,
# by definition.
s = simplify(trigsimp(diff(sol.lhs, x, order) - diff(sol.rhs, x, order)) - \
trigsimp(ode.lhs) + trigsimp(ode.rhs))
# s2 = simplify(diff(sol.lhs, x, order) - diff(sol.rhs, x, order) - \
# ode.lhs + ode.rhs)
testnum += 1
elif testnum == 2:
# Third pass. Try solving for df/dx and substituting that into the ode.
# Thanks to Chris Smith for suggesting this method. Many of the
# comments below are his too.
# The method:
# - Take each of 1..n derivatives of the solution.
# - Solve each nth derivative for d^(n)f/dx^(n)
# (the differential of that order)
# - Back substitute into the ode in decreasing order
# (i.e., n, n-1, ...)
# - Check the result for zero equivalence
if sol.lhs == func and not sol.rhs.has(func):
diffsols = {0:sol.rhs}
elif sol.rhs == func and not sol.lhs.has(func):
diffsols = {0:sol.lhs}
else:
diffsols = {}
sol = sol.lhs - sol.rhs
for i in range(1, order + 1):
# Differentiation is a linear operator, so there should always
# be 1 solution. Nonetheless, we test just to make sure.
# We only need to solve once. After that, we will automatically
# have the solution to the differential in the order we want.
if i == 1:
ds = sol.diff(x)
try:
sdf = solve(ds,func.diff(x, i))
if len(sdf) != 1:
raise NotImplementedError
except NotImplementedError:
testnum += 1
break
else:
diffsols[i] = sdf[0]
else:
# This is what the solution says df/dx should be.
diffsols[i] = diffsols[i - 1].diff(x)
# Make sure the above didn't fail.
if testnum > 2:
continue
else:
# Substitute it into ode to check for self consistency.
lhs, rhs = ode.lhs, ode.rhs
for i in range(order, -1, -1):
                if i == 0 and 0 not in diffsols:
# We can only substitute f(x) if the solution was
# solved for f(x).
break
lhs = lhs.subs(func.diff(x, i), diffsols[i])
rhs = rhs.subs(func.diff(x, i), diffsols[i])
ode_or_bool = Eq(lhs,rhs)
if isinstance(ode_or_bool, bool):
if ode_or_bool:
lhs = rhs = S.Zero
else:
ode_or_bool = simplify(ode_or_bool)
lhs = ode_or_bool.lhs
rhs = ode_or_bool.rhs
# No sense in overworking simplify--just prove the numerator goes to zero
s = simplify(trigsimp((lhs-rhs).as_numer_denom()[0]))
testnum += 1
else:
break
if not s:
return (True, s)
elif s is True: # The code above never was able to change s
raise NotImplementedError("Unable to test if " + str(sol) + \
" is a solution to " + str(ode) + ".")
else:
return (False, s)
def ode_sol_simplicity(sol, func, trysolving=True):
"""
Returns an extended integer representing how simple a solution to an
ODE is.
The following things are considered, in order from most simple to
least:
- sol is solved for func.
- sol is not solved for func, but can be if passed to solve (e.g.,
a solution returned by dsolve(ode, func, simplify=False)
- If sol is not solved for func, then base the result on the length
of sol, as computed by len(str(sol)).
- If sol has any unevaluated Integrals, this will automatically be
considered less simple than any of the above.
This function returns an integer such that if solution A is simpler
than solution B by above metric, then ode_sol_simplicity(sola, func)
< ode_sol_simplicity(solb, func).
Currently, the following are the numbers returned, but if the
heuristic is ever improved, this may change. Only the ordering is
guaranteed.
    sol solved for func                        -2
    sol not solved for func but can be         -1
    sol is not solved or solvable for func     len(str(sol))
    sol contains an Integral                   oo
oo here means the SymPy infinity, which should compare greater than
any integer.
If you already know solve() cannot solve sol, you can use
trysolving=False to skip that step, which is the only potentially
slow step. For example, dsolve with the simplify=False flag should
do this.
If sol is a list of solutions, if the worst solution in the list
returns oo it returns that, otherwise it returns len(str(sol)), that
is, the length of the string representation of the whole list.
**Examples**
This function is designed to be passed to min as the key argument,
such as min(listofsolutions, key=lambda i: ode_sol_simplicity(i, f(x))).
Note that as long as SymPy supports Python 2.4, you must use the minkey()
function in sympy/utilities/iterables.py to emulate this behavior.
>>> from sympy import symbols, Function, Eq, tan, cos, sqrt, Integral
>>> from sympy.solvers.ode import ode_sol_simplicity
>>> from sympy.utilities.iterables import minkey
>>> x, C1 = symbols('x C1')
>>> f = Function('f')
>>> ode_sol_simplicity(Eq(f(x), C1*x**2), f(x))
-2
>>> ode_sol_simplicity(Eq(x**2 + f(x), C1), f(x))
-1
>>> ode_sol_simplicity(Eq(f(x), C1*Integral(2*x, x)), f(x))
oo
>>> # This is from dsolve(x*f(x).diff(x) - f(x) - x*sin(f(x)/x), \
>>> # f(x), hint='1st_homogeneous_coeff_subs_indep_div_dep')
>>> eq1 = Eq(x/tan(f(x)/(2*x)), C1)
>>> # This is from the same ode with the
>>> # '1st_homogeneous_coeff_subs_dep_div_indep' hint.
>>> eq2 = Eq(x*sqrt(1 + cos(f(x)/x))/sqrt(-1 + cos(f(x)/x)), C1)
>>> ode_sol_simplicity(eq1, f(x))
23
>>> minkey([eq1, eq2], key=lambda i: ode_sol_simplicity(i, f(x)))
x/tan(f(x)/(2*x)) == C1
"""
    # See the docstring for the ordering rules. We check easier (faster)
    # things here first, to save time.
if type(sol) in (list, tuple):
# See if there are Integrals
for i in sol:
if ode_sol_simplicity(i, func, trysolving=trysolving) == oo:
return oo
return len(str(sol))
if sol.has(C.Integral):
return oo
# Next, try to solve for func. This code will change slightly when RootOf
# is implemented in solve(). Probably a RootOf solution should fall somewhere
# between a normal solution and an unsolvable expression.
# First, see if they are already solved
if sol.lhs == func and not sol.rhs.has(func) or\
sol.rhs == func and not sol.lhs.has(func):
return -2
# We are not so lucky, try solving manually
if trysolving:
try:
sols = solve(sol, func)
if sols == []:
raise NotImplementedError
except NotImplementedError:
pass
else:
return -1
# Finally, a naive computation based on the length of the string version
# of the expression. This may favor combined fractions because they
# will not have duplicate denominators, and may slightly favor expressions
# with fewer additions and subtractions, as those are separated by spaces
# by the printer.
# Additional ideas for simplicity heuristics are welcome, like maybe
    # checking if an equation has a larger domain, or if constantsimp has
# introduced arbitrary constants numbered higher than the order of a
# given ode that sol is a solution of.
return len(str(sol))
@vectorize(0)
def constantsimp(expr, independentsymbol, endnumber, startnumber=1,
symbolname='C'):
"""
Simplifies an expression with arbitrary constants in it.
This function is written specifically to work with dsolve(), and is
not intended for general use.
Simplification is done by "absorbing" the arbitrary constants in to
other arbitrary constants, numbers, and symbols that they are not
independent of.
The symbols must all have the same name with numbers after it, for
example, C1, C2, C3. The symbolname here would be 'C', the
startnumber would be 1, and the end number would be 3. If the
arbitrary constants are independent of the variable x, then the
independent symbol would be x. There is no need to specify the
dependent function, such as f(x), because it already has the
independent symbol, x, in it.
Because terms are "absorbed" into arbitrary constants and because
constants are renumbered after simplifying, the arbitrary constants
in expr are not necessarily equal to the ones of the same name in
the returned result.
If two or more arbitrary constants are added, multiplied, or raised
to the power of each other, they are first absorbed together into a
single arbitrary constant. Then the new constant is combined into
other terms if necessary.
Absorption is done naively. constantsimp() does not attempt to
expand or simplify the expression first to obtain better absorption.
So for example, exp(C1)*exp(x) will be simplified to C1*exp(x), but
exp(C1 + x) will be left alone.
Use constant_renumber() to renumber constants after simplification.
Without using that function, simplified constants may end up
having any numbering to them.
In rare cases, a single constant can be "simplified" into two
constants. Every differential equation solution should have as many
arbitrary constants as the order of the differential equation. The
result here will be technically correct, but it may, for example,
have C1 and C2 in an expression, when C1 is actually equal to C2.
Use your discretion in such situations, and also take advantage of
the ability to use hints in dsolve().
**Examples**
>>> from sympy import symbols
>>> from sympy.solvers.ode import constantsimp
>>> C1, C2, C3, x, y = symbols('C1 C2 C3 x y')
>>> constantsimp(2*C1*x, x, 3)
C1*x
>>> constantsimp(C1 + 2 + x + y, x, 3)
C1 + x
>>> constantsimp(C1*C2 + 2 + x + y + C3*x, x, 3)
C2 + x + C3*x
"""
# This function works recursively. The idea is that, for Mul,
# Add, Pow, and Function, if the class has a constant in it, then
# we can simplify it, which we do by recursing down and
# simplifying up. Otherwise, we can skip that part of the
# expression.
from sympy.utilities import any
constantsymbols = [Symbol(symbolname+"%d" % t) for t in range(startnumber,
endnumber + 1)]
x = independentsymbol
if isinstance(expr, Equality):
# For now, only treat the special case where one side of the equation
# is a constant
if expr.lhs in constantsymbols:
return Eq(expr.lhs, constantsimp(expr.rhs + expr.lhs, x, endnumber,
startnumber, symbolname) - expr.lhs)
# this could break if expr.lhs is absorbed into another constant,
# but for now, the only solutions that return Eq's with a constant
# on one side are first order. At any rate, it will still be
# technically correct. The expression will just have too many
# constants in it
elif expr.rhs in constantsymbols:
return Eq(constantsimp(expr.lhs + expr.rhs, x, endnumber,
startnumber, symbolname) - expr.rhs, expr.rhs)
else:
return Eq(constantsimp(expr.lhs, x, endnumber, startnumber,
symbolname), constantsimp(expr.rhs, x, endnumber,
startnumber, symbolname))
if type(expr) not in (Mul, Add, Pow) and not expr.is_Function:
# We don't know how to handle other classes
# This also serves as the base case for the recursion
return expr
elif not any(expr.has(t) for t in constantsymbols):
return expr
else:
newargs = []
hasconst = False
isPowExp = False
reeval = False
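        # Two passes: first pull any top-level constant symbol out of the
        # args, then recursively simplify the remaining args.  If a
        # subexpression collapses to a bare constant, reeval is set so the
        # rebuilt expression gets simplified once more below.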
for i in expr.args:
if i not in constantsymbols:
newargs.append(i)
else:
newconst = i
hasconst = True
if expr.is_Pow and i == expr.exp:
isPowExp = True
for i in range(len(newargs)):
isimp = constantsimp(newargs[i], x, endnumber, startnumber,
symbolname)
if isimp in constantsymbols:
reeval = True
hasconst = True
newconst = isimp
if expr.is_Pow and i == 1:
isPowExp = True
newargs[i] = isimp
if hasconst:
newargs = [i for i in newargs if i.has(x)]
if isPowExp:
newargs = newargs + [newconst] # Order matters in this case
else:
newargs = [newconst] + newargs
if expr.is_Pow and len(newargs) == 1:
newargs.append(S.One)
if expr.is_Function:
            if len(newargs) == 0 or (hasconst and len(newargs) == 1):
return newconst
else:
newfuncargs = [constantsimp(t, x, endnumber, startnumber,
symbolname) for t in expr.args]
return expr.new(*newfuncargs)
else:
newexpr = expr.new(*newargs)
if reeval:
return constantsimp(newexpr, x, endnumber, startnumber,
symbolname)
else:
return newexpr
@vectorize(0)
def constant_renumber(expr, symbolname, startnumber, endnumber):
"""
Renumber arbitrary constants in expr.
This is a simple function that goes through and renumbers any Symbol
with a name in the form symbolname + num where num is in the range
from startnumber to endnumber.
Symbols are renumbered based on Basic._compare_pretty, so they
should be numbered roughly in the order that they appear in the
final, printed expression. Note that this ordering is based in part
on hashes, so it can produce different results on different
machines.
The structure of this function is very similar to that of
constantsimp().
**Example**
>>> from sympy import symbols, Eq, pprint
>>> from sympy.solvers.ode import constant_renumber
>>> x, C1, C2, C3 = symbols('x C1 C2 C3')
>>> pprint(C2 + C1*x + C3*x**2)
2
C2 + C1*x + C3*x
>>> pprint(constant_renumber(C2 + C1*x + C3*x**2, 'C', 1, 3))
2
C1 + C2*x + C3*x
"""
global newstartnumber
newstartnumber = 1
def _constant_renumber(expr, symbolname, startnumber, endnumber):
"""
We need to have an internal recursive function so that
newstartnumber maintains its values throughout recursive calls.
"""
from sympy.utilities import any
constantsymbols = [Symbol(symbolname+"%d" % t) for t in range(startnumber,
endnumber + 1)]
global newstartnumber
if isinstance(expr, Equality):
return Eq(_constant_renumber(expr.lhs, symbolname, startnumber, endnumber),
_constant_renumber(expr.rhs, symbolname, startnumber, endnumber))
if type(expr) not in (Mul, Add, Pow) and not expr.is_Function and\
not any(expr.has(t) for t in constantsymbols):
# Base case, as above. We better hope there aren't constants inside
# of some other class, because they won't be renumbered.
return expr
elif expr in constantsymbols:
# Renumbering happens here
newconst = Symbol(symbolname + str(newstartnumber))
newstartnumber += 1
return newconst
else:
if expr.is_Function or expr.is_Pow:
return expr.new(*[_constant_renumber(x, symbolname, startnumber,
endnumber) for x in expr.args])
else:
sortedargs = list(expr.args)
sortedargs.sort(Basic._compare_pretty)
return expr.new(*[_constant_renumber(x, symbolname, startnumber,
endnumber) for x in sortedargs])
return _constant_renumber(expr, symbolname, startnumber, endnumber)
def _handle_Integral(expr, func, order, hint):
"""
Converts a solution with Integrals in it into an actual solution.
For most hints, this simply runs expr.doit()
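    As an illustrative sketch (assuming f = Function('f'), x = Symbol('x') and
    C1 = Symbol('C1')), a result such as Eq(f(x), C1 + Integral(2*x, x))
    would become Eq(f(x), C1 + x**2) after doit(), while an "_Integral"
    hint returns the unevaluated form unchanged.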
"""
x = func.args[0]
f = func.func
if hint == "1st_exact":
global exactvars
x0 = exactvars['x0']
y0 = exactvars['y0']
y = exactvars['y']
tmpsol = expr.lhs.doit()
sol = 0
assert tmpsol.is_Add
for i in tmpsol.args:
if x0 not in i and y0 not in i:
sol += i
assert sol != 0
sol = Eq(sol.subs(y, f(x)),expr.rhs) # expr.rhs == C1
del exactvars
elif hint == "1st_exact_Integral":
# FIXME: We still need to back substitute y
# y = exactvars['y']
# sol = expr.subs(y, f(x))
# For now, we are going to have to return an expression with f(x) replaced
# with y. Substituting results in the y's in the second integral
# becoming f(x), which prevents the integral from being evaluatable.
# For example, Integral(cos(f(x)), (x, x0, x)). If there were a way to
# do inert substitution, that could maybe be used here instead.
del exactvars
sol = expr
elif hint == "nth_linear_constant_coeff_homogeneous":
sol = expr
elif hint[-9:] != "_Integral":
sol = expr.doit()
else:
sol = expr
return sol
def ode_order(expr, func):
"""
Returns the order of a given ODE with respect to func.
This function is implemented recursively.
**Examples**
>>> from sympy import Function, ode_order
>>> from sympy.abc import x
>>> f, g = map(Function, ['f', 'g'])
>>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 +
... f(x).diff(x), f(x))
2
>>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x))
2
>>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x))
3
"""
a = Wild('a', exclude=[func])
order = 0
if isinstance(expr, Derivative) and expr.args[0] == func:
order = len(expr.symbols)
else:
for arg in expr.args:
if isinstance(arg, Derivative) and arg.args[0] == func:
order = max(order, len(arg.symbols))
elif expr.match(a):
order = 0
            else:
for arg1 in arg.args:
order = max(order, ode_order(arg1, func))
return order
# FIXME: replace the general solution in the docstring with
# dsolve(equation, hint='1st_exact_Integral'). You will need to be able
# to have assumptions on P and Q that dP/dy = dQ/dx.
def ode_1st_exact(eq, func, order, match):
r"""
Solves 1st order exact ordinary differential equations.
A 1st order differential equation is called exact if it is the total
differential of a function. That is, the differential equation
P(x, y)dx + Q(x, y)dy = 0 is exact if there is some function F(x, y)
such that P(x, y) = dF/dx and Q(x, y) = dF/dy (d here refers to the
partial derivative). It can be shown that a necessary and
sufficient condition for a first order ODE to be exact is that
dP/dy = dQ/dx. Then, the solution will be as given below::
>>> from sympy import Function, Eq, Integral, symbols, pprint
    >>> x, y, t, x0, y0, C1 = symbols('x y t x0 y0 C1')
>>> P, Q, F= map(Function, ['P', 'Q', 'F'])
>>> pprint(Eq(Eq(F(x, y), Integral(P(t, y), (t, x0, x)) +
... Integral(Q(x0, t), (t, y0, y))), C1))
x y
/ /
| |
F(x, y) = | P(t, y) dt + | Q(x0, t) dt = C1
| |
/ /
x0 y0
Where the first partials of P and Q exist and are continuous in a
simply connected region.
A note: SymPy currently has no way to represent inert substitution on
an expression, so the hint '1st_exact_Integral' will return an integral
with dy. This is supposed to represent the function that you are
solving for.
**Example**
>>> from sympy import Function, dsolve, cos, sin
>>> from sympy.abc import x
>>> f = Function('f')
>>> dsolve(cos(f(x)) - (x*sin(f(x)) - f(x)**2)*f(x).diff(x),
... f(x), hint='1st_exact')
x*cos(f(x)) + f(x)**3/3 == C1
**References**
- http://en.wikipedia.org/wiki/Exact_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 73
# indirect doctest
"""
x = func.args[0]
f = func.func
r = match # d+e*diff(f(x),x)
C1 = Symbol('C1')
x0 = Symbol('x0', dummy=True)
y0 = Symbol('y0', dummy=True)
global exactvars # This is the only way to pass these dummy variables to
# _handle_Integral
exactvars = {'y0':y0, 'x0':x0, 'y':r['y']}
# If we ever get a Constant class, x0 and y0 should be constants, I think
sol = C.Integral(r[r['e']].subs(x,x0),(r['y'],y0,f(x)))+C.Integral(r[r['d']],(x,x0,x))
return Eq(sol, C1)
def ode_1st_homogeneous_coeff_best(eq, func, order, match):
r"""
Returns the best solution to an ODE from the two hints
'1st_homogeneous_coeff_subs_dep_div_indep' and
'1st_homogeneous_coeff_subs_indep_div_dep'.
This is as determined by ode_sol_simplicity().
See the ode_1st_homogeneous_coeff_subs_indep_div_dep() and
ode_1st_homogeneous_coeff_subs_dep_div_indep() docstrings for more
information on these hints. Note that there is no
'1st_homogeneous_coeff_best_Integral' hint.
**Example**
::
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
... hint='1st_homogeneous_coeff_best'))
___________
/ 2
/ 3*x
/ 1 + ----- *f(x) = C1
3 / 2
\/ f (x)
**References**
- http://en.wikipedia.org/wiki/Homogeneous_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 59
# indirect doctest
"""
# There are two substitutions that solve the equation, u1=y/x and u2=x/y
# They produce different integrals, so try them both and see which
# one is easier.
sol1 = ode_1st_homogeneous_coeff_subs_indep_div_dep(eq,
func, order, match)
sol2 = ode_1st_homogeneous_coeff_subs_dep_div_indep(eq,
func, order, match)
simplify = match.get('simplify', True)
if simplify:
sol1 = odesimp(sol1, func, order, "1st_homogeneous_coeff_subs_indep_div_dep")
sol2 = odesimp(sol2, func, order, "1st_homogeneous_coeff_subs_dep_div_indep")
return minkey([sol1, sol2], key=lambda x: ode_sol_simplicity(x, func,
trysolving=not simplify))
def ode_1st_homogeneous_coeff_subs_dep_div_indep(eq, func, order, match):
r"""
Solves a 1st order differential equation with homogeneous coefficients
using the substitution
u1 = <dependent variable>/<independent variable>.
    This is a differential equation P(x, y) + Q(x, y)dy/dx = 0, where P
and Q are homogeneous of the same order. A function F(x, y) is
homogeneous of order n if F(xt, yt) = t**n*F(x, y). Equivalently,
F(x, y) can be rewritten as G(y/x) or H(x/y). See also the
docstring of homogeneous_order().
If the coefficients P and Q in the differential equation above are
homogeneous functions of the same order, then it can be shown that
the substitution y = u1*x (u1 = y/x) will turn the differential
    equation into an equation separable in the variables x and u1. If
h(u1) is the function that results from making the substitution
    u1 = f(x)/x on P(x, f(x)) and g(u1) is the function that results
from the substitution on Q(x, f(x)) in the differential equation
P(x, f(x)) + Q(x, f(x))*diff(f(x), x) = 0, then the general solution
is::
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f, g, h = map(Function, ['f', 'g', 'h'])
>>> genform = g(f(x)/x) + h(f(x)/x)*f(x).diff(x)
>>> pprint(genform)
d /f(x)\ /f(x)\
--(f(x))*h|----| + g|----|
dx \ x / \ x /
>>> pprint(dsolve(genform, f(x),
... hint='1st_homogeneous_coeff_subs_dep_div_indep_Integral'))
f(x)
----
x
/
|
| -h(u1)
- | ---------------- d(u1) + log(C1*x) = 0
| u1*h(u1) + g(u1)
|
/
Where u1*h(u1) + g(u1) != 0 and x != 0.
See also the docstrings of ode_1st_homogeneous_coeff_best() and
ode_1st_homogeneous_coeff_subs_indep_div_dep().
**Example**
::
>>> from sympy import Function, dsolve
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
... hint='1st_homogeneous_coeff_subs_dep_div_indep'))
________________
/ 3
/ 3*f(x) f (x)
x* / ------ + ----- = C1
3 / x 3
\/ x
**References**
- http://en.wikipedia.org/wiki/Homogeneous_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 59
# indirect doctest
"""
x = func.args[0]
f = func.func
u1 = Symbol('u1', dummy=True) # u1 == f(x)/x
r = match # d+e*diff(f(x),x)
C1 = Symbol('C1')
    integral = C.Integral((-r[r['e']]/(r[r['d']]+u1*r[r['e']])).subs({x:1, r['y']:u1}),
        (u1, None, f(x)/x))
    sol = logcombine(Eq(log(x), integral + log(C1)), assume_pos_real=True)
return sol
def ode_1st_homogeneous_coeff_subs_indep_div_dep(eq, func, order, match):
r"""
Solves a 1st order differential equation with homogeneous coefficients
using the substitution
u2 = <independent variable>/<dependent variable>.
    This is a differential equation P(x, y) + Q(x, y)dy/dx = 0, where P
and Q are homogeneous of the same order. A function F(x, y) is
homogeneous of order n if F(xt, yt) = t**n*F(x, y). Equivalently,
F(x, y) can be rewritten as G(y/x) or H(x/y). See also the
docstring of homogeneous_order().
If the coefficients P and Q in the differential equation above are
homogeneous functions of the same order, then it can be shown that
the substitution x = u2*y (u2 = x/y) will turn the differential
    equation into an equation separable in the variables y and u2. If
h(u2) is the function that results from making the substitution
u2 = x/f(x) on P(x, f(x)) and g(u2) is the function that results
from the substitution on Q(x, f(x)) in the differential equation
P(x, f(x)) + Q(x, f(x))*diff(f(x), x) = 0, then the general solution
is:
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f, g, h = map(Function, ['f', 'g', 'h'])
>>> genform = g(x/f(x)) + h(x/f(x))*f(x).diff(x)
>>> pprint(genform)
d / x \ / x \
--(f(x))*h|----| + g|----|
dx \f(x)/ \f(x)/
>>> pprint(dsolve(genform, f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral'))
x
----
f(x)
/
|
| -g(u2)
| ---------------- d(u2)
| u2*g(u2) + h(u2)
|
/
f(x) = C1*e
Where u2*g(u2) + h(u2) != 0 and f(x) != 0.
See also the docstrings of ode_1st_homogeneous_coeff_best() and
ode_1st_homogeneous_coeff_subs_dep_div_indep().
**Example**
>>> from sympy import Function, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep'))
___________
/ 2
/ 3*x
/ 1 + ----- *f(x) = C1
3 / 2
\/ f (x)
**References**
- http://en.wikipedia.org/wiki/Homogeneous_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 59
# indirect doctest
"""
x = func.args[0]
f = func.func
u2 = Symbol('u2', dummy=True) # u2 == x/f(x)
r = match # d+e*diff(f(x),x)
C1 = Symbol('C1')
    integral = C.Integral((-r[r['d']]/(r[r['e']]+u2*r[r['d']])).subs({x:u2, r['y']:1}),
        (u2, None, x/f(x)))
    sol = logcombine(Eq(log(f(x)), integral + log(C1)), assume_pos_real=True)
return sol
# XXX: Should this function maybe go somewhere else?
def homogeneous_order(eq, *symbols):
"""
    Returns the order n if eq is homogeneous and None if it is not
    homogeneous.
Determines if a function is homogeneous and if so of what order.
A function f(x,y,...) is homogeneous of order n if
f(t*x,t*y,t*...) == t**n*f(x,y,...). The function is implemented recursively.
If the function is of two variables, F(x, y), then f being
homogeneous of any order is equivalent to being able to rewrite
F(x, y) as G(x/y) or H(y/x). This fact is used to solve 1st order
ordinary differential equations whose coefficients are homogeneous
of the same order (see the docstrings of
    ode.ode_1st_homogeneous_coeff_subs_dep_div_indep() and
    ode.ode_1st_homogeneous_coeff_subs_indep_div_dep()).
Symbols can be functions, but every argument of the function must be
a symbol, and the arguments of the function that appear in the
expression must match those given in the list of symbols. If a
declared function appears with different arguments than given in the
list of symbols, None is returned.
**Examples**
>>> from sympy import Function, homogeneous_order, sqrt
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> homogeneous_order(f(x), f(x)) == None
True
>>> homogeneous_order(f(x,y), f(y, x), x, y) == None
True
>>> homogeneous_order(f(x), f(x), x)
1
>>> homogeneous_order(x**2*f(x)/sqrt(x**2+f(x)**2), x, f(x))
2
>>> homogeneous_order(x**2+f(x), x, f(x)) == None
True
"""
if eq.has(log):
eq = logcombine(eq, assume_pos_real=True)
return _homogeneous_order(eq, *symbols)
def _homogeneous_order(eq, *symbols):
"""
The real work for homogeneous_order.
This runs as a separate function call so that logcombine doesn't
endlessly put back together what homogeneous_order is trying to take
apart.
"""
if not symbols:
        raise ValueError("homogeneous_order: no symbols were given.")
n = set()
# Replace all functions with dummy variables
for i in symbols:
if i.is_Function:
if not all([j in symbols for j in i.args]):
return None
else:
dummyvar = numbered_symbols(prefix='d', dummy=True).next()
eq = eq.subs(i, dummyvar)
symbols = list(symbols)
symbols.remove(i)
symbols.append(dummyvar)
symbols = tuple(symbols)
# The following are not supported
if eq.has(Order) or eq.has(Derivative):
return None
# These are all constants
if type(eq) in (int, float) or eq.is_Number or eq.is_Integer or \
eq.is_Rational or eq.is_NumberSymbol or eq.is_Real:
return sympify(0)
# Break the equation into additive parts
if eq.is_Add:
s = set()
for i in eq.args:
s.add(_homogeneous_order(i, *symbols))
if len(s) != 1:
return None
else:
n = s
if eq.is_Pow:
if not eq.exp.is_number:
return None
o = _homogeneous_order(eq.base, *symbols)
        if o is None:
return None
else:
n.add(sympify(o*eq.exp))
t = Symbol('t', dummy=True, positive=True) # It is sufficient that t > 0
r = Wild('r', exclude=[t])
a = Wild('a', exclude=[t])
eqs = eq.subs(dict(zip(symbols,(t*i for i in symbols))))
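    # If eq is homogeneous of order n, substituting s -> t*s for every
    # symbol gives eqs == t**n*eq, so matching r*t**a recovers n as a.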
if eqs.is_Mul:
if t not in eqs:
n.add(sympify(0))
else:
m = eqs.match(r*t**a)
if m:
n.add(sympify(m[a]))
else:
s = 0
for i in eq.args:
o = _homogeneous_order(i, *symbols)
                if o is None:
return None
else:
s += o
n.add(sympify(s))
if eq.is_Function:
if eq.func is log:
# The only possibility to pull a t out of a function is a power in
# a logarithm. This is very likely due to calling of logcombine().
args = make_list(eq.args[0], Mul)
if all(i.is_Pow for i in args):
base = 1
expos = set()
                for p in args:
                    if sign(p.exp).is_negative:
                        s = -1
                    else:
                        s = 1
                    expos.add(s*p.exp)
                    base *= p.base**s
if len(expos) != 1:
return None
else:
return _homogeneous_order(expos.pop()*log(base), *symbols)
else:
if _homogeneous_order(eq.args[0], *symbols) == 0:
return sympify(0)
else:
return None
else:
if _homogeneous_order(eq.args[0], *symbols) == 0:
return sympify(0)
else:
return None
    if n is None or len(n) != 1:
return None
else:
return n.pop()
return None
def ode_1st_linear(eq, func, order, match):
r"""
Solves 1st order linear differential equations.
    These are differential equations of the form dy/dx + P(x)*y = Q(x).
These kinds of differential equations can be solved in a general
    way. The integrating factor exp(Integral(P(x), x)) will turn the
    equation into one that can be integrated directly. The general solution is::
>>> from sympy import Function, dsolve, Eq, pprint, diff, sin
>>> from sympy.abc import x
>>> f, P, Q = map(Function, ['f', 'P', 'Q'])
>>> genform = Eq(f(x).diff(x) + P(x)*f(x), Q(x))
>>> pprint(genform)
d
P(x)*f(x) + --(f(x)) = Q(x)
dx
>>> pprint(dsolve(genform, f(x), hint='1st_linear_Integral'))
/ / \
| | |
| | / | /
| | | | |
| | | P(x) dx | - | P(x) dx
| | | | |
| | / | /
f(x) = |C1 + | Q(x)*e dx|*e
| | |
\ / /
**Example**
>>> f = Function('f')
>>> pprint(dsolve(Eq(x*diff(f(x), x) - f(x), x**2*sin(x)),
... f(x), '1st_linear'))
f(x) = x*(C1 - cos(x))
**References**
- http://en.wikipedia.org/wiki/Linear_differential_equation#First_order_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 92
# indirect doctest
"""
x = func.args[0]
f = func.func
r = match # a*diff(f(x),x) + b*f(x) + c
C1 = Symbol('C1')
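    # The match is a*f' + b*f + c == 0, i.e. P(x) = b/a and Q(x) = -c/a;
    # t below is the integrating factor exp(Integral(P(x), x)).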
t = exp(C.Integral(r[r['b']]/r[r['a']], x))
tt = C.Integral(t*(-r[r['c']]/r[r['a']]), x)
return Eq(f(x),(tt + C1)/t)
def ode_Bernoulli(eq, func, order, match):
r"""
Solves Bernoulli differential equations.
These are equations of the form dy/dx + P(x)*y = Q(x)*y**n, n != 1.
The substitution w = 1/y**(1-n) will transform an equation of this
form into one that is linear (see the docstring of
ode_1st_linear()). The general solution is::
>>> from sympy import Function, dsolve, Eq, pprint
>>> from sympy.abc import x, n
>>> f, P, Q = map(Function, ['f', 'P', 'Q'])
>>> genform = Eq(f(x).diff(x) + P(x)*f(x), Q(x)*f(x)**n)
>>> pprint(genform)
d n
P(x)*f(x) + --(f(x)) = f (x)*Q(x)
dx
>>> pprint(dsolve(genform, f(x), hint='Bernoulli_Integral')) #doctest: +SKIP
1
----
1 - n
// / \ \
|| | | |
|| | / | / |
|| | | | | |
|| | (1 - n)* | P(x) dx | (-1 + n)* | P(x) dx|
|| | | | | |
|| | / | / |
f(x) = ||C1 + (-1 + n)* | -Q(x)*e dx|*e |
|| | | |
\\ / / /
    Note that when n = 1, the equation is separable (see the
docstring of ode_separable()).
>>> pprint(dsolve(Eq(f(x).diff(x) + P(x)*f(x), Q(x)*f(x)), f(x),
... hint='separable_Integral'))
f(x)
/
| /
| 1 |
| - dy = C1 + | (-P(x) + Q(x)) dx
| y |
| /
/
**Example**
>>> from sympy import Function, dsolve, Eq, pprint, log
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(Eq(x*f(x).diff(x) + f(x), log(x)*f(x)**2),
... f(x), hint='Bernoulli'))
1
f(x) = -------------------
/ log(x) 1\
x*|C1 + ------ + -|
\ x x/
**References**
- http://en.wikipedia.org/wiki/Bernoulli_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 95
# indirect doctest
"""
x = func.args[0]
f = func.func
r = match # a*diff(f(x),x) + b*f(x) + c*f(x)**n, n != 1
C1 = Symbol('C1')
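    # Under w = f(x)**(1 - n) the matched equation a*f' + b*f + c*f**n == 0
    # becomes linear in w; t is the integrating factor of that linear
    # equation, and the result substitutes back f(x) = w**(1/(1 - n)).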
t = exp((1-r[r['n']])*C.Integral(r[r['b']]/r[r['a']],x))
tt = (r[r['n']]-1)*C.Integral(t*r[r['c']]/r[r['a']],x)
return Eq(f(x),((tt + C1)/t)**(1/(1-r[r['n']])))
def ode_Liouville(eq, func, order, match):
r"""
Solves 2nd order Liouville differential equations.
The general form of a Liouville ODE is
d^2y/dx^2 + g(y)*(dy/dx)**2 + h(x)*dy/dx. The general solution is::
>>> from sympy import Function, dsolve, Eq, pprint, diff
>>> from sympy.abc import x
>>> f, g, h = map(Function, ['f', 'g', 'h'])
>>> genform = Eq(diff(f(x),x,x) + g(f(x))*diff(f(x),x)**2 +
... h(x)*diff(f(x),x), 0)
>>> pprint(genform)
2 2
d d d
--(f(x)) *g(f(x)) + --(f(x))*h(x) + -----(f(x)) = 0
dx dx dx dx
>>> pprint(dsolve(genform, f(x), hint='Liouville_Integral'))
f(x)
/ /
| |
| / | /
| | | |
| - | h(x) dx | | g(y) dy
| | | |
| / | /
C1 + C2* | e dx + | e dy = 0
| |
/ /
**Example**
::
>>> from sympy import Function, dsolve, Eq, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(diff(f(x), x, x) + diff(f(x), x)**2/f(x) +
... diff(f(x), x)/x, f(x), hint='Liouville'))
________________ ________________
[f(x) = -\/ C1 + C2*log(x) , f(x) = \/ C1 + C2*log(x) ]
**References**
- Goldstein and Braun, "Advanced Methods for the Solution of
Differential Equations", pp. 98
- http://www.maplesoft.com/support/help/view.aspx?path=odeadvisor/Liouville
# indirect doctest
"""
    # Liouville ODE f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x)
# See Goldstein and Braun, "Advanced Methods for the Solution of
# Differential Equations", pg. 98, as well as
# http://www.maplesoft.com/support/help/view.aspx?path=odeadvisor/Liouville
x = func.args[0]
f = func.func
r = match # f(x).diff(x, 2) + g*f(x).diff(x)**2 + h*f(x).diff(x)
y = r['y']
C1 = Symbol('C1')
C2 = Symbol('C2')
    integral = C.Integral(exp(C.Integral(r['g'], y)), (y, None, f(x)))
    sol = Eq(integral + C1*C.Integral(exp(-C.Integral(r['h'], x)), x) + C2, 0)
return sol
def _nth_linear_match(eq, func, order):
"""
Matches a differential equation to the linear form:
a_n(x)y^(n) + ... + a_1(x)y' + a_0(x)y + B(x) = 0
Returns a dict of order:coeff terms, where order is the order of the
derivative on each term, and coeff is the coefficient of that
derivative. The key -1 holds the function B(x). Returns None if
the ode is not linear. This function assumes that func has already
been checked to be good.
**Examples**
>>> from sympy import Function, cos, sin
>>> from sympy.abc import x
>>> from sympy.solvers.ode import _nth_linear_match
>>> f = Function('f')
>>> _nth_linear_match(f(x).diff(x, 3) + 2*f(x).diff(x) +
... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) -
... sin(x), f(x), 3)
{1: 2 + cos(x), 0: -1, -1: x - sin(x), 2: x, 3: 1}
>>> _nth_linear_match(f(x).diff(x, 3) + 2*f(x).diff(x) +
... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) -
... sin(f(x)), f(x), 3) == None
True
"""
from sympy import S
x = func.args[0]
one_x = set([x])
terms = dict([(i, S.Zero) for i in range(-1, order+1)])
for i in make_list(eq, Add):
if not i.has(func):
terms[-1] += i
else:
c, f = i.as_independent(func)
if not ((isinstance(f, Derivative) and set(f.symbols) == one_x) or\
f == func):
return None
else:
terms[len(f.args[1:])] += c
return terms
def ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match, returns='sol'):
"""
Solves an nth order linear homogeneous differential equation with
constant coefficients.
This is an equation of the form a_n*f(x)^(n) + a_(n-1)*f(x)^(n-1) +
... + a1*f'(x) + a0*f(x) = 0
These equations can be solved in a general manner, by taking the
roots of the characteristic equation a_n*m**n + a_(n-1)*m**(n-1) +
... + a1*m + a0 = 0. The solution will then be the sum of
    Cn*x**i*exp(r*x) terms, one for each root, where Cn is an arbitrary
    constant, r is a root of the characteristic equation and i ranges
    from 0 to the multiplicity of the root - 1 (for example, a root 3 of
multiplicity 2 would create the terms C1*exp(3*x) + C2*x*exp(3*x)).
The exponential is usually expanded for complex roots using Euler's
equation exp(I*x) = cos(x) + I*sin(x). Complex roots always come in
    conjugate pairs in polynomials with real coefficients, so the two
roots will be represented (after simplifying the constants) as
exp(a*x)*(C1*cos(b*x) + C2*sin(b*x)).
If SymPy cannot find exact roots to the characteristic equation, a
    RootOf instance will be returned in its stead.
>>> from sympy import Function, dsolve, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> dsolve(f(x).diff(x, 5) + 10*f(x).diff(x) - 2*f(x), f(x),
... hint='nth_linear_constant_coeff_homogeneous')
... # doctest: +NORMALIZE_WHITESPACE
f(x) == C1*exp(x*RootOf(_m**5 + 10*_m - 2, _m, domain='ZZ', index=0)) + \
C2*exp(x*RootOf(_m**5 + 10*_m - 2, _m, domain='ZZ', index=1)) + \
C3*exp(x*RootOf(_m**5 + 10*_m - 2, _m, domain='ZZ', index=2)) + \
C4*exp(x*RootOf(_m**5 + 10*_m - 2, _m, domain='ZZ', index=3)) + \
C5*exp(x*RootOf(_m**5 + 10*_m - 2, _m, domain='ZZ', index=4))
Note that because this method does not involve integration, there is
no 'nth_linear_constant_coeff_homogeneous_Integral' hint.
The following is for internal use:
- returns = 'sol' returns the solution to the ODE.
- returns = 'list' returns a list of linearly independent
solutions, for use with non homogeneous solution methods like
variation of parameters and undetermined coefficients. Note that,
though the solutions should be linearly independent, this function
does not explicitly check that. You can do "assert
simplify(wronskian(sollist)) != 0" to check for linear independence.
Also, "assert len(sollist) == order" will need to pass.
- returns = 'both', return a dictionary {'sol':solution to ODE,
'list': list of linearly independent solutions}.
**Example**
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(f(x).diff(x, 4) + 2*f(x).diff(x, 3) -
... 2*f(x).diff(x, 2) - 6*f(x).diff(x) + 5*f(x), f(x),
... hint='nth_linear_constant_coeff_homogeneous'))
x -2*x
f(x) = (C1 + C2*x)*e + (C3*sin(x) + C4*cos(x))*e
**References**
- http://en.wikipedia.org/wiki/Linear_differential_equation
section: Nonhomogeneous_equation_with_constant_coefficients
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 211
# indirect doctest
"""
x = func.args[0]
f = func.func
r = match
# A generator of constants
constants = numbered_symbols(prefix='C', function=Symbol, start=1)
# First, set up characteristic equation.
m = Symbol('m', dummy=True)
chareq = S.Zero
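    # Build the characteristic polynomial a_n*m**n + ... + a_1*m + a_0 from
    # the matched coefficients, skipping string keys (internal bookkeeping)
    # and key -1 (the inhomogeneous term).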
for i in r.keys():
        if isinstance(i, str) or i < 0:
pass
else:
chareq += r[i]*m**i
chareqroots = RootsOf(chareq, m)
charroots_exact = list(chareqroots.exact_roots())
charroots_formal = list(chareqroots.formal_roots())
if charroots_formal and discriminant(chareq, m) == 0:
# If Poly cannot find the roots explicitly, we can only return
# an expression in terms of RootOf's if we know the roots
# are not repeated. We use the fact that a polynomial has
# repeated roots iff its discriminant == 0.
# Ideally, RootOf would cancel out roots from charroots_exact, so
# we check the discriminant of only the unknown part of the chareq.
# See issue 1557.
raise NotImplementedError("Cannot find all of the roots of " + \
"characteristic equation " + str(chareq) + ", which has " + \
"repeated roots.")
# Create a dict root: multiplicity or charroots
charroots = {}
for i in charroots_exact + charroots_formal:
if i in charroots:
charroots[i] += 1
else:
charroots[i] = 1
gsol = S(0)
    # We need to keep track of terms so we can run collect() at the end.
# This is necessary for constantsimp to work properly.
global collectterms
collectterms = []
for root, multiplicity in charroots.items():
for i in range(multiplicity):
if isinstance(root, RootOf):
gsol += exp(root*x)*constants.next()
assert multiplicity == 1
collectterms = [(0, root, 0)] + collectterms
else:
reroot = re(root)
imroot = im(root)
gsol += x**i*exp(reroot*x)*(constants.next()*sin(abs(imroot)*x) \
+ constants.next()*cos(imroot*x))
# This ordering is important
collectterms = [(i, reroot, imroot)] + collectterms
if returns == 'sol':
return Eq(f(x), gsol)
    elif returns in ('list', 'both'):
# Create a list of (hopefully) linearly independent solutions
gensols = []
# Keep track of when to use sin or cos for nonzero imroot
for i, reroot, imroot in collectterms:
if imroot == 0:
gensols.append(x**i*exp(reroot*x))
else:
if x**i*exp(reroot*x)*sin(abs(imroot)*x) in gensols:
gensols.append(x**i*exp(reroot*x)*cos(imroot*x))
else:
gensols.append(x**i*exp(reroot*x)*sin(abs(imroot)*x))
if returns == 'list':
return gensols
else:
return {'sol':Eq(f(x), gsol), 'list':gensols}
else:
raise ValueError('Unknown value for key "returns".')
def ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match):
r"""
Solves an nth order linear differential equation with constant
coefficients using the method of undetermined coefficients.
This method works on differential equations of the form a_n*f(x)^(n)
+ a_(n-1)*f(x)^(n-1) + ... + a1*f'(x) + a0*f(x) = P(x), where P(x)
is a function that has a finite number of linearly independent
derivatives.
    Functions that fit this requirement are finite sums of functions of the
form a*x**i*exp(b*x)*sin(c*x + d) or a*x**i*exp(b*x)*cos(c*x + d),
where i is a non-negative integer and a, b, c, and d are constants.
For example any polynomial in x, functions like x**2*exp(2*x),
x*sin(x), and exp(x)*cos(x) can all be used. Products of sin's and
cos's have a finite number of derivatives, because they can be
expanded into sin(a*x) and cos(b*x) terms. However, SymPy currently
cannot do that expansion, so you will need to manually rewrite the
expression in terms of the above to use this method. So, for example,
you will need to manually convert sin(x)**2 into (1 + cos(2*x))/2 to
properly apply the method of undetermined coefficients on it.
This method works by creating a trial function from the expression
and all of its linear independent derivatives and substituting them
    into the original ODE. The coefficients for each term give a system
    of linear equations, which is solved for and substituted,
giving the solution. If any of the trial functions are linearly
dependent on the solution to the homogeneous equation, they are
multiplied by sufficient x to make them linearly independent.
**Example**
>>> from sympy import Function, dsolve, pprint, exp, cos
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(f(x).diff(x, 2) + 2*f(x).diff(x) + f(x) -
... 4*exp(-x)*x**2 + cos(2*x), f(x),
... hint='nth_linear_constant_coeff_undetermined_coefficients'))
/ 4\
4*sin(2*x) 3*cos(2*x) | x | -x
f(x) = - ---------- + ---------- + |C1 + C2*x + --|*e
25 25 \ 3 /
**References**
- http://en.wikipedia.org/wiki/Method_of_undetermined_coefficients
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 221
# indirect doctest
"""
gensol = ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match,
returns='both')
match.update(gensol)
return _solve_undetermined_coefficients(eq, func, order, match)
def _solve_undetermined_coefficients(eq, func, order, match):
"""
Helper function for the method of undetermined coefficients.
See the ode_nth_linear_constant_coeff_undetermined_coefficients()
docstring for more information on this method.
match should be a dictionary that has the following keys:
'list' - A list of solutions to the homogeneous equation, such as
the list returned by
ode_nth_linear_constant_coeff_homogeneous(returns='list')
'sol' - The general solution, such as the solution returned by
ode_nth_linear_constant_coeff_homogeneous(returns='sol')
'trialset' - The set of trial functions as returned by
_undetermined_coefficients_match()['trialset']
"""
x = func.args[0]
f = func.func
r = match
coeffs = numbered_symbols('a', dummy=True)
coefflist = []
gensols = r['list']
gsol = r['sol']
trialset = r['trialset']
notneedset = set([])
newtrialset = set([])
global collectterms
if len(gensols) != order:
        raise NotImplementedError("Cannot find " + str(order) + \
            " solutions to the homogeneous equation necessary to apply " + \
            "undetermined coefficients to " + str(eq) + " (number of terms != order)")
usedsin = set([])
mult = 0 # The multiplicity of the root
getmult = True
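    # collectterms lists, for each root, its terms with the power of x
    # descending, so the first i seen for a root is multiplicity - 1;
    # mult is reset (via getmult) whenever a new root's run begins.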
for i, reroot, imroot in collectterms:
if getmult:
mult = i + 1
getmult = False
if i == 0:
getmult = True
if imroot:
# Alternate between sin and cos
if (i, reroot) in usedsin:
check = x**i*exp(reroot*x)*cos(imroot*x)
else:
check = x**i*exp(reroot*x)*sin(abs(imroot)*x)
usedsin.add((i, reroot))
else:
check = x**i*exp(reroot*x)
if check in trialset:
# If an element of the trial function is already part of the homogeneous
# solution, we need to multiply by sufficient x to make it linearly
# independent. We also don't need to bother checking for the coefficients
# on those elements, since we already know it will be 0.
while True:
if check*x**mult in trialset:
mult += 1
else:
break
trialset.add(check*x**mult)
notneedset.add(check)
newtrialset = trialset - notneedset
trialfunc = 0
for i in newtrialset:
c = coeffs.next()
coefflist.append(c)
trialfunc += c*i
eqs = eq.subs(f(x), trialfunc)
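    # eqs is the ODE with the trial function substituted in; collecting on
    # each trial term below yields one linear equation per term, and solving
    # that system gives the undetermined coefficients.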
coeffsdict = dict(zip(trialset, [0]*(len(trialset) + 1)))
# XXX: Replace this with as_Add when Mateusz's Polys branch gets merged in
# The else clause actually should never be run unless the ode is only one
# term, in which case it must be a derivative term and so will be inhomogeneous
eqs = expand_mul(eqs)
for i in make_list(eqs, Add):
s = separatevars(i, dict=True, symbols=[x])
coeffsdict[s[x]] += s['coeff']
coeffvals = solve(coeffsdict.values(), coefflist)
if not coeffvals:
raise NotImplementedError("Could not solve " + str(eq) + " using the " + \
" method of undetermined coefficients (unable to solve for coefficients).")
psol = trialfunc.subs(coeffvals)
return Eq(f(x), gsol.rhs + psol)
def _undetermined_coefficients_match(expr, x):
"""
Returns a trial function match if undetermined coefficients can be
applied to expr, and None otherwise.
A trial expression can be found for an expression for use with the
method of undetermined coefficients if the expression is an
additive/multiplicative combination of constants, polynomials in x
(the independent variable of expr), sin(a*x + b), cos(a*x + b), and
exp(a*x) terms (in other words, it has a finite number of linearly
independent derivatives).
Note that you may still need to multiply each term returned here by
sufficient x to make it linearly independent with the solutions to
the homogeneous equation.
This is intended for internal use by undetermined_coefficients
hints.
    SymPy currently has no way to convert sin(x)**n*cos(x)**m into a sum
of only sin(a*x) and cos(b*x) terms, so these are not implemented.
So, for example, you will need to manually convert sin(x)**2 into
(1 + cos(2*x))/2 to properly apply the method of undetermined
coefficients on it.
**Example**
>>> from sympy import log, exp
>>> from sympy.solvers.ode import _undetermined_coefficients_match
>>> from sympy.abc import x
>>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x)
{'test': True, 'trialset': set([x*exp(x), exp(x), exp(-x)])}
>>> _undetermined_coefficients_match(log(x), x)
{'test': False}
"""
from sympy import S
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
expr = powsimp(expr, combine='exp') # exp(x)*exp(2*x + 1) => exp(3*x + 1)
retdict = {}
def _test_term(expr, x):
"""
Test if expr fits the proper form for undetermined coefficients.
"""
if expr.is_Add:
return all([_test_term(i, x) for i in expr.args])
elif expr.is_Mul:
if expr.has(sin) or expr.has(cos):
foundtrig = False
                # Make sure that there is only one trig function in the args.
# See the docstring.
for i in expr.args:
if i.has(sin) or i.has(cos):
if foundtrig:
return False
else:
foundtrig = True
return all([_test_term(i, x) for i in expr.args])
elif expr.is_Function:
if expr.func in (sin, cos, exp):
if expr.args[0].match(a*x + b):
return True
else:
return False
else:
return False
elif expr.is_Pow and expr.base.is_Symbol and expr.exp.is_Integer and \
expr.exp >= 0:
return True
elif expr.is_Pow and expr.base.is_number:
if expr.exp.match(a*x + b):
return True
else:
return False
elif expr.is_Symbol or expr.is_Number:
return True
else:
return False
    def _get_trial_set(expr, x, exprs=None):
        """
        Returns a set of trial terms for undetermined coefficients.
        The idea behind undetermined coefficients is that the terms of the
        expression repeat themselves after a finite number of
        derivatives, except for the coefficients (they are linearly
        dependent). So if we collect these, we should have the terms of
        our trial function.
        """
        if exprs is None:
            exprs = set()
"""
def _remove_coefficient(expr, x):
"""
Returns the expression without a coefficient.
Similar to expr.as_independent(x)[1], except it only works
multiplicatively.
"""
# I was using the below match, but it doesn't always put all of the
# coefficient in c. c.f. 2**x*6*exp(x)*log(2)
# The below code is probably cleaner anyway.
# c = Wild('c', exclude=[x])
# t = Wild('t')
# r = expr.match(c*t)
term = S.One
if expr.is_Mul:
for i in expr.args:
if i.has(x):
term *= i
elif expr.has(x):
term = expr
return term
expr = expand_mul(expr)
if expr.is_Add:
for term in expr.args:
if _remove_coefficient(term, x) in exprs:
pass
else:
exprs.add(_remove_coefficient(term, x))
exprs = exprs.union(_get_trial_set(term, x, exprs))
else:
term = _remove_coefficient(expr, x)
tmpset = exprs.union(set([term]))
oldset = set([])
while tmpset != oldset:
# If you get stuck in this loop, then _test_term is probably broken
oldset = tmpset.copy()
expr = expr.diff(x)
term = _remove_coefficient(expr, x)
if term.is_Add:
tmpset = tmpset.union(_get_trial_set(term, x, tmpset))
else:
tmpset.add(term)
exprs = tmpset
return exprs
retdict['test'] = _test_term(expr, x)
if retdict['test']:
# Try to generate a list of trial solutions that will have the undetermined
# coefficients. Note that if any of these are not linearly independent
# with any of the solutions to the homogeneous equation, then they will
# need to be multiplied by sufficient x to make them so. This function
# DOES NOT do that (it doesn't even look at the homogeneous equation).
retdict['trialset'] = _get_trial_set(expr, x)
return retdict
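# Illustrative sketch (not part of the solver): the trial set above is built
# by repeated differentiation until no new linearly independent terms appear.
# Assuming the imports from the docstring:
#
#     >>> from sympy import exp
#     >>> from sympy.abc import x
#     >>> _undetermined_coefficients_match(x*exp(x), x)['trialset']
#     set([x*exp(x), exp(x)])
#
# The trial function would then be (A*x + B)*exp(x), with A and B the
# undetermined coefficients to solve for.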
def ode_nth_linear_constant_coeff_variation_of_parameters(eq, func, order, match):
r"""
    Solves an nth order linear differential equation with constant
    coefficients using the method of variation of parameters.
This method works on any differential equations of the form
f(x)^(n) + a_(n-1)*f(x)^(n-1) + ... + a1*f'(x) + a0*f(x) = P(x).
    This method works by assuming that the particular solution takes the
    form Sum(c_i(x)*y_i(x), (i, 1, n)), where y_i is the ith solution to
    the homogeneous equation. The coefficients are then found using
    Wronskians and Cramer's rule. The particular solution is given by
    Sum(Integral(W_i(x)/W(x), x)*y_i(x), (i, 1, n)), where W(x) is the
    Wronskian of the fundamental system (the system of n linearly
    independent solutions to the homogeneous equation), and W_i(x) is
    the Wronskian of the fundamental system with the ith column replaced
    with [0, 0, ..., 0, P(x)].
This method is general enough to solve any nth order inhomogeneous
linear differential equation with constant coefficients, but
sometimes SymPy cannot simplify the Wronskian well enough to
integrate it. If this method hangs, try using the
'nth_linear_constant_coeff_variation_of_parameters_Integral' hint
and simplifying the integrals manually. Also, prefer using
'nth_linear_constant_coeff_undetermined_coefficients' when it
applies, because it doesn't use integration, making it faster and
more reliable.
Warning, using simplify=False with
'nth_linear_constant_coeff_variation_of_parameters' in dsolve()
may cause it to hang, because it will not attempt to simplify
the Wronskian before integrating. It is recommended that you only
use simplify=False with
'nth_linear_constant_coeff_variation_of_parameters_Integral' for
this method, especially if the solution to the homogeneous
equation has trigonometric functions in it.
**Example**
>>> from sympy import Function, dsolve, pprint, exp, log
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(f(x).diff(x, 3) - 3*f(x).diff(x, 2) +
... 3*f(x).diff(x) - f(x) - exp(x)*log(x), f(x),
... hint='nth_linear_constant_coeff_variation_of_parameters'))
/ 3 /11 log(x)\ 2\ x
f(x) = |C1 + C2*x - x *|-- - ------| + C3*x |*e
\ \36 6 / /
**References**
- http://en.wikipedia.org/wiki/Variation_of_parameters
- http://planetmath.org/encyclopedia/VariationOfParameters.html
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 233
# indirect doctest
"""
gensol = ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match,
returns='both')
match.update(gensol)
return _solve_variation_of_parameters(eq, func, order, match)
def _solve_variation_of_parameters(eq, func, order, match):
"""
Helper function for the method of variation of parameters.
    See the ode_nth_linear_constant_coeff_variation_of_parameters()
    docstring for more information on this method.
match should be a dictionary that has the following keys:
'list' - A list of solutions to the homogeneous equation, such as
the list returned by
ode_nth_linear_constant_coeff_homogeneous(returns='list')
'sol' - The general solution, such as the solution returned by
ode_nth_linear_constant_coeff_homogeneous(returns='sol')
"""
x = func.args[0]
f = func.func
r = match
psol = 0
gensols = r['list']
gsol = r['sol']
wr = wronskian(gensols, x)
if r.get('simplify', True):
wr = simplify(wr) # We need much better simplification for some ODEs.
# See issue 1563, for example.
        # To reduce the commonly occurring sin(x)**2 + cos(x)**2 to 1
wr = trigsimp(wr, deep=True, recursive=True)
if not wr:
# The wronskian will be 0 iff the solutions are not linearly independent.
raise NotImplementedError("Cannot find " + str(order) + \
" solutions to the homogeneous equation nessesary to apply " + \
"variation of parameters to " + str(eq) + " (Wronskian == 0)")
if len(gensols) != order:
raise NotImplementedError("Cannot find " + str(order) + \
" solutions to the homogeneous equation nessesary to apply " + \
"variation of parameters to " + str(eq) + " (number of terms != order)")
negoneterm = (-1)**(order)
for i in gensols:
psol += negoneterm*C.Integral(wronskian(filter(lambda x: x != i, \
gensols), x)*r[-1]/wr, x)*i/r[order]
negoneterm *= -1
if r.get('simplify', True):
psol = simplify(psol)
psol = trigsimp(psol, deep=True)
return Eq(f(x), gsol.rhs + psol)
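# Worked sketch of the loop above (illustrative, order 2): with homogeneous
# solutions y1, y2, Wronskian W(x) and inhomogeneity P(x) = r[-1], the loop
# accumulates terms of the form
#     (+/-1) * Integral(wronskian(remaining solutions)*P(x)/W(x), x) * y_i / a_n
# reproducing Sum(Integral(W_i(x)/W(x), x)*y_i(x), (i, 1, n)); the
# alternating factor negoneterm supplies the cofactor signs of W_i.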
def ode_separable(eq, func, order, match):
r"""
Solves separable 1st order differential equations.
This is any differential equation that can be written as
P(y)*dy/dx = Q(x). The solution can then just be found by
rearranging terms and integrating:
Integral(P(y), y) = Integral(Q(x), x). This hint uses separatevars()
as its back end, so if a separable equation is not caught by this
    solver, it is most likely the fault of that function. separatevars()
is smart enough to do most expansion and factoring necessary to
convert a separable equation F(x, y) into the proper form P(x)*Q(y).
The general solution is::
>>> from sympy import Function, dsolve, Eq, pprint
>>> from sympy.abc import x
>>> a, b, c, d, f = map(Function, ['a', 'b', 'c', 'd', 'f'])
>>> genform = Eq(a(x)*b(f(x))*f(x).diff(x), c(x)*d(f(x)))
>>> pprint(genform)
d
--(f(x))*a(x)*b(f(x)) = c(x)*d(f(x))
dx
>>> pprint(dsolve(genform, f(x), hint='separable_Integral'))
f(x)
/ /
| |
| b(y) | c(x)
| ---- dy = C1 + | ---- dx
| d(y) | a(x)
| |
/ /
**Example**
::
>>> from sympy import Function, dsolve, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(Eq(f(x)*f(x).diff(x) + x, 3*x*f(x)**2), f(x),
... hint='separable'))
/ 2 \ 2
-log\1 - 3*f (x)/ x
----------------- = C1 - --
6 2
**Reference**
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 52
# indirect doctest
"""
x = func.args[0]
f = func.func
C1 = Symbol('C1')
r = match # {'m1':m1, 'm2':m2, 'y':y}
return Eq(C.Integral(r['m2']['coeff']*r['m2'][r['y']]/r['m1'][r['y']],
(r['y'], None, f(x))), C.Integral(-r['m1']['coeff']*r['m1'][x]/
r['m2'][x], x)+C1)
| bsd-3-clause | -7,464,464,908,759,745,000 | 40.646425 | 91 | 0.579524 | false |
rwl/PyCIM | CIM14/IEC61968/AssetModels/ToWindingSpec.py | 1 | 6782 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.IdentifiedObject import IdentifiedObject
class ToWindingSpec(IdentifiedObject):
"""For short-circuit tests, specifies the winding and tap for all short-circuited windings. For open-circuit tests, specifies the winding, tap, induced voltage, and induced angle for any non-excited windings that were measured during the test. This won't apply if only the exciting current and no-load losses were measured.
"""
def __init__(self, voltage=0.0, phaseShift=0.0, toTapStep=0, ToWinding=None, OpenCircuitTests=None, ShortCircuitTests=None, *args, **kw_args):
"""Initialises a new 'ToWindingSpec' instance.
@param voltage: (if open-circuit test) Voltage measured at the open-circuited 'to' winding, with the 'from' winding set to the 'from' winding's rated voltage and all other windings open-circuited.
@param phaseShift: (if open-circuit test) Phase shift measured at the open-circuited 'to' winding, with the 'from' winding set to the 'from' winding's rated voltage and all other windings open-circuited.
@param toTapStep: Tap step number for the 'to' winding of the test pair.
@param ToWinding: Winding short-circuited in a short-circuit test, or measured for induced voltage and angle in an open-circuit test.
@param OpenCircuitTests: All open-circuit tests in which this winding was measured.
@param ShortCircuitTests: All short-circuit tests in which this winding was short-circuited.
"""
#: (if open-circuit test) Voltage measured at the open-circuited 'to' winding, with the 'from' winding set to the 'from' winding's rated voltage and all other windings open-circuited.
self.voltage = voltage
#: (if open-circuit test) Phase shift measured at the open-circuited 'to' winding, with the 'from' winding set to the 'from' winding's rated voltage and all other windings open-circuited.
self.phaseShift = phaseShift
#: Tap step number for the 'to' winding of the test pair.
self.toTapStep = toTapStep
self._ToWinding = None
self.ToWinding = ToWinding
self._OpenCircuitTests = []
self.OpenCircuitTests = [] if OpenCircuitTests is None else OpenCircuitTests
self._ShortCircuitTests = []
self.ShortCircuitTests = [] if ShortCircuitTests is None else ShortCircuitTests
super(ToWindingSpec, self).__init__(*args, **kw_args)
_attrs = ["voltage", "phaseShift", "toTapStep"]
_attr_types = {"voltage": float, "phaseShift": float, "toTapStep": int}
_defaults = {"voltage": 0.0, "phaseShift": 0.0, "toTapStep": 0}
_enums = {}
_refs = ["ToWinding", "OpenCircuitTests", "ShortCircuitTests"]
_many_refs = ["OpenCircuitTests", "ShortCircuitTests"]
def getToWinding(self):
"""Winding short-circuited in a short-circuit test, or measured for induced voltage and angle in an open-circuit test.
"""
return self._ToWinding
def setToWinding(self, value):
if self._ToWinding is not None:
filtered = [x for x in self.ToWinding.ToWindingSpecs if x != self]
self._ToWinding._ToWindingSpecs = filtered
self._ToWinding = value
if self._ToWinding is not None:
if self not in self._ToWinding._ToWindingSpecs:
self._ToWinding._ToWindingSpecs.append(self)
ToWinding = property(getToWinding, setToWinding)
def getOpenCircuitTests(self):
"""All open-circuit tests in which this winding was measured.
"""
return self._OpenCircuitTests
def setOpenCircuitTests(self, value):
for p in self._OpenCircuitTests:
filtered = [q for q in p.MeasuredWindingSpecs if q != self]
self._OpenCircuitTests._MeasuredWindingSpecs = filtered
for r in value:
if self not in r._MeasuredWindingSpecs:
r._MeasuredWindingSpecs.append(self)
self._OpenCircuitTests = value
OpenCircuitTests = property(getOpenCircuitTests, setOpenCircuitTests)
def addOpenCircuitTests(self, *OpenCircuitTests):
for obj in OpenCircuitTests:
if self not in obj._MeasuredWindingSpecs:
obj._MeasuredWindingSpecs.append(self)
self._OpenCircuitTests.append(obj)
def removeOpenCircuitTests(self, *OpenCircuitTests):
for obj in OpenCircuitTests:
if self in obj._MeasuredWindingSpecs:
obj._MeasuredWindingSpecs.remove(self)
self._OpenCircuitTests.remove(obj)
def getShortCircuitTests(self):
"""All short-circuit tests in which this winding was short-circuited.
"""
return self._ShortCircuitTests
def setShortCircuitTests(self, value):
for p in self._ShortCircuitTests:
filtered = [q for q in p.ShortedWindingSpecs if q != self]
self._ShortCircuitTests._ShortedWindingSpecs = filtered
for r in value:
if self not in r._ShortedWindingSpecs:
r._ShortedWindingSpecs.append(self)
self._ShortCircuitTests = value
ShortCircuitTests = property(getShortCircuitTests, setShortCircuitTests)
def addShortCircuitTests(self, *ShortCircuitTests):
for obj in ShortCircuitTests:
if self not in obj._ShortedWindingSpecs:
obj._ShortedWindingSpecs.append(self)
self._ShortCircuitTests.append(obj)
def removeShortCircuitTests(self, *ShortCircuitTests):
for obj in ShortCircuitTests:
if self in obj._ShortedWindingSpecs:
obj._ShortedWindingSpecs.remove(self)
self._ShortCircuitTests.remove(obj)
| mit | 7,964,606,802,234,008,000 | 48.867647 | 328 | 0.696697 | false |
walterbender/infoslicer | infoslicer/widgets/Gallery_View.py | 1 | 8660 | # Copyright (C) IBM Corporation 2008
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import GdkPixbuf
import os
import cPickle
import logging
from Editable_Textbox import Editable_Textbox
from infoslicer.processing.Article_Data import *
from infoslicer.processing.Article import Article
import book
logger = logging.getLogger('infoslicer')
class Gallery_View( Gtk.HBox ):
"""
Created by Christopher Leonard
Drag-and-drop methods added by Jonathan Mace
    The gallery view acts in the same way as the Reading_View
except instead of displaying the text of an article, it
displays the images associated with that article, in a scrollable display.
Drag-and-drop methods have been added to set up the images as a
drag source. The data returned by drag-data-get will be a list
containing an Image_Data object and a Sentence_Data object. These
correspond to the image and caption respectively.
"""
def __init__(self):
self.image_list = []
GObject.GObject.__init__(self)
self.set_size_request(int(Gdk.Screen.width() / 2), -1)
self.current_index = -1
left_button = Gtk.Button(label="\n\n << \n\n")
right_button = Gtk.Button(label="\n\n >> \n\n")
self.imagenumberlabel = Gtk.Label()
self.image = Gtk.Image()
self.imagebox = Gtk.EventBox()
self.imagebox.add(self.image)
self.imagebox.drag_source_set(Gdk.ModifierType.BUTTON1_MASK,
[],
Gdk.DragAction.COPY)
self.imagebox.drag_source_add_image_targets()
self.imagebox.connect("drag-begin", self.drag_begin_event, None)
logging.debug('##################### Gallery_View.connect')
self.imagebox.connect("drag-data-get", self.drag_data_get_event, None)
self.caption = Gtk.Label(label="")
self.caption.set_size_request(int(Gdk.Screen.width() / 3), -1)
self.caption.set_line_wrap(True)
self.caption.set_max_width_chars(40)
self.image_drag_container = Gtk.VBox()
self.image_drag_container.pack_start(self.imagenumberlabel, expand=False,
fill=False, padding=0)
self.image_drag_container.pack_start(self.imagebox, False, False, 0)
self.image_drag_container.pack_start(self.caption, False, False, 0)
image_container = Gtk.VBox()
image_container.pack_start(Gtk.Label(" "), True, True, 0)
image_container.pack_start(self.image_drag_container, False, True, 0)
image_container.pack_start(Gtk.Label(" "), True, True, 0)
left_button_container = Gtk.VBox()
left_button_container.pack_start(Gtk.Label(" "), True, True, 0)
left_button_container.pack_start(left_button, False, True, 0)
left_button_container.pack_start(Gtk.Label(" "), True, True, 0)
right_button_container = Gtk.VBox()
right_button_container.pack_start(Gtk.Label(" "), True, True, 0)
right_button_container.pack_start(right_button, False, True, 0)
right_button_container.pack_start(Gtk.Label(" "), True, True, 0)
self.pack_start(left_button_container, False, True, 0)
self.pack_start(image_container, True, True, 0)
self.pack_start(right_button_container, False, True, 0)
self._source_article = None
self.show_all()
right_button.connect("clicked", self.get_next_item, None)
left_button.connect("clicked", self.get_prev_item, None)
self.get_next_item(right_button, None)
self.source_article_id = 0
def get_next_item(self, button, param):
if self.image_list == []:
if self._source_article and self._source_article.article_title:
self.caption.set_text("This article does not have any images")
else:
self.caption.set_text("Please select a Wikipedia article from the menu above")
self.image.clear()
return
self.current_index += 1
if self.current_index == len(self.image_list):
self.current_index = 0
self.imagebuf = GdkPixbuf.Pixbuf.new_from_file(self.image_list[self.current_index][0])
self.image.set_from_pixbuf(self.imagebuf)
self.caption.set_text("\n" + self.image_list[self.current_index][1])
self.imagenumberlabel.set_text("(%d / %d)\n" % (self.current_index+1, len(self.image_list)))
def get_prev_item(self, button, param):
if self.image_list == []:
if self._source_article and self._source_article.article_title:
self.caption.set_text("This article does not have any images")
else:
self.caption.set_text("Please select a Wikipedia article from the menu above")
self.image.clear()
return
if self.current_index == 0:
self.current_index = len(self.image_list)
self.current_index -= 1
self.imagebuf = GdkPixbuf.Pixbuf.new_from_file(self.image_list[self.current_index][0])
self.image.set_from_pixbuf(self.imagebuf)
self.caption.set_text("\n" + self.image_list[self.current_index][1])
self.imagenumberlabel.set_text("(%d / %d)\n" % (self.current_index+1, len(self.image_list)))
def get_first_item(self):
if self.image_list == []:
if self._source_article and self._source_article.article_title:
self.caption.set_text("This article does not have any images")
else:
self.caption.set_text("Please select a Wikipedia article from the menu above")
self.image.clear()
return
self.current_index = 0
self.imagebuf = GdkPixbuf.Pixbuf.new_from_file(self.image_list[self.current_index][0])
self.image.set_from_pixbuf(self.imagebuf)
self.caption.set_text("\n" + self.image_list[self.current_index][1])
logger.debug("setting text to:")
logger.debug("(%d / %d)\n" %
(self.current_index+1, len(self.image_list)))
self.imagenumberlabel.set_text("(%d / %d)\n" % (self.current_index+1, len(self.image_list)))
def set_image_list(self, image_list):
logger.debug("validagting image list")
self.image_list = _validate_image_list(book.wiki.root, image_list)
logger.debug(self.image_list)
def drag_begin_event(self, widget, context, data):
logging.debug('########### Gallery_View.drag_begin_event called')
self.imagebox.drag_source_set_icon_pixbuf(self.imagebuf)
def drag_data_get_event(self, widget, context, selection_data, info, timestamp, data):
logger.debug('############# Gallery_View.drag_data_get_event')
atom = Gdk.atom_intern("section", only_if_exists=False)
imagedata = Picture_Data(self.source_article_id,
self.image_list[self.current_index][0],
self.image_list[self.current_index][2])
captiondata = Sentence_Data(0, self.source_article_id, 0, 0, 0, self.image_list[self.current_index][1])
paragraph1data = Paragraph_Data(0, self.source_article_id, 0, 0, [imagedata])
paragraph2data = Paragraph_Data(0, self.source_article_id, 0, 0, [captiondata])
sectionsdata = [Section_Data(0, self.source_article_id, 0, [paragraph1data, paragraph2data])]
string = cPickle.dumps(sectionsdata)
selection_data.set(atom, 8, string)
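# Note on the drag payload built above (derived from the code): the pickled
# value is a list containing a single Section_Data that wraps two paragraphs,
# one holding the Picture_Data and one holding the caption Sentence_Data, so
# a drop target can reuse its normal section-insertion code path.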
def _validate_image_list(root, image_list):
"""
provides a mechanism for validating image lists and expanding relative paths
@param image_list: list of images to validate
@return: list of images with corrected paths, and broken images removed
"""
for i in xrange(len(image_list)):
if not os.access(image_list[i][0], os.F_OK):
if os.access(os.path.join(root, image_list[i][0]), os.F_OK):
image_list[i] = (os.path.join(root, image_list[i][0]),
image_list[i][1], image_list[i][2])
else:
                image_list[i] = None
    # removing items during the for loop was unreliable
while None in image_list:
image_list.remove(None)
return image_list
| gpl-2.0 | -250,266,366,728,502,240 | 45.065217 | 111 | 0.600577 | false |
mrchristine/dbc-notebooks | tools/pyspark_sync/workspace.py | 1 | 12134 | import base64
import argparse
import json
import requests
import sys
import os
import fnmatch
WS_LIST = "/workspace/list"
WS_STATUS = "/workspace/get-status"
WS_MKDIRS = "/workspace/mkdirs"
WS_IMPORT = "/workspace/import"
WS_EXPORT = "/workspace/export"
LS_ZONES = "/clusters/list-zones"
error_401 = """
Credentials are incorrect. Please verify the credentials passed into the APIs.
If using SSO, log out of the Databricks environment.
1. Click on the Admin login page
2. Enter your e-mail
3. Click 'Forgot my Password'
This will create a new password for you to use against the REST API. This should **not** be your SSO password
"""
class WorkspaceClient:
"""A class to define wrappers for the REST API"""
def __init__(self, host="https://myenv.cloud.databricks.com", user="admin", pwd="fakePassword", is_shared=False):
self.user = user
self.pwd = pwd
self.creds = (user, pwd)
self.host = host
self.is_shared = is_shared
self.url = host.rstrip('/') + '/api/2.0'
def get(self, endpoint, json_params={}, print_json=False):
url = self.url + endpoint
if json_params:
raw_results = requests.get(url, auth=self.creds, params=json_params)
else:
raw_results = requests.get(url, auth=self.creds)
if raw_results.status_code == 401:
print(error_401)
raise ValueError("Unauthorized error")
results = raw_results.json()
if print_json:
print(json.dumps(results, indent=4, sort_keys=True))
return results
def post(self, endpoint, json_params={}, print_json=True):
url = self.url + endpoint
if json_params:
raw_results = requests.post(url, auth=self.creds, json=json_params)
results = raw_results.json()
else:
print("Must have a payload in json_args param.")
return {}
if print_json:
print(json.dumps(results, indent=4, sort_keys=True))
# if results are empty, let's return the return status
if results:
results['http_status_code'] = raw_results.status_code
return results
else:
return {'http_status_code': raw_results.status_code}
@staticmethod
def my_map(F, items):
to_return = []
for elem in items:
to_return.append(F(elem))
return to_return
def is_file(self, path):
""" Checks if the file is a notebook or folder in Databricks"""
status = {'path': path}
resp = self.get(WS_STATUS, json_params=status)
if resp.get('error_code', None):
print(resp)
raise NameError('File does not exist in Databricks workspace.')
print("Is the path a file or folder: ")
print(resp)
if resp['object_type'] == 'DIRECTORY':
return False
return True
def get_full_path(self, in_path):
""" Get the full path of the Databricks workspace
User's can provide the relative path to push / pull from Databricks"""
path = in_path.lstrip('[\"\']').rstrip('[\"\']')
if path[0] == '/':
# return path is absolute so return here
return path
elif path[0] == '.':
full_path = '/Users/' + self.user.strip() + path[1:]
return full_path
elif str.isalnum(path[0]):
full_path = '/Users/' + self.user.strip() + '/' + path
return full_path
else:
raise ValueError('Path should start with . for relative paths or / for absolute.')
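    # Examples of the expansion above (hypothetical user '[email protected]'):
    #   get_full_path('./demo/nb')  -> '/Users/[email protected]/demo/nb'
    #   get_full_path('demo/nb')    -> '/Users/[email protected]/demo/nb'
    #   get_full_path('/Shared/nb') -> '/Shared/nb' (absolute, unchanged)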
def save_single_notebook(self, fullpath):
""" Saves a single notebook from Databricks to the local directory"""
get_args = {'path': fullpath}
resp = self.get(WS_EXPORT, get_args)
# grab the relative path from the constructed full path
# this code chops of the /Users/[email protected]/ to create a local reference
save_filename = '/'.join(fullpath.split('/')[3:]) + '.' + resp['file_type']
if self.is_shared:
save_filename = self.user.split("@")[0] + '/' + save_filename
save_path = os.path.dirname(save_filename)
print("Local path to save: " + save_path)
print("Saving file in local path: " + save_filename)
# If the local path doesn't exist,we create it before we save the contents
if not os.path.exists(save_path) and save_path:
os.makedirs(save_path)
with open(save_filename, "wb") as f:
f.write(base64.b64decode(resp['content']))
def get_all_notebooks(self, fullpath):
""" Recursively list all notebooks within the folder"""
get_args = {'path': fullpath}
items = self.get(WS_LIST, get_args)['objects']
folders = list(self.my_map(lambda y: y.get('path', None),
filter(lambda x: x.get('object_type', None) == 'DIRECTORY', items)))
notebooks = list(self.my_map(lambda y: y.get('path', None),
filter(lambda x: x.get('object_type', None) == 'NOTEBOOK', items)))
print('DIRECTORIES: ' + str(folders))
print('NOTEBOOKS: ' + str(notebooks))
if folders == [] and notebooks == []:
print('Folder does not contain any notebooks')
return []
# save the notebooks with the current method
if notebooks:
self.my_map(lambda y: self.save_single_notebook(y), notebooks)
if folders:
nested_list_notebooks = list(self.my_map(lambda y: self.get_all_notebooks(y), folders))
flatten_list = [item for sublist in nested_list_notebooks for item in sublist]
return notebooks + flatten_list
return notebooks
def save_folder(self, fullpath):
""" We will save the notebooks within the paths, and exclude Library links """
list_of_notebooks = self.get_all_notebooks(fullpath)
return list_of_notebooks
# Run map of save_single_notebook across list of notebooks
def pull(self, path):
# get_args = "/Users/[email protected]/demo/reddit/Reddit SQL Analysis"
cur_path = self.get_full_path(path)
# pull the file or archive
if self.is_file(cur_path):
self.save_single_notebook(cur_path)
else:
self.save_folder(cur_path)
@staticmethod
def _parse_extension(src_path):
supported = ['scala', 'py', 'r', 'sql']
ext = src_path.split('.')[-1]
if ext == 'scala':
return {'language': 'SCALA'}
elif ext == 'py':
return {'language': 'PYTHON'}
elif ext == 'ipynb':
return {'format': 'JUPYTER'}
elif ext == 'r':
return {'language': 'R'}
elif ext == 'sql':
return {'language': 'SQL'}
elif ext == 'txt':
return {'language': 'SQL'}
else:
raise ValueError('Unsupported file format: %s. Supported formats are: ' % ext +
'[%s].' % ', '.join(supported))
def push_file(self, local_path, dst_folder = None):
"""Push a single file to DBC
This assumes the local path matches the Databricks workspace"""
# get the databricks path using the users hostname
if self.is_shared:
username = self.user.split('@')[0]
tmp_path = '/Users/' + self.user.strip() + '/' + local_path.lstrip('./').replace(username + '/', "")
elif dst_folder:
tmp_path = '/Users/' + self.user.strip() + '/' + dst_folder.replace('/', '') + '/' + local_path.lstrip('./')
else:
tmp_path = '/Users/' + self.user.strip() + '/' + local_path.lstrip('./')
overwrite = True
dirname = os.path.dirname(tmp_path)
dbc_path, file_ext = os.path.splitext(tmp_path)
data = open(local_path, 'r').read()
create_notebook = {
"path": dbc_path,
"content": base64.b64encode(data.encode('utf-8')).decode(),
"overwrite": overwrite
}
create_notebook.update(self._parse_extension(local_path))
# create a folder, if exists then it succeeds as well
folder_resp = self.post(WS_MKDIRS, {'path': dirname}, False)
# import the notebook
resp = self.post(WS_IMPORT, create_notebook, False)
print("Push Notebook: " + dbc_path)
print(resp)
@staticmethod
def find_all_file_paths(local_dir):
matches = []
supported = ['scala', 'py', 'r', 'sql']
for root, dirnames, filenames in os.walk(local_dir):
for ext in supported:
for filename in fnmatch.filter(filenames, '*.' + ext):
matches.append(os.path.join(root, filename))
return matches
def push_folder(self, local_path):
""" Find all source files first, grab all the folders, batch create folders, push notebooks"""
file_list = self.find_all_file_paths(local_path)
cwd = os.getcwd()
file_list_rel_path = list(self.my_map(lambda x: x.replace(cwd, "."), file_list))
for fname in file_list_rel_path:
self.push_file(fname)
return file_list_rel_path
def push(self, path):
if path[0] == '/':
raise ValueError("Path should be relative to your git repo home dir and start with ./ or with folder name")
if os.path.isfile(path):
self.push_file(path)
else:
self.push_folder(path)
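# Minimal usage sketch (hypothetical host and credentials, for illustration):
#
#   client = WorkspaceClient('https://myenv.cloud.databricks.com',
#                            '[email protected]', 'secret', is_shared=False)
#   client.pull('demo/reddit')    # export notebooks to ./demo/reddit
#   client.push('./demo/reddit')  # re-import the local copies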
if __name__ == '__main__':
debug = False
parser = argparse.ArgumentParser(description="""
Sync Databricks workspace to/from local directory for git support.
e.g.
    $ python workspace.py pull demo/reddit/
    $ python workspace.py push demo/reddit/
    Or
    $ python workspace.py pull --host='https://myenv.cloud.databricks.com' --user='[email protected]' --password='HAHAHA'
I personally use the environment variables to store this information
DBC_HOST
DBC_USERNAME
DBC_PASSWORD
DBC_SHARED
DBC_SHARED is set to true if the single repo needs to host multiple home directories.
It creates a local directory from the users e-mail
""")
# subparser for mutually exclusive arguments
sp = parser.add_subparsers(dest='action')
sp_push = sp.add_parser('push', help='Push path to Databricks workspace')
sp_pull = sp.add_parser('pull', help='Pull workspace from Databricks to local directory')
parser.add_argument('--user', dest='user', help='Username for the Databricks env')
parser.add_argument('--password', dest='password', help='Password for the Databricks env')
parser.add_argument('--host', dest='host', help='Password for the Databricks env')
parser.add_argument('--shared', dest='shared', action='store_true',
help='Boolean to notify if this is a \
shared repo to add a username prefix to the directories')
parser.add_argument('path', type=str,
help='The path/directory in Databricks or locally to sync')
args = parser.parse_args()
# the arguments
user = args.user
host = args.host
password = args.password
is_shared = args.shared
if not host:
host = os.environ.get('DBC_HOST')
if not user:
user = os.environ.get('DBC_USERNAME')
if not password:
password = os.environ.get('DBC_PASSWORD')
if not is_shared:
is_shared = bool(os.environ.get('DBC_SHARED'))
helper = WorkspaceClient(host, user, password, is_shared)
if debug:
print("ACTION IS: " + args.action)
print("PATH IS: " + args.path)
print("USER IS: " + user)
print("PASS IS: " + "I_DONT_PRINT_PASSWORDS")
print("HOST IS: " + host)
if args.path is None:
print("Need path")
exit(0)
else:
input_path = args.path
if args.action.lower() == "push":
helper.push(input_path)
elif args.action.lower() == "pull":
helper.pull(input_path)
else:
print("Push / pull are only supported as the action.")
| apache-2.0 | -2,087,793,057,794,017,300 | 39.178808 | 120 | 0.589171 | false |
tkanemoto/unittest-xml-reporting | xmlrunner/result.py | 1 | 18019 |
import os
import sys
import time
import traceback
import six
import re
from os import path
from six import unichr
from six.moves import StringIO
from .unittest import TestResult, _TextTestResult
# Matches invalid XML1.0 unicode characters, like control characters:
# http://www.w3.org/TR/2006/REC-xml-20060816/#charsets
# http://stackoverflow.com/questions/1707890/fast-way-to-filter-illegal-xml-unicode-chars-in-python
_illegal_unichrs = [
(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x1F),
(0x7F, 0x84), (0x86, 0x9F),
(0xFDD0, 0xFDDF), (0xFFFE, 0xFFFF),
]
if sys.maxunicode >= 0x10000: # not narrow build
_illegal_unichrs.extend([
(0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF),
(0x3FFFE, 0x3FFFF), (0x4FFFE, 0x4FFFF),
(0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),
(0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF),
(0x9FFFE, 0x9FFFF), (0xAFFFE, 0xAFFFF),
(0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),
(0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF),
(0xFFFFE, 0xFFFFF), (0x10FFFE, 0x10FFFF),
])
_illegal_ranges = [
"%s-%s" % (unichr(low), unichr(high))
for (low, high) in _illegal_unichrs
]
INVALID_XML_1_0_UNICODE_RE = re.compile(u'[%s]' % u''.join(_illegal_ranges))
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
def xml_safe_unicode(base, encoding='utf-8'):
"""Return a unicode string containing only valid XML characters.
encoding - if base is a byte string it is first decoded to unicode
using this encoding.
"""
if isinstance(base, six.binary_type):
base = base.decode(encoding)
return INVALID_XML_1_0_UNICODE_RE.sub('', base)
def to_unicode(data):
"""Returns unicode in Python2 and str in Python3"""
if six.PY3:
return six.text_type(data)
try:
# Try utf8
return six.text_type(data)
except UnicodeDecodeError:
return repr(data).decode('utf8', 'replace')
def safe_unicode(data, encoding=None):
return xml_safe_unicode(to_unicode(data), encoding)
def testcase_name(test_method):
testcase = type(test_method)
# Ignore module name if it is '__main__'
module = testcase.__module__ + '.'
if module == '__main__.':
module = ''
result = module + testcase.__name__
return result
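# For example (illustrative): for a test case class SomeTest defined in
# module 'tests.test_math', testcase_name() returns 'tests.test_math.SomeTest';
# for classes defined in '__main__' the module prefix is dropped and just
# 'SomeTest' is returned.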
class _TestInfo(object):
"""
This class keeps useful information about the execution of a
test method.
"""
# Possible test outcomes
(SUCCESS, FAILURE, ERROR, SKIP) = range(4)
def __init__(self, test_result, test_method, outcome=SUCCESS, err=None, subTest=None):
self.test_result = test_result
self.outcome = outcome
self.elapsed_time = 0
self.err = err
self.stdout = test_result._stdout_data
self.stderr = test_result._stderr_data
self.test_description = self.test_result.getDescription(test_method)
self.test_exception_info = (
'' if outcome in (self.SUCCESS, self.SKIP)
else self.test_result._exc_info_to_string(
self.err, test_method)
)
self.test_name = testcase_name(test_method)
self.test_id = test_method.id()
if subTest:
self.test_id = subTest.id()
def id(self):
return self.test_id
def test_finished(self):
"""Save info that can only be calculated once a test has run.
"""
self.elapsed_time = \
self.test_result.stop_time - self.test_result.start_time
def get_description(self):
"""
Return a text representation of the test method.
"""
return self.test_description
def get_error_info(self):
"""
Return a text representation of an exception thrown by a test
method.
"""
return self.test_exception_info
class _XMLTestResult(_TextTestResult):
"""
A test result class that can express test results in a XML report.
Used by XMLTestRunner.
"""
def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
elapsed_times=True, properties=None):
_TextTestResult.__init__(self, stream, descriptions, verbosity)
self.buffer = True # we are capturing test output
self._stdout_data = None
self._stderr_data = None
self.successes = []
self.callback = None
self.elapsed_times = elapsed_times
        self.properties = properties  # junit testsuite properties
def _prepare_callback(self, test_info, target_list, verbose_str,
short_str):
"""
Appends a _TestInfo to the given target list and sets a callback
method to be called by stopTest method.
"""
target_list.append(test_info)
def callback():
"""Prints the test method outcome to the stream, as well as
the elapsed time.
"""
test_info.test_finished()
# Ignore the elapsed times for a more reliable unit testing
if not self.elapsed_times:
self.start_time = self.stop_time = 0
if self.showAll:
self.stream.writeln(
'%s (%.3fs)' % (verbose_str, test_info.elapsed_time)
)
elif self.dots:
self.stream.write(short_str)
self.callback = callback
def startTest(self, test):
"""
Called before execute each test method.
"""
self.start_time = time.time()
TestResult.startTest(self, test)
if self.showAll:
self.stream.write(' ' + self.getDescription(test))
self.stream.write(" ... ")
def _save_output_data(self):
        # Only try to read sys.stdout and sys.stderr as they may not be
        # StringIO objects yet, e.g. when the test fails during __call__
try:
self._stdout_data = sys.stdout.getvalue()
self._stderr_data = sys.stderr.getvalue()
except AttributeError:
pass
def stopTest(self, test):
"""
Called after execute each test method.
"""
self._save_output_data()
# self._stdout_data = sys.stdout.getvalue()
# self._stderr_data = sys.stderr.getvalue()
_TextTestResult.stopTest(self, test)
self.stop_time = time.time()
if self.callback and callable(self.callback):
self.callback()
self.callback = None
def addSuccess(self, test):
"""
Called when a test executes successfully.
"""
self._save_output_data()
self._prepare_callback(
_TestInfo(self, test), self.successes, 'OK', '.'
)
def addFailure(self, test, err):
"""
Called when a test method fails.
"""
self._save_output_data()
testinfo = _TestInfo(self, test, _TestInfo.FAILURE, err)
self.failures.append((
testinfo,
self._exc_info_to_string(err, test)
))
self._prepare_callback(testinfo, [], 'FAIL', 'F')
def addError(self, test, err):
"""
Called when a test method raises an error.
"""
self._save_output_data()
testinfo = _TestInfo(self, test, _TestInfo.ERROR, err)
self.errors.append((
testinfo,
self._exc_info_to_string(err, test)
))
self._prepare_callback(testinfo, [], 'ERROR', 'E')
def addSubTest(self, testcase, test, err):
"""
Called when a subTest method raises an error.
"""
if err is not None:
self._save_output_data()
testinfo = _TestInfo(self, testcase, _TestInfo.ERROR, err, subTest=test)
self.errors.append((
testinfo,
self._exc_info_to_string(err, testcase)
))
self._prepare_callback(testinfo, [], 'ERROR', 'E')
def addSkip(self, test, reason):
"""
Called when a test method was skipped.
"""
self._save_output_data()
testinfo = _TestInfo(self, test, _TestInfo.SKIP, reason)
self.skipped.append((testinfo, reason))
self._prepare_callback(testinfo, [], 'SKIP', 'S')
def printErrorList(self, flavour, errors):
"""
Writes information about the FAIL or ERROR to the stream.
"""
for test_info, error in errors:
self.stream.writeln(self.separator1)
self.stream.writeln(
'%s [%.3fs]: %s' % (flavour, test_info.elapsed_time,
test_info.get_description())
)
self.stream.writeln(self.separator2)
self.stream.writeln('%s' % test_info.get_error_info())
def _get_info_by_testcase(self):
"""
Organizes test results by TestCase module. This information is
used during the report generation, where a XML report will be created
for each TestCase.
"""
tests_by_testcase = {}
for tests in (self.successes, self.failures, self.errors,
self.skipped):
for test_info in tests:
if isinstance(test_info, tuple):
# This is a skipped, error or a failure test case
test_info = test_info[0]
testcase_name = test_info.test_name
if testcase_name not in tests_by_testcase:
tests_by_testcase[testcase_name] = []
tests_by_testcase[testcase_name].append(test_info)
return tests_by_testcase
def _report_testsuite_properties(xml_testsuite, xml_document, properties):
xml_properties = xml_document.createElement('properties')
xml_testsuite.appendChild(xml_properties)
if properties:
for key, value in properties.items():
prop = xml_document.createElement('property')
prop.setAttribute('name', str(key))
prop.setAttribute('value', str(value))
xml_properties.appendChild(prop)
return xml_properties
_report_testsuite_properties = staticmethod(_report_testsuite_properties)
def _report_testsuite(suite_name, tests, xml_document, parentElement,
properties):
"""
Appends the testsuite section to the XML document.
"""
testsuite = xml_document.createElement('testsuite')
parentElement.appendChild(testsuite)
testsuite.setAttribute('name', suite_name)
testsuite.setAttribute('tests', str(len(tests)))
testsuite.setAttribute(
'time', '%.3f' % sum(map(lambda e: e.elapsed_time, tests))
)
failures = filter(lambda e: e.outcome == _TestInfo.FAILURE, tests)
testsuite.setAttribute('failures', str(len(list(failures))))
errors = filter(lambda e: e.outcome == _TestInfo.ERROR, tests)
testsuite.setAttribute('errors', str(len(list(errors))))
_XMLTestResult._report_testsuite_properties(
testsuite, xml_document, properties)
systemout = xml_document.createElement('system-out')
testsuite.appendChild(systemout)
stdout = StringIO()
for test in tests:
# Merge the stdout from the tests in a class
if test.stdout is not None:
stdout.write(test.stdout)
_XMLTestResult._createCDATAsections(
xml_document, systemout, stdout.getvalue())
systemerr = xml_document.createElement('system-err')
testsuite.appendChild(systemerr)
stderr = StringIO()
for test in tests:
# Merge the stderr from the tests in a class
if test.stderr is not None:
stderr.write(test.stderr)
_XMLTestResult._createCDATAsections(
xml_document, systemerr, stderr.getvalue())
return testsuite
_report_testsuite = staticmethod(_report_testsuite)
def _test_method_name(test_id):
"""
Returns the test method name.
"""
return test_id.split('.')[-1]
_test_method_name = staticmethod(_test_method_name)
def _createCDATAsections(xmldoc, node, text):
text = safe_unicode(text)
pos = text.find(']]>')
while pos >= 0:
tmp = text[0:pos+2]
cdata = xmldoc.createCDATASection(tmp)
node.appendChild(cdata)
text = text[pos+2:]
pos = text.find(']]>')
cdata = xmldoc.createCDATASection(text)
node.appendChild(cdata)
_createCDATAsections = staticmethod(_createCDATAsections)
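    # Why the ']]>' split above is needed (illustrative): a CDATA section may
    # not contain the literal ']]>' sequence, so e.g. the text 'a]]>b' is
    # emitted as two sections, serialized as <![CDATA[a]]]]><![CDATA[>b]]>.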
def _report_testcase(suite_name, test_result, xml_testsuite, xml_document):
"""
Appends a testcase section to the XML document.
"""
testcase = xml_document.createElement('testcase')
xml_testsuite.appendChild(testcase)
testcase.setAttribute('classname', suite_name)
testcase.setAttribute(
'name', _XMLTestResult._test_method_name(test_result.test_id)
)
testcase.setAttribute('time', '%.3f' % test_result.elapsed_time)
if (test_result.outcome != _TestInfo.SUCCESS):
elem_name = ('failure', 'error', 'skipped')[test_result.outcome-1]
failure = xml_document.createElement(elem_name)
testcase.appendChild(failure)
if test_result.outcome != _TestInfo.SKIP:
failure.setAttribute(
'type',
safe_unicode(test_result.err[0].__name__)
)
failure.setAttribute(
'message',
safe_unicode(test_result.err[1])
)
error_info = safe_unicode(test_result.get_error_info())
_XMLTestResult._createCDATAsections(
xml_document, failure, error_info)
else:
failure.setAttribute('type', 'skip')
failure.setAttribute('message', safe_unicode(test_result.err))
_report_testcase = staticmethod(_report_testcase)
def generate_reports(self, test_runner):
"""
Generates the XML reports to a given XMLTestRunner object.
"""
from xml.dom.minidom import Document
all_results = self._get_info_by_testcase()
outputHandledAsString = \
isinstance(test_runner.output, six.string_types)
if (outputHandledAsString and not os.path.exists(test_runner.output)):
os.makedirs(test_runner.output)
if not outputHandledAsString:
doc = Document()
testsuite = doc.createElement('testsuites')
doc.appendChild(testsuite)
parentElement = testsuite
for suite, tests in all_results.items():
if outputHandledAsString:
doc = Document()
parentElement = doc
suite_name = suite
if test_runner.outsuffix:
# not checking with 'is not None', empty means no suffix.
suite_name = '%s-%s' % (suite, test_runner.outsuffix)
# Build the XML file
testsuite = _XMLTestResult._report_testsuite(
suite_name, tests, doc, parentElement, self.properties
)
for test in tests:
_XMLTestResult._report_testcase(suite, test, testsuite, doc)
xml_content = doc.toprettyxml(
indent='\t',
encoding=test_runner.encoding
)
if outputHandledAsString:
filename = path.join(
test_runner.output,
'TEST-%s.xml' % suite_name)
with open(filename, 'wb') as report_file:
report_file.write(xml_content)
if not outputHandledAsString:
# Assume that test_runner.output is a stream
test_runner.output.write(xml_content)
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
if six.PY3:
# It works fine in python 3
try:
return super(_XMLTestResult, self)._exc_info_to_string(
err, test)
except AttributeError:
# We keep going using the legacy python <= 2 way
pass
# This comes directly from python2 unittest
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
            # Only try to read sys.stdout and sys.stderr as they may not be
            # StringIO objects yet, e.g. when the test fails during __call__
try:
output = sys.stdout.getvalue()
except AttributeError:
output = None
try:
error = sys.stderr.getvalue()
except AttributeError:
error = None
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
# This is the extra magic to make sure all lines are str
encoding = getattr(sys.stdout, 'encoding', 'utf-8')
lines = []
for line in msgLines:
if not isinstance(line, str):
                # utf-8 shouldn't be hard-coded here, but it's unclear what else to use
line = line.encode(encoding)
lines.append(line)
return ''.join(lines)
| bsd-2-clause | 3,852,937,358,608,100,400 | 33.191651 | 99 | 0.571896 | false |
googleads/google-ads-python | google/ads/googleads/v8/services/services/domain_category_service/transports/base.py | 1 | 3692 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v8.resources.types import domain_category
from google.ads.googleads.v8.services.types import domain_category_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class DomainCategoryServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for DomainCategoryService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_domain_category: gapic_v1.method.wrap_method(
self.get_domain_category,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_domain_category(
self,
) -> typing.Callable[
[domain_category_service.GetDomainCategoryRequest],
domain_category.DomainCategory,
]:
raise NotImplementedError
__all__ = ("DomainCategoryServiceTransport",)
| apache-2.0 | -2,947,684,209,471,778,000 | 35.92 | 78 | 0.660347 | false |
ask/kamqp | kamqp/client_0_8/basic_message.py | 1 | 3707 | # Copyright (C) 2007-2008 Barry Pederson <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import
from .serialization import GenericContent
__all__ = ["Message"]
class Message(GenericContent):
"""A Message for use with the ``Channnel.basic_*`` methods.
:param body: string
:param children: (not supported)
Keyword properties may include:
:keyword content_type: shortstr
MIME content type
:keyword content_encoding: shortstr
MIME content encoding
:keyword application_headers: table
Message header field table, a dict with string keys,
and string | int | Decimal | datetime | dict values.
:keyword delivery_mode: octet
Non-persistent (1) or persistent (2)
:keyword priority: octet
The message priority, 0 to 9
:keyword correlation_id: shortstr
The application correlation identifier
:keyword reply_to: shortstr
The destination to reply to
:keyword expiration: shortstr
Message expiration specification
:keyword message_id: shortstr
The application message identifier
:keyword timestamp: datetime.datetime
The message timestamp
:keyword type: shortstr
The message type name
:keyword user_id: shortstr
The creating user id
:keyword app_id: shortstr
The creating application id
:keyword cluster_id: shortstr
Intra-cluster routing identifier
Unicode bodies are encoded according to the ``content_encoding``
argument. If that's None, it's set to 'UTF-8' automatically.
*Example*:
.. code-block:: python
msg = Message('hello world',
content_type='text/plain',
application_headers={'foo': 7})
"""
#: Instances of this class have these attributes, which
#: are passed back and forth as message properties between
#: client and server
PROPERTIES = [
("content_type", "shortstr"),
("content_encoding", "shortstr"),
("application_headers", "table"),
("delivery_mode", "octet"),
("priority", "octet"),
("correlation_id", "shortstr"),
("reply_to", "shortstr"),
("expiration", "shortstr"),
("message_id", "shortstr"),
("timestamp", "timestamp"),
("type", "shortstr"),
("user_id", "shortstr"),
("app_id", "shortstr"),
("cluster_id", "shortstr")]
def __init__(self, body='', children=None, **properties):
super(Message, self).__init__(**properties)
self.body = body
def __eq__(self, other):
"""Check if the properties and bodies of this message and another
message are the same.
Received messages may contain a :attr:`delivery_info` attribute,
which isn't compared.
"""
return (super(Message, self).__eq__(other) and
hasattr(other, 'body') and
self.body == other.body)
| lgpl-2.1 | -81,401,606,623,979,250 | 29.385246 | 75 | 0.642029 | false |
openearth/aeolis-python | aeolis/gridparams.py | 1 | 6901 | '''This file is part of AeoLiS.
AeoLiS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
AeoLiS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with AeoLiS. If not, see <http://www.gnu.org/licenses/>.
AeoLiS Copyright (C) 2015 Bas Hoonhout
[email protected] [email protected]
Deltares Delft University of Technology
Unit of Hydraulic Engineering Faculty of Civil Engineering and Geosciences
Boussinesqweg 1 Stevinweg 1
2629 HVDelft 2628CN Delft
The Netherlands The Netherlands
'''
from __future__ import absolute_import, division
import logging
import numpy as np
# package modules
#from aeolis.utils import *
# initialize logger
logger = logging.getLogger(__name__)
def initialize(s, p):
    '''Initialize the grid parameters.

    Compute world coordinates of the staggered u-, v- and c-points from
    the z-points, grid distances in the s- and n-directions, cell areas
    and their inverses, and the grid orientations (alfaz, alfau, alfav).
Parameters
----------
s : dict
Spatial grids
p : dict
Model configuration parameters
Returns
-------
dict
Spatial grids
'''
# initialize x-dimensions
s['x'][:,:] = p['xgrid_file']
# World coordinates of z-points
s['xz'][:,:] = s['x'][:,:]
# World coordinates of u-points
s['xu'][:,1:] = 0.5 * (s['xz'][:,:-1] + s['xz'][:,1:])
s['xu'][:,0] = 1.5 * s['xz'][:,0] - 0.5 * s['xz'][:,1]
# World coordinates of v-points
s['xv'][1:,:] = 0.5 * (s['xz'][:-1,:] + s['xz'][1:,:])
s['xv'][0,:] = 1.5 * s['xz'][0,:] - 0.5 * s['xz'][1,:]
# World coordinates of c-points
s['xc'][1:,1:] = 0.25 *(s['xz'][:-1,:-1] + s['xz'][:-1,1:] + s['xz'][1:,:-1] + s['xz'][1:,1:])
s['xc'][1:,0] = 0.5 * (s['xu'][:-1,0] + s['xu'][1:,0])
s['xc'][0,1:] = 0.5 * (s['xv'][0,:-1] + s['xv'][0,1:])
s['xc'][0,0] = s['xu'][0,0]
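    # Illustration (not executed): for a 1-D row of z-points [0, 2, 4] the
    # u-points become [-1, 1, 3]; interior u-points are midpoints of
    # neighbouring z-points and the boundary point is linearly extrapolated.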
# initialize y-dimension
ny = p['ny']
if ny == 0:
s['y'][:,:] = 0.
s['yz'][:,:] = 0.
s['yu'][:,:] = 0.
s['yv'][:,:] = 0.
s['dnz'][:,:] = 1.
s['dnu'][:,:] = 1.
s['dnv'][:,:] = 1.
s['dnc'][:,:] = 1.
s['alfaz'][:,:] = 0.
else:
# initialize y-dimensions
s['y'][:,:] = p['ygrid_file']
# World coordinates of z-points
s['yz'][:,:] = s['y'][:,:] # Different from XBeach
# World coordinates of u-points
s['yu'][:,1:] = 0.5 * (s['yz'][:,:-1] + s['yz'][:,1:])
s['yu'][:,0] = 1.5 * s['yz'][:,0] - 0.5 * s['yz'][:,1]
# World coordinates of v-points
s['yv'][1:,:] = 0.5 * (s['yz'][:-1,:] + s['yz'][1:,:])
s['yv'][0,:] = 1.5 * s['yz'][0,:] - 0.5 * s['yz'][1,:]
# World coordinates of c-points
s['yc'][1:,1:] = 0.25 *(s['yz'][:-1,:-1] + s['yz'][:-1,1:] + s['yz'][1:,:-1] + s['yz'][1:,1:])
s['yc'][0,1:] = 0.5 * (s['yv'][0,:-1] + s['yv'][0,1:])
s['yc'][1:,0] = 0.5 * (s['yu'][:-1,0] + s['yu'][1:,0])
s['yc'][0,0] = s['yv'][0,0]
# Distances in n-direction
s['dnz'][:-1,:] = ((s['yv'][:-1,:]-s['yv'][1:,:])**2.+(s['xv'][:-1,:]-s['xv'][1:,:])**2.)**0.5
s['dnu'][1:,:] = ((s['xc'][:-1,:]-s['xc'][1:,:])**2.+(s['yc'][:-1,:]-s['yc'][1:,:])**2.)**0.5
s['dnv'][1:,:] = ((s['xz'][:-1,:]-s['xz'][1:,:])**2.+(s['yz'][:-1,:]-s['yz'][1:,:])**2.)**0.5
s['dnc'][1:,:] = ((s['xu'][:-1,:]-s['xu'][1:,:])**2.+(s['yu'][:-1,:]-s['yu'][1:,:])**2.)**0.5
s['dnz'][-1,:] = s['dnz'][-2,:]
s['dnu'][0,:] = s['dnu'][1,:]
s['dnv'][0,:] = s['dnv'][1,:]
s['dnc'][0,:] = s['dnc'][1,:]
# Distances in s-direction
s['dsz'][:,:-1] = ((s['xu'][:,:-1]-s['xu'][:,1:])**2.+(s['yu'][:,:-1]-s['yu'][:,1:])**2.)**0.5
s['dsu'][:,1:] = ((s['xz'][:,:-1]-s['xz'][:,1:])**2.+(s['yz'][:,:-1]-s['yz'][:,1:])**2.)**0.5
s['dsv'][:,1:] = ((s['xc'][:,:-1]-s['xc'][:,1:])**2.+(s['yc'][:,:-1]-s['yc'][:,1:])**2.)**0.5
s['dsc'][:,1:] = ((s['xv'][:,:-1]-s['xv'][:,1:])**2.+(s['yv'][:,:-1]-s['yv'][:,1:])**2.)**0.5
s['dsz'][:,-1] = s['dsz'][:,-2]
s['dsu'][:,0] = s['dsu'][:,1]
s['dsv'][:,0] = s['dsv'][:,1]
s['dsc'][:,0] = s['dsc'][:,1]
# # Distances diagonal in sn-direction (a)
# s['dsnca'][1:,1:] = ((s['xz'][:-1,:-1]-s['xz'][1:,1:])**2.+(s['yz'][:-1,:-1]-s['yz'][1:,1:])**2.)**0.5
# s['dsnca'][0,:] = s['dsnza'][1,:]
# s['dsnca'][:,0] = s['dsnza'][:,1]
# s['dsnca'][0,0] = s['dsnza'][1,1]
#
# # Distances diagonal in sn-direction (a)
# s['dsncb'][1:,1:] = ((s['xz'][:-1,:-1]-s['xz'][1:,1:])**2.+(s['yz'][:-1,:-1]-s['yz'][1:,1:])**2.)**0.5
# s['dsncb'][0,:] = s['dsnzb'][1,:]
# s['dsncb'][:,0] = s['dsnzb'][:,1]
# s['dsncb'][0,0] = s['dsnzb'][1,1]
# Cell areas
# s['dsdnu'][:-1,:-1] = (0.5*(s['dsc'][:-1,:-1]+s['dsc'][1:,:-1])) * (0.5*(s['dnz'][:-1,:-1]+s['dnz'][:-1,1:]))
# s['dsdnv'][:-1,:-1] = (0.5*(s['dsz'][:-1,:-1]+s['dsz'][1:,:-1])) * (0.5*(s['dnc'][:-1,:-1]+s['dnc'][:-1,1:]))
s['dsdnz'][:-1,:-1] = (0.5*(s['dsv'][:-1,:-1]+s['dsv'][1:,:-1])) * (0.5*(s['dnu'][:-1,:-1]+s['dnu'][:-1,1:]))
# s['dsdnu'][:-1,-1] = s['dsdnu'][:-1,-2]
# s['dsdnv'][:-1,-1] = s['dsdnv'][:-1,-2]
s['dsdnz'][:-1,-1] = s['dsdnz'][:-1,-2]
# s['dsdnu'][-1,:] = s['dsdnu'][-2,:]
# s['dsdnv'][-1,:] = s['dsdnv'][-2,:]
s['dsdnz'][-1,:] = s['dsdnz'][-2,:]
# Inverse cell areas
# s['dsdnui'][:,:] = 1. / s['dsdnu']
# s['dsdnvi'][:,:] = 1. / s['dsdnv']
s['dsdnzi'][:,:] = 1. / s['dsdnz']
# Alfaz, grid orientation in z-points
s['alfaz'][:-1,:] = np.arctan2(s['yu'][1:,:] - s['yu'][:-1,:], s['xu'][1:,:] - s['xu'][:-1,:])
s['alfaz'][-1,:] = s['alfaz'][-2,:]
# Alfau, grid orientation in u-points
s['alfau'][1:,:] = np.arctan2(s['yz'][1:,:] - s['yz'][:-1,:], s['xz'][1:,:] - s['xz'][:-1,:])
s['alfau'][0,:] = s['alfau'][1,:]
# Alfav, grid orientation in v-points
s['alfav'][:-1,:] = np.arctan2(s['yc'][1:,:] - s['yc'][:-1,:], s['xc'][1:,:] - s['xc'][:-1,:])
s['alfav'][-1,:] = s['alfav'][-2,:]
# print(np.rad2deg(s['alfaz']))
# print(np.rad2deg(s['alfau']))
# print(np.rad2deg(s['alfav']))
# print(s['sz'][:,:])
# print(s['nz'][:,:])
# print(s['sv'][:,:])
# print(s['sc'][:,:])
# print(s['dsz'][:,:])
# print(s['dsu'][:,:])
# print(s['dsv'][:,:])
# print(s['dsc'][:,:])
# print(s['dsdnz'][:,:])
# print(s['dsdnu'][:,:])
return s | gpl-3.0 | -2,599,561,361,544,794,000 | 34.947917 | 114 | 0.409941 | false |
jpirko/lnst | lnst/Recipes/ENRT/VlansOverTeamRecipe.py | 1 | 6663 | from lnst.Common.Parameters import Param, StrParam
from lnst.Common.IpAddress import ipaddress
from lnst.Controller import HostReq, DeviceReq, RecipeParam
from lnst.Recipes.ENRT.BaseEnrtRecipe import BaseEnrtRecipe
from lnst.Recipes.ENRT.ConfigMixins.OffloadSubConfigMixin import (
OffloadSubConfigMixin)
from lnst.Recipes.ENRT.ConfigMixins.CommonHWSubConfigMixin import (
CommonHWSubConfigMixin)
from lnst.Recipes.ENRT.PingMixins import VlanPingEvaluatorMixin
from lnst.RecipeCommon.Ping.PingEndpoints import PingEndpoints
from lnst.Devices import VlanDevice
from lnst.Devices.VlanDevice import VlanDevice as Vlan
from lnst.Devices import TeamDevice
class VlansOverTeamRecipe(VlanPingEvaluatorMixin,
CommonHWSubConfigMixin, OffloadSubConfigMixin,
BaseEnrtRecipe):
host1 = HostReq()
host1.eth0 = DeviceReq(label="tnet", driver=RecipeParam("driver"))
host1.eth1 = DeviceReq(label="tnet", driver=RecipeParam("driver"))
host2 = HostReq()
host2.eth0 = DeviceReq(label="tnet", driver=RecipeParam("driver"))
offload_combinations = Param(default=(
dict(gro="on", gso="on", tso="on", tx="on"),
dict(gro="off", gso="on", tso="on", tx="on"),
dict(gro="on", gso="off", tso="off", tx="on"),
dict(gro="on", gso="on", tso="off", tx="off")))
runner_name = StrParam(mandatory = True)
def test_wide_configuration(self):
host1, host2 = self.matched.host1, self.matched.host2
#The config argument needs to be used with a team device normally
#(e.g to specify the runner mode), but it is not used here due to
#a bug in the TeamDevice module
host1.team0 = TeamDevice()
for dev in [host1.eth0, host1.eth1]:
dev.down()
host1.team0.slave_add(dev)
host1.vlan0 = VlanDevice(realdev=host1.team0, vlan_id=10)
host1.vlan1 = VlanDevice(realdev=host1.team0, vlan_id=20)
host1.vlan2 = VlanDevice(realdev=host1.team0, vlan_id=30)
host2.vlan0 = VlanDevice(realdev=host2.eth0, vlan_id=10)
host2.vlan1 = VlanDevice(realdev=host2.eth0, vlan_id=20)
host2.vlan2 = VlanDevice(realdev=host2.eth0, vlan_id=30)
configuration = super().test_wide_configuration()
configuration.test_wide_devices = []
for host in [host1, host2]:
configuration.test_wide_devices.extend([host.vlan0,
host.vlan1, host.vlan2])
configuration.test_wide_devices.append(host1.team0)
net_addr = "192.168"
net_addr6 = "fc00:0:0"
for i, host in enumerate([host1, host2]):
host.vlan0.ip_add(ipaddress(net_addr + '.10' + '.' + str(i+1)
+ "/24"))
host.vlan0.ip_add(ipaddress(net_addr6 + ":1::" + str(i+1) +
"/64"))
host.vlan1.ip_add(ipaddress(net_addr + '.20' + '.' + str(i+1)
+ "/24"))
host.vlan1.ip_add(ipaddress(net_addr6 + ":2::" + str(i+1) +
"/64"))
host.vlan2.ip_add(ipaddress(net_addr + '.30' + '.' + str(i+1)
+ "/24"))
host.vlan2.ip_add(ipaddress(net_addr6 + ":3::" + str(i+1) +
"/64"))
for dev in [host1.eth0, host1.eth1, host1.team0, host1.vlan0,
host1.vlan1, host1.vlan2, host2.eth0, host2.vlan0, host2.vlan1,
host2.vlan2]:
dev.up()
self.wait_tentative_ips(configuration.test_wide_devices)
return configuration
def generate_test_wide_description(self, config):
host1, host2 = self.matched.host1, self.matched.host2
desc = super().generate_test_wide_description(config)
desc += [
"\n".join([
"Configured {}.{}.ips = {}".format(
dev.host.hostid, dev.name, dev.ips
)
for dev in config.test_wide_devices if isinstance(dev,
Vlan)
]),
"\n".join([
"Configured {}.{}.vlan_id = {}".format(
dev.host.hostid, dev.name, dev.vlan_id
)
for dev in config.test_wide_devices if isinstance(dev,
Vlan)
]),
"\n".join([
"Configured {}.{}.realdev = {}".format(
dev.host.hostid, dev.name,
'.'.join([dev.host.hostid, dev.realdev.name])
)
for dev in config.test_wide_devices if isinstance(dev,
Vlan)
]),
"Configured {}.{}.slaves = {}".format(
host1.hostid, host1.team0.name,
['.'.join([host1.hostid, slave.name])
for slave in host1.team0.slaves]
),
"Configured {}.{}.runner_name = {}".format(
host1.hostid, host1.team0.name,
host1.team0.config
)
]
return desc
def test_wide_deconfiguration(self, config):
del config.test_wide_devices
super().test_wide_deconfiguration(config)
def generate_ping_endpoints(self, config):
host1, host2 = self.matched.host1, self.matched.host2
result = []
for src in [host1.vlan0, host1.vlan1, host1.vlan2]:
for dst in [host2.vlan0, host2.vlan1, host2.vlan2]:
result += [PingEndpoints(src, dst,
reachable=(src.vlan_id == dst.vlan_id))]
return result
def generate_perf_endpoints(self, config):
return [(self.matched.host1.vlan0, self.matched.host2.vlan0)]
@property
def offload_nics(self):
host1, host2 = self.matched.host1, self.matched.host2
return [host1.eth0, host1.eth1, host2.eth0]
@property
def mtu_hw_config_dev_list(self):
host1, host2 = self.matched.host1, self.matched.host2
result = []
for host in [host1, host2]:
for dev in [host.vlan0, host.vlan1, host.vlan2]:
result.append(dev)
result.extend([host1.team0, host2.eth0])
return result
@property
def coalescing_hw_config_dev_list(self):
host1, host2 = self.matched.host1, self.matched.host2
return [host1.eth0, host1.eth1, host2.eth0]
@property
def dev_interrupt_hw_config_dev_list(self):
host1, host2 = self.matched.host1, self.matched.host2
return [host1.eth0, host1.eth1, host2.eth0]
@property
def parallel_stream_qdisc_hw_config_dev_list(self):
host1, host2 = self.matched.host1, self.matched.host2
return [host1.eth0, host1.eth1, host2.eth0]
| gpl-2.0 | 2,490,923,806,000,048,600 | 38.660714 | 75 | 0.590275 | false |
sony/nnabla | python/src/nnabla/utils/nnp_graph.py | 1 | 10867 | # Copyright 2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from collections import OrderedDict
import os
import weakref
import numpy as np
import itertools
import nnabla as nn
import nnabla.function as F
class NnpNetwork(object):
'''A graph object which is read from nnp file.
An instance of NnpNetwork is usually created by an NnpLoader instance.
See an example usage described in :obj:`NnpLoader`.
Attributes:
variables (dict): A dict of all variables in a created graph
with a variable name as a key, and a nnabla.Variable as a value.
inputs (dict): All input variables.
outputs (dict): All output variables.
'''
def __init__(self, proto_network, batch_size, callback):
proto_network = proto_network.expand_loop_control()
self.proto_network = proto_network.promote(callback)
self.proto_network(batch_size=batch_size)
for k, v in itertools.chain(
self.proto_network.variables.items(), self.proto_network.parameters.items()):
v.variable_instance.name = k
self._inputs = {
i: self.proto_network.variables[i].variable_instance
for i in self.proto_network.inputs
}
self._outputs = {
i: self.proto_network.variables[i].variable_instance
for i in self.proto_network.outputs
}
self._variables = {
k: v.variable_instance
for k, v in itertools.chain(
self.proto_network.variables.items(), self.proto_network.parameters.items())
}
# publish network's parameters to current parameter scope
# like original implementation.
with nn.parameter_scope('', nn.get_current_parameter_scope()):
for k, v in self.proto_network.parameters.items():
nn.parameter.set_parameter(k, v.variable_instance)
@property
def inputs(self):
return self._inputs
@property
def outputs(self):
return self._outputs
@property
def variables(self):
return self._variables
class NnpLoader(object):
'''An NNP file loader.
Args:
filepath : file-like object or filepath.
extension: if filepath is file-like object, extension is one of ".nnp", ".nntxt", ".prototxt".
Example:
.. code-block:: python
from nnabla.utils.nnp_graph import NnpLoader
# Read a .nnp file.
nnp = NnpLoader('/path/to/nnp.nnp')
# Assume a graph `graph_a` is in the nnp file.
net = nnp.get_network(network_name, batch_size=1)
# `x` is an input of the graph.
x = net.inputs['x']
# 'y' is an outputs of the graph.
y = net.outputs['y']
# Set random data as input and perform forward prop.
x.d = np.random.randn(*x.shape)
y.forward(clear_buffer=True)
print('output:', y.d)
'''
def __init__(self, filepath, scope=None, extension=".nntxt"):
# OrderedDict maintains loaded parameters from nnp files.
# The loaded parameters will be copied to the current
# scope when get_network is called.
self._params = scope if scope else OrderedDict()
self.g = nn.graph_def.load(
filepath, parameter_scope=self._params, rng=np.random.RandomState(1223), extension=extension)
self.network_dict = {
name: pn for name, pn in self.g.networks.items()
}
def get_network_names(self):
'''Returns network names available.
'''
return list(self.network_dict.keys())
def get_network(self, name, batch_size=None, callback=None):
'''Create a variable graph given network by name
Returns: NnpNetwork
'''
return NnpNetwork(self.network_dict[name], batch_size, callback=callback)
class NnpNetworkPass(object):
def _no_verbose(self, *a, **kw):
pass
def _verbose(self, *a, **kw):
print(*a, **kw)
def __init__(self, verbose=0):
self._variable_callbacks = {}
self._function_callbacks_by_name = {}
self._function_callbacks_by_type = {}
self._passes_by_name = {}
self._passes_by_type = {}
self._fix_parameters = False
self._use_up_to_variables = set()
self.verbose = self._no_verbose
self.verbose2 = self._no_verbose
if verbose:
self.verbose = self._verbose
if verbose > 1:
self.verbose2 = self._verbose
def on_function_pass_by_name(self, name):
def _on_function_pass_by_name(callback):
def _callback(f, variables, param_scope):
return callback(f, variables, param_scope)
self._passes_by_name[name] = _callback
return _callback
return _on_function_pass_by_name
def on_function_pass_by_type(self, name):
def _on_function_pass_by_type(callback):
def _callback(f, variables, param_scope):
return callback(f, variables, param_scope)
            self._passes_by_type[name] = _callback
return _callback
return _on_function_pass_by_type
def on_generate_variable(self, name):
def _on_generate_variable(callback):
def _callback(v):
return callback(v)
self._variable_callbacks[name] = _callback
return _callback
return _on_generate_variable
def on_generate_function_by_name(self, name):
def _on_generate_function_by_name(callback):
def _callback(v):
return callback(v)
self._function_callbacks_by_name[name] = _callback
return _callback
return _on_generate_function_by_name
def on_generate_function_by_type(self, name):
def _on_generate_function_by_type(callback):
def _callback(v):
return callback(v)
self._function_callbacks_by_type[name] = _callback
return _callback
return _on_generate_function_by_type
def drop_function(self, *names):
def callback(f, variables, param_scope):
self.verbose('Pass: Deleting {}.'.format(f.name))
f.disable()
for name in names:
self.on_function_pass_by_name(name)(callback)
def fix_parameters(self):
self._fix_parameters = True
def use_up_to(self, *names):
self._use_up_to_variables.update(set(names))
def remove_and_rewire(self, name, i=0, o=0):
@self.on_function_pass_by_name(name)
def on_dr(f, variables, param_scope):
fi = f.inputs[i]
fo = f.outputs[o]
self.verbose('Removing {} and rewire input={} and output={}.'.format(
f.name, fi.name, fo.name))
fo.rewire_on(fi)
# Use input name
fo.proto.name = fi.name
def set_variable(self, name, input_var):
@self.on_generate_variable(name)
def on_input_x(v):
self.verbose('Replace {} by {}.'.format(name, input_var))
v.proto.shape.dim[:] = input_var.shape
v.variable = input_var
input_var.name = v.name
return v
def force_average_pooling_global(self, name, by_type=False):
dec = self.on_generate_function_by_name
if by_type:
dec = self.on_generate_function_by_type
@dec(name)
def on_avgpool(f):
pool_shape = f.inputs[0].proto.shape.dim[2:]
self.verbose('Change strides of {} to {}.'.format(
f.name, pool_shape))
p = f.proto.average_pooling_param
p.kernel.dim[:] = pool_shape
p.stride.dim[:] = pool_shape
return f
def check_average_pooling_global(self, name, by_type=False):
dec = self.on_generate_function_by_name
if by_type:
dec = self.on_generate_function_by_type
@dec(name)
def on_avgpool_check(f):
pool_shape = f.inputs[0].proto.shape.dim[2:]
p = f.proto.average_pooling_param
if p.kernel.dim[:] != pool_shape or p.stride.dim[:] != pool_shape:
raise ValueError(
'Stride configuration of average pooling is not for global pooling.'
' Given Image shape is {}, whereas pooling window size is {} and its stride is {}.'
' Consider using force_global_pooling=True'.format(
pool_shape, p.kernel.dim[:], p.stride.dim[:]))
return f
def set_batch_normalization_batch_stat_all(self, batch_stat):
@self.on_generate_function_by_type('BatchNormalization')
def on_bn(f):
self.verbose('Setting batch_stat={} at {}.'.format(
batch_stat, f.name))
p = f.proto.batch_normalization_param
p.batch_stat = batch_stat
return f
def _apply_function_pass_by_name(self, f, variables, param_scope):
if f.name not in self._passes_by_name:
return f
return self._passes_by_name[f.name](f, variables, param_scope)
def _apply_function_pass_by_type(self, f, variables, param_scope):
if f.proto.type not in self._passes_by_type:
return f
return self._passes_by_type[f.proto.type](f, variables, param_scope)
def _apply_generate_variable(self, v):
if v.name in self._variable_callbacks:
v = self._variable_callbacks[v.name](v)
if self._fix_parameters:
v.need_grad = False
return v
def _apply_generate_function_by_name(self, f):
if f.name not in self._function_callbacks_by_name:
return f
return self._function_callbacks_by_name[f.name](f)
def _apply_generate_function_by_type(self, f):
if f.proto.type not in self._function_callbacks_by_type:
return f
return self._function_callbacks_by_type[f.proto.type](f)
def _apply_use_up_to(self, variables):
for v in variables:
if v.name in self._use_up_to_variables:
self.verbose('Stopping at {}.'.format(v.name))
v.stop = True
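# Example usage of NnpNetworkPass (a minimal sketch; the file path and the
# function name 'ImageAugmentationX' are hypothetical):
#
#   callback = NnpNetworkPass(verbose=1)
#   callback.drop_function('ImageAugmentationX')
#   callback.set_batch_normalization_batch_stat_all(False)
#   nnp = NnpLoader('/path/to/model.nnp')
#   net = nnp.get_network(nnp.get_network_names()[0], batch_size=1,
#                         callback=callback)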
| apache-2.0 | 1,661,325,578,871,901,400 | 34.168285 | 105 | 0.594092 | false |
bwohlberg/sporco | tests/admm/test_tvl2.py | 1 | 6275 | from __future__ import division
from builtins import object
from past.utils import old_div
import numpy as np
from sporco.admm import tvl2
import sporco.metric as sm
class TestSet01(object):
def setup_method(self, method):
np.random.seed(12345)
self.D = np.random.randn(16, 15)
self.Dc = np.random.randn(16, 15) + 1j * np.random.randn(16, 15)
def test_01(self):
lmbda = 3
try:
b = tvl2.TVL2Denoise(self.D, lmbda)
b.solve()
except Exception as e:
print(e)
assert 0
def test_01cplx(self):
lmbda = 3
try:
b = tvl2.TVL2Denoise(self.Dc, lmbda)
b.solve()
except Exception as e:
print(e)
assert 0
def test_02(self):
lmbda = 3
try:
b = tvl2.TVL2Deconv(np.ones((1, 1)), self.D, lmbda)
b.solve()
except Exception as e:
print(e)
assert 0
def test_02cplx(self):
lmbda = 3
try:
b = tvl2.TVL2Deconv(np.ones((1, 1)), self.Dc, lmbda)
b.solve()
except Exception as e:
print(e)
assert 0
def test_03(self):
lmbda = 3
dt = np.float16
opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'MaxMainIter': 20,
'AutoRho': {'Enabled': True}, 'DataType': dt})
b = tvl2.TVL2Denoise(self.D, lmbda, opt=opt)
b.solve()
assert b.X.dtype == dt
assert b.Y.dtype == dt
assert b.U.dtype == dt
def test_04(self):
lmbda = 3
dt = np.float32
opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'MaxMainIter': 20,
'AutoRho': {'Enabled': True}, 'DataType': dt})
b = tvl2.TVL2Denoise(self.D, lmbda, opt=opt)
b.solve()
assert b.X.dtype == dt
assert b.Y.dtype == dt
assert b.U.dtype == dt
def test_05(self):
lmbda = 3
dt = np.float64
opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'MaxMainIter': 20,
'AutoRho': {'Enabled': True}, 'DataType': dt})
b = tvl2.TVL2Denoise(self.D, lmbda, opt=opt)
b.solve()
assert b.X.dtype == dt
assert b.Y.dtype == dt
assert b.U.dtype == dt
def test_06(self):
lmbda = 3
dt = np.float32
opt = tvl2.TVL2Deconv.Options({'Verbose': False, 'MaxMainIter': 20,
'AutoRho': {'Enabled': True}, 'DataType': dt})
b = tvl2.TVL2Deconv(np.ones((1, 1)), self.D, lmbda, opt=opt)
b.solve()
assert b.X.dtype == dt
assert b.Y.dtype == dt
assert b.U.dtype == dt
def test_07(self):
lmbda = 3
dt = np.float64
opt = tvl2.TVL2Deconv.Options({'Verbose': False, 'MaxMainIter': 20,
'AutoRho': {'Enabled': True}, 'DataType': dt})
b = tvl2.TVL2Deconv(np.ones((1, 1)), self.D, lmbda, opt=opt)
b.solve()
assert b.X.dtype == dt
assert b.Y.dtype == dt
assert b.U.dtype == dt
class TestSet02(object):
def setup_method(self, method):
np.random.seed(12345)
N = 64
self.U = np.ones((N, N))
self.U[:, 0:(old_div(N, 2))] = -1
self.V = 1e-1 * np.random.randn(N, N)
self.D = self.U + self.V
def test_01(self):
lmbda = 1e-1
opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'gEvalY': False,
'MaxMainIter': 300, 'rho': 75*lmbda})
b = tvl2.TVL2Denoise(self.D, lmbda, opt)
X = b.solve()
assert np.abs(b.itstat[-1].ObjFun - 32.875710674129564) < 1e-3
assert sm.mse(self.U, X) < 1e-3
def test_02(self):
lmbda = 1e-1
opt = tvl2.TVL2Deconv.Options({'Verbose': False, 'gEvalY': False,
'MaxMainIter': 250})
b = tvl2.TVL2Deconv(np.ones((1)), self.D, lmbda, opt)
X = b.solve()
assert np.abs(b.itstat[-1].ObjFun - 45.45958573088) < 1e-3
assert sm.mse(self.U, X) < 1e-3
class TestSet03(object):
def setup_method(self, method):
np.random.seed(12345)
N = 32
self.U = np.ones((N, N, N))
self.U[:, 0:(old_div(N, 2)), :] = -1
self.V = 1e-1 * np.random.randn(N, N, N)
self.D = self.U + self.V
def test_01(self):
lmbda = 1e-1
opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'gEvalY': False,
'MaxMainIter': 250, 'rho': 10*lmbda})
b = tvl2.TVL2Denoise(self.D, lmbda, opt, axes=(0, 1))
X = b.solve()
assert np.abs(b.itstat[-1].ObjFun - 363.0802047) < 1e-3
assert sm.mse(self.U, X) < 1e-3
def test_02(self):
lmbda = 1e-1
opt = tvl2.TVL2Deconv.Options({'Verbose': False, 'gEvalY': False,
'MaxMainIter': 250})
b = tvl2.TVL2Deconv(np.ones((1)), self.D, lmbda, opt, axes=(0, 1))
X = b.solve()
assert np.abs(b.itstat[-1].ObjFun - 564.1586542) < 1e-3
assert sm.mse(self.U, X) < 1e-3
class TestSet04(object):
def setup_method(self, method):
np.random.seed(12345)
N = 32
self.U = np.ones((N, N, N))
self.U[:, 0:(old_div(N, 2)), :] = -1
self.V = 1e-1 * np.random.randn(N, N, N)
self.D = self.U + self.V
def test_01(self):
lmbda = 1e-1
opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'gEvalY': False,
'MaxMainIter': 250, 'rho': 10*lmbda})
b = tvl2.TVL2Denoise(self.D, lmbda, opt, axes=(0, 1, 2))
X = b.solve()
assert np.abs(b.itstat[-1].ObjFun - 366.04267554965134) < 1e-3
assert sm.mse(self.U, X) < 1e-3
def test_02(self):
lmbda = 1e-1
opt = tvl2.TVL2Deconv.Options({'Verbose': False, 'gEvalY': False,
'MaxMainIter': 250})
b = tvl2.TVL2Deconv(np.ones((1)), self.D, lmbda, opt, axes=(0, 1, 2))
X = b.solve()
assert np.abs(b.itstat[-1].ObjFun - 567.72425227) < 1e-3
assert sm.mse(self.U, X) < 1e-3
| bsd-3-clause | 3,366,329,822,207,616,500 | 28.050926 | 77 | 0.499602 | false |
jrwdunham/old-webapp | onlinelinguisticdatabase/lib/base.py | 1 | 1441 | # Copyright (C) 2010 Joel Dunham
#
# This file is part of OnlineLinguisticDatabase.
#
# OnlineLinguisticDatabase is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OnlineLinguisticDatabase is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OnlineLinguisticDatabase. If not, see
# <http://www.gnu.org/licenses/>.
"""The base Controller API
Provides the BaseController class for subclassing.
"""
from pylons.controllers import WSGIController
from pylons.templating import render_mako as render
from onlinelinguisticdatabase.model import meta
class BaseController(WSGIController):
def __call__(self, environ, start_response):
"""Invoke the Controller"""
# WSGIController.__call__ dispatches to the Controller method
# the request is routed to. This routing information is
# available in environ['pylons.routes_dict']
try:
return WSGIController.__call__(self, environ, start_response)
finally:
meta.Session.remove()
| gpl-3.0 | -2,711,575,708,464,094,700 | 35.025 | 77 | 0.739764 | false |
denz/flask_introspect | test/test_base_view.py | 1 | 20172 | from types import NoneType
from collections import OrderedDict as odict
import unittest
from pprint import pprint
from flask.ext.introspect import TreeView, Tree, DictViewMixin, ObjectViewMixin, NOTEXIST
class O(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class O1(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class SortedNamesObjectViewMixin(ObjectViewMixin):
def get_names(self):
names = (k for k in self.obj.__dict__.iterkeys() if not k.startswith('__'))
return sorted(names)
class SortedNamesDictViewMixin(DictViewMixin):
def get_names(self):
return sorted(self.obj.iterkeys())
class TestIntegrational(unittest.TestCase):
'''Use this case for learning'''
def assertTree(self, tree, spec):
'''
Tree matching tester
'''
traverse = list(tree.traverse())
self.assertTrue(len(traverse) == len(spec))
for ((item_depth,
item_name,
item_headers,
view_object),
(req_depth,
req_name,
req_headers,
req_view_class_name)) in zip(traverse, spec):
self.assertTrue(item_depth==req_depth)
self.assertTrue(item_name==req_name)
self.assertTrue(item_headers==req_headers)
self.assertTrue(type(view_object).__name__==req_view_class_name)
def test_two_level_tree(self):
'''
Simple two level tree.
Root objects are of type O and all attributes of O of type O1 are tree leafs
'''
class TopView(SortedNamesObjectViewMixin, TreeView):
'''
            `__type__` defines what subitem type should be rendered with this view
'''
__type__ = O
            '''`__cell__` is an OrderedDict mapping cell keys to header labels.
            Tree.cell_keys contains the list of unique cell keys,
            and Tree.cell_labels contains the list of unique cell labels
            in corresponding order.
            '''
__cell__ = odict((('a', 'The a'),))
def get_cell(self, objname, key):
                '''Return the cell value for `key`.
                If required, we can inspect self.obj, the current `objname` in
                the root hierarchy, the `key` name, or self.parent (the parent view).
                '''
return getattr(self.obj, key, None)
def get_names(self):
                '''- return a single value, a list, or yield values
                - prior filtering here is allowed but not required
                Note: type-based filtering will occur later
                Note: `ObjectViewMixin` defines its own `get_names` method
                (obj.__dict__.iterkeys()), as does `DictViewMixin`, so this
                should be redefined only in special cases
'''
return (('sub', 'sub3'))
class SubView(SortedNamesObjectViewMixin, TopView):
'''
            Attention: `names`, `get_cell`, `get_children`, `__type__` and `__cell__`
            are not inherited from the class hierarchy branch that leads to TreeView;
            that is why those methods/attributes must be defined in a mixin
            or directly in the TreeView subclass.
            Note: mixins SHOULD BE old-style classes so as not to create MRO conflicts
'''
__type__ = O1
__cell__ = odict((('b', 'The b'), ('a', 'The a'),))
        '''The tree accepts a dict of root objects and a top hierarchy class (or
        a list of classes); the type(s) of the dict items must correlate with
        the top classes' __type__ attr
'''
tree = Tree({'root':O(a='root header value for `a` key',
sub=O1(),
sub2=O(subsub={'a':'b'}),
sub3=O1(subsub={'a':'b'}))},
TopView)
self.assertTrue(tree.cell_keys == ['a', 'b'])
self.assertTrue(tree.cell_labels == ['The a', 'The b'])
self.assertTree(tree, [(0, 'root', ['root header value for `a` key', None], 'TopView'),
(1, 'sub', [None, None], 'SubView'),
(1, 'sub3', [None, None], 'SubView')])
def test_two_level_plus_recursive_tree(self):
'''
Recursion tree with O or O1 objects as leafs
or sub O1 dicts as leafs
'''
class OView(SortedNamesObjectViewMixin, TreeView):
__type__ = O
__cell__ = odict((('a', 'The a'),))
__recursive__ = True
def get_cell(self, objname, key):
return getattr(self.obj, key, None)
class SubView(SortedNamesObjectViewMixin, OView):
__type__ = O1
__cell__ = odict((('b', 'The b'), ('a', 'The a'),))
def get_cell(self, objname, key):
'''
                legacy mixins do not define `get_cell`, since it is specific to each object and level,
                so it is a good idea to define your own get_cell for each level
'''
return getattr(self.obj, key, None)
def get_child(self, name):
'''
just an example of get child
'''
return getattr(self.obj, name)
            # `names` should not return non-existent names,
            # or `get_child` should weed them out
class DictSubView(SortedNamesDictViewMixin, SubView):
def get_cell(self, objname, key):
return self.obj.get(key, None)
tree = Tree({'root':O(a='root header value for `a` key',
sub=O1(),
sub2=O(subsub={'a':'Dict `a` header'}),
sub3=O1(subsub={'b':'Dict `b` header'}),
sub4=O(subsub=O1(b='sub4 header value for `b` key',
a='sub4 header value for `a` key'
)),
)},
OView)
self.assertTree(tree, [(0, 'root', ['root header value for `a` key', None], 'OView'),
(1, 'sub', [None, None], 'SubView'),
(1, 'sub2', [None, None], 'OView'),
(1, 'sub3', [None, None], 'SubView'),
(2, 'subsub', [None, 'Dict `b` header'], 'DictSubView'),
(1, 'sub4', [None, None], 'OView'),
(2, 'subsub', ['sub4 header value for `a` key',
'sub4 header value for `b` key'], 'SubView')]
)
def test_limit_tree_bottom(self):
class OView(SortedNamesObjectViewMixin, TreeView):
__type__ = O
__cell__ = odict((('a', 'The a'),))
__recursive__ = True
def get_cell(self, objname, key):
return getattr(self.obj, key, None)
class SubView(SortedNamesObjectViewMixin, OView):
__type__ = O1
__cell__ = odict((('b', 'The b'), ('a', 'The a'),))
def get_cell(self, objname, key):
return getattr(self.obj, key, None)
class DictSubView(SortedNamesDictViewMixin, SubView):
__cell__ = odict((('c', 'The c'),))
def get_cell(self, objname, key):
return self.obj.get(key, None)
'''
        When the tree should be generated partially, the `leaf_classes` kwarg
        allows limiting the bottom levels, and the named class(es) become the
        bottom views. No headers or converters will be processed for views
        below `leaf_classes`, and all objects below are NOTEXIST.
        This allows generating some parts of the tree as one piece with
        `traverse()` while leaving others prepared for xhr extending.
'''
tree = Tree({'root':O(a='root header value for `a` key',
sub=O1(),
sub2=O(subsub={'a':'Dict `a` header'}),
sub3=O1(subsub={'b':'Dict `b` header'}),
sub4=O(subsub=O1(b='sub4 header value for `b` key',
a='sub4 header value for `a` key'
)),
)},
OView,
leaf_classes=SubView,
)
self.assertTree(tree, [(0, 'root', ['root header value for `a` key', None], 'OView'),
(1, 'sub', [None, None], 'SubView'),
(1, 'sub2', [None, None], 'OView'),
(1, 'sub3', [None, None], 'SubView'),
(1, 'sub4', [None, None], 'OView'),
(2, 'subsub', ['sub4 header value for `a` key', 'sub4 header value for `b` key'], 'SubView')])
def test_get_item_by_path(self):
class OView(SortedNamesObjectViewMixin, TreeView):
__type__ = O
__cell__ = odict((('a', 'The a'),))
__recursive__ = True
def get_cell(self, objname, key):
return getattr(self.obj, key, None)
class SubView(SortedNamesObjectViewMixin, OView):
__type__ = O1
__cell__ = odict((('b', 'The b'), ('a', 'The a'),))
def get_cell(self, objname, key):
return getattr(self.obj, key, None)
class DictSubView(SortedNamesDictViewMixin, SubView):
__cell__ = odict((('c', 'The c'),))
def get_cell(self, objname, key):
return self.obj.get(key, None)
obj = {'root':O(a='root header value for `a` key',
sub=O1(),
sub2=O(subsub={'a':'Dict `a` header'}),
sub3=O1(subsub={'b':'Dict `b` header'}),
sub4=O(subsub=O1(b='sub4 header value for `b` key',
a='sub4 header value for `a` key'
)),
)}
tree = Tree(obj, OView, leaf_classes=SubView, sep='.')
'''
Separator `TreeView.__sep__` can be different for each level
'''
item, tail = tree.get('root.sub3.subsub')
self.assertTrue(item is NOTEXIST)
self.assertTrue(tail == 'subsub')
item, tail = tree.get('root.sub3')
self.assertTrue(item.cell == [None, None])
tree = Tree(obj, OView, sep='.')
item, tail = tree.get('root.sub3.subsub.sub')
self.assertTrue(item is NOTEXIST)
self.assertTrue(tail == 'sub')
tree = Tree(obj, OView, sep='.')
item, tail = tree.get('root.sub3.subsub')
self.assertTrue(item.obj == {'b': 'Dict `b` header'})
self.assertTrue(item.cell == [None, 'Dict `b` header', None])
self.assertTrue(item.path == 'root.sub3.subsub')
def test_converter(self):
from werkzeug.routing import UnicodeConverter
from base64 import (urlsafe_b64encode as encodestring,
urlsafe_b64decode as decodestring)
class Base64Converter(UnicodeConverter):
def to_python(self, value):
tail = len(value) % 4 is 0 and 0 or 4 - len(value) % 4
return decodestring(str(value) + tail*'=')
def to_url(self, value):
return encodestring(value).rstrip('=')
class OView(SortedNamesObjectViewMixin, TreeView):
__type__ = O
__cell__ = odict((('a', 'The a'),))
__recursive__ = True
def get_cell(self, objname, key):
return getattr(self.obj, key, None)
class SubView(SortedNamesObjectViewMixin, OView):
__type__ = O1
__cell__ = odict((('b', 'The b'), ('a', 'The a'),))
'''
            the path separator can be redefined for sublevels;
            the `sep` kwarg of `Tree.__init__` will not be respected in this case
'''
__sep__ = '.'
def get_cell(self, objname, key):
return getattr(self.obj, key, None)
class DictSubView(SortedNamesDictViewMixin, SubView):
__cell__ = odict((('c', 'The c'),))
'''
Converter defines name conversion rules for
`get` and `path` methods
'''
__converter__ = 'base64'
            '''Tree converters are composed from the TreeView subclasses' (i.e.
            levels') `__converters__` attrs and the `converters` kwarg of
            `Tree.__init__`. So if the hierarchy tree is complex and used in
            multiple tree generation processes, it is better to define
            converters at the level where they are used.
            In simple cases it is enough to set the `converters` kwarg.
'''
__converters__ = {'base64':Base64Converter}
def get_cell(self, objname, key):
return self.obj.get(key, None)
obj = {'root':O(a='root header value for `a` key',
sub=O1(),
sub2=O(subsub={'a':'Dict `a` header'}),
sub3=O1(subsub={'b':'Dict `b` header'}),
sub4=O(subsub=O1(b='sub4 header value for `b` key',
a='sub4 header value for `a` key'
)),
)}
tree = Tree(obj, OView, sep='.')
item, tail = tree.get('root.sub3.c3Vic3Vi')
self.assertTrue(item.obj == {'b': 'Dict `b` header'})
def test_strict_types(self):
class O2(O1):
pass
class OView(SortedNamesObjectViewMixin, TreeView):
__type__ = O
__cell__ = odict((('a', 'The a'),))
__recursive__ = True
def get_cell(self, objname, key):
return getattr(self.obj, key, None)
class SubView(SortedNamesObjectViewMixin, OView):
__type__ = O1
__cell__ = odict((('b', 'The b'), ('a', 'The a'),))
def get_cell(self, objname, key):
return getattr(self.obj, key, None)
class DictSubView(SortedNamesDictViewMixin, SubView):
__cell__ = odict((('c', 'The c'),))
__recursive__ = True
def get_cell(self, objname, key):
return self.obj.get(key, None)
obj = {'root':O(a='root header value for `a` key',
sub=O2(),
sub2=O1(subsub={'a':'Dict `a` header', 'd':{'e':'f'}}),
sub3=O1(subsub={'b':'Dict `b` header'}),
sub4=O(subsub=O1(b='sub4 header value for `b` key',
a='sub4 header value for `a` key'
)),
)}
tree = Tree(obj,
OView,
sep='.',
)
'''
        With `strict_types=False` (the default), an object of a `__type__`
        subtype is treated as a suitable child
'''
self.assertTree(tree, [(0, 'root', ['root header value for `a` key', None, None], 'OView'),
(1, 'sub', [None, None, None], 'SubView'),
(1, 'sub2', [None, None, None], 'SubView'),
(2, 'subsub', ['Dict `a` header', None, None], 'DictSubView'),
(3, 'd', [None, None, None], 'DictSubView'),
(1, 'sub3', [None, None, None], 'SubView'),
(2, 'subsub', [None, 'Dict `b` header', None], 'DictSubView'),
(1, 'sub4', [None, None, None], 'OView'),
(2, 'subsub', ['sub4 header value for `a` key', 'sub4 header value for `b` key', None], 'SubView')]
)
tree = Tree(obj,
OView,
sep='.',
strict_types=True
)
'''
        With `strict_types=True`, only objects whose type is exactly __type__ are suitable children
'''
self.assertTree(tree, [(0, 'root', ['root header value for `a` key', None, None], 'OView'),
(1, 'sub2', [None, None, None], 'SubView'),
(2, 'subsub', ['Dict `a` header', None, None], 'DictSubView'),
(3, 'd', [None, None, None], 'DictSubView'),
(1, 'sub3', [None, None, None], 'SubView'),
(2, 'subsub', [None, 'Dict `b` header', None], 'DictSubView'),
(1, 'sub4', [None, None, None], 'OView'),
(2, 'subsub', ['sub4 header value for `a` key', 'sub4 header value for `b` key', None], 'SubView')])
def test_strict_types_tree_with_multiple_subtype_views(self):
class O2(O1):
pass
class O3(O2):
pass
class O4(O3):
pass
class OView(SortedNamesObjectViewMixin, TreeView):
__type__ = O
__cell__ = odict((('a', 'The a'),))
__recursive__ = True
def get_cell(self, objname, key):
return getattr(self.obj, key, None)
class O2View(SortedNamesObjectViewMixin, OView):
__type__ = O2
__cell__ = odict((('b', 'The b'), ('a', 'The a'),))
def get_cell(self, objname, key):
return getattr(self.obj, key, None)
class O3View(SortedNamesObjectViewMixin, OView):
__type__ = O3
__cell__ = odict((('b', 'The b'), ('a', 'The a'),))
def get_cell(self, objname, key):
return getattr(self.obj, key, None)
class DictSubView(SortedNamesDictViewMixin, O3View):
__cell__ = odict((('c', 'The c'),))
__recursive__ = True
def get_cell(self, objname, key):
return self.obj.get(key, None)
'''
        When the types of two levels are both supertypes of an object, the lowest (most specific) type will be selected
'''
obj = {'root':O(a='root header value for `a` key',
sub=O2(),
sub2=O4(subsub={'a':'Dict `a` header', 'd':{'e':'f'}}),
sub3=O1(subsub={'b':'Dict `b` header'}),
sub4=O(subsub=O4(b='sub4 header value for `b` key',
a='sub4 header value for `a` key'
)),
)}
tree = Tree(obj,
OView,
sep='.',
)
self.assertTree(tree, [(0, 'root', ['root header value for `a` key', None, None], 'OView'),
(1, 'sub', [None, None, None], 'O2View'),
(1, 'sub2', [None, None, None], 'O3View'),
(2, 'subsub', ['Dict `a` header', None, None], 'DictSubView'),
(3, 'd', [None, None, None], 'DictSubView'),
(1, 'sub4', [None, None, None], 'OView'),
(2, 'subsub', ['sub4 header value for `a` key', 'sub4 header value for `b` key', None], 'O3View')])
tree = Tree(obj,
OView,
sep='.',
strict_types=True
)
self.assertTree(tree, [(0, 'root', ['root header value for `a` key', None, None], 'OView'),
(1, 'sub', [None, None, None], 'O2View'),
(1, 'sub4', [None, None, None], 'OView')])
if __name__ == '__main__':
from unittest import main
main() | bsd-3-clause | -2,047,304,181,324,625,700 | 40.766046 | 131 | 0.45925 | false |
MirkoRossini/pybuilder_django_enhanced_plugin | src/integrationtest/python/django_test_creates_report_files_tests.py | 1 | 1205 | __author__ = 'Mirko Rossini'
import unittest
import shutil
from integrationtest_support import IntegrationTestSupport
from pybuilder.errors import BuildFailedException
from common import BUILD_FILE_TEMPLATE
class DjangoEnhancedPluginTest(IntegrationTestSupport):
def test_django_test(self):
# self.set_tmp_dir()
self.write_build_file(BUILD_FILE_TEMPLATE.format(apps=['testapp']))
shutil.copytree('src/integrationtest/resources/testproject/', self.full_path('src/main/python/testproject/'))
reactor = self.prepare_reactor()
try:
reactor.build()
raise self.failureException("Build should fail due to django_tests, but it's successful")
except BuildFailedException:
# We know tests are failing
pass
self.assert_directory_exists('target/reports')
self.assert_file_exists('target/reports/django_tests')
self.assert_file_exists('target/reports/django_tests.err')
self.assert_file_contains('target/reports/django_tests.err', 'FAIL')
self.assert_file_contains('target/reports/django_tests.err', 'AssertionError: 1 != 2')
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -743,992,517,311,579,300 | 39.166667 | 117 | 0.691286 | false |
eduardoneira/SistemasDistribuidos_TPFinal | CentroMonitoreoCiudad/FaceRecognizer/modules/old_feature_matcher.py | 1 | 4628 | #!/bin/python3
import numpy as np
import cv2
import base64
from matplotlib import pyplot as plt
class FeatureMatcher:
__PORC_DISTANCE = 0.7
def __init__(self,feature_extractor='SURF',upright=True,min_match_count=10,threshold=400):
self.MIN_MATCH_COUNT = min_match_count
self.__create_feature_extractor(feature_extractor,upright,threshold)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 200)
self.flann = cv2.FlannBasedMatcher(index_params, search_params)
def __create_feature_extractor(self,feature_extractor,upright,threshold):
if feature_extractor == 'SURF':
self.feature_finder = cv2.xfeatures2d.SURF_create(threshold,extended=True)
self.feature_finder.setUpright(upright)
elif feature_extractor == 'SIFT':
self.feature_finder = cv2.xfeatures2d.SIFT_create(edgeThreshold=20,sigma=1.1)
elif feature_extractor == 'ORB':
self.feature_finder = cv2.ORB_create()
else:
raise 'Feature extractor no encontrado'
def compare(self,img1,img2):
self.features_img1 = self.find_features(img1)
self.features_img2 = self.find_features(img2)
return self.flann.knnMatch(self.features_img1[1],self.features_img2[1],k=2)
def compare_base64(self,image1_base64,image2_base64):
img1 = self.base64_to_img(image1_base64)
img2 = self.base64_to_img(image2_base64)
return self.compare(img1,img2)
def are_similar(self,img1,img2):
self.good_matches = []
for m,n in self.compare(img1,img2):
if m.distance < self.__PORC_DISTANCE*n.distance:
self.good_matches.append(m)
return (len(self.good_matches) > self.MIN_MATCH_COUNT)
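  # Example usage (a minimal sketch; the image paths are hypothetical):
  #
  #   matcher = FeatureMatcher('SIFT', min_match_count=10)
  #   img_a = cv2.imread('face_a.png', 0)
  #   img_b = cv2.imread('face_b.png', 0)
  #   if matcher.are_similar(img_a, img_b):
  #     print('match: %d good features' % len(matcher.good_matches))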
def find_features(self,img):
return self.feature_finder.detectAndCompute(img,None)
def bytes_to_img(self,image_bytes):
nparr = np.fromstring(image_bytes, np.uint8)
return cv2.imdecode(nparr, 0)
def base64_to_img(self,image_base64):
return self.bytes_to_img(base64.b64decode(image_base64))
def compare_and_draw_base64(self,img1,img2):
self.compare_and_draw(self.base64_to_img(img1),self.base64_to_img(img2))
def compare_and_draw(self,img1,img2):
# if self.are_similar(img1,img2):
# src_pts = np.float32([ self.features_img1[0][m.queryIdx].pt for m in self.good_matches ]).reshape(-1,1,2)
# dst_pts = np.float32([ self.features_img2[0][m.trainIdx].pt for m in self.good_matches ]).reshape(-1,1,2)
# M, mask = cv2.findHomography(src_pts,dst_pts,cv2.RANSAC,5.0)
# matchesMask = mask.ravel().tolist()
# h,w = img1.shape
# pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
# dst = cv2.perspectiveTransform(pts,M)
# img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3,cv2.LINE_AA)
# else:
# print("Not enough matches are found - %d/%d" % (len(self.good_matches),self.MIN_MATCH_COUNT))
# matchesMask = None
# draw_params = dict(matchColor = (0,255,0),
# singlePointColor = (255,0,0),
# matchesMask = matchesMask,
# flags = 2)
# img3 = cv2.drawMatchesKnn(img1,self.features_img1[0],img2,self.features_img2[0],self.good_matches,None,**draw_params)
# plt.imshow(img3,'gray'),plt.show()
hash1 = self.find_features(img1)
hash2 = self.find_features(img2)
matches = self.flann.knnMatch(hash1[1],hash2[1],k=2)
good = []
for m,n in matches:
if m.distance < 0.95*n.distance:
good.append(m)
print(len(good))
if len(good)>self.MIN_MATCH_COUNT:
src_pts = np.float32([ hash1[0][m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ hash2[0][m.trainIdx].pt for m in good ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
matchesMask = mask.ravel().tolist()
h,w = img1.shape
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
else:
print( "Not enough matches are found - {}/{}".format(len(good), self.MIN_MATCH_COUNT) )
matchesMask = None
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
singlePointColor = (255,0,0),
matchesMask = matchesMask, # draw only inliers
flags = 2)
img3 = cv2.drawMatches(img1,hash1[0],img2,hash2[0],good,None,**draw_params)
plt.imshow(img3, 'gray'),plt.show() | gpl-3.0 | 3,252,840,999,377,199,600 | 36.942623 | 123 | 0.645635 | false |
mpdehaan/camp | camp/core/scale.py | 1 | 3292 | """
Copyright 2016, Michael DeHaan <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# https://en.wikipedia.org/wiki/List_of_musical_scales_and_modes
SCALE_TYPES = dict(
major = [ 1, 2, 3, 4, 5, 6, 7 ],
natural_minor = [ 1, 2, 'b3', 4, 5, 'b6', 'b7' ],
blues = [ 1, 'b3', 4, 'b5', 5, 'b7' ],
dorian = [ 1, 2, 'b3', 4, 5, 6, 'b7' ],
chromatic = [ 1, 'b2', 2, 'b3', 3, 4, 'b5', 5, 'b6', 6, 'b7', 7 ],
harmonic_major = [ 1, 2, 3, 4, 5, 'b6', 7 ],
    harmonic_minor = [ 1, 2, 'b3', 4, 5, 'b6', 7 ],
locrian = [ 1, 'b2', 'b3', 4, 'b5', 'b6', 'b7' ],
lydian = [ 1, 2, 3, 'b4', 5, 6, 7 ],
major_pentatonic = [ 1, 2, 3, 5, 6 ],
melodic_minor_asc = [ 1, 2, 'b3', 4, 5, 'b7', 'b8', 8 ],
melodic_minor_desc = [ 1, 2, 'b3', 4, 5, 'b6', 'b7', 8 ],
minor_pentatonic = [ 1, 'b3', 4, 5, 'b7' ],
mixolydian = [ 1, 2, 3, 4, 5, 6, 'b7' ],
phyrigian = [ 1, 'b2', 'b3', 4, 5, 'b6', 'b7' ],
)
SCALE_ALIASES = dict(
aeolian = 'natural_minor',
ionian = 'major',
minor = 'natural_minor'
)
from camp.core.note import note
class Scale(object):
def __init__(self, root=None, typ=None):
"""
Constructs a scale:
scale = Scale(root='C4', typ='major')
"""
assert root is not None
assert typ is not None
if isinstance(root, str):
root = note(root)
self.root = root
self.typ = typ
def generate(self, length=None):
"""
Allows traversal of a scale in a forward direction.
Example:
for note in scale.generate(length=2):
print note
"""
assert length is not None
typ = SCALE_ALIASES.get(self.typ, self.typ)
scale_data = SCALE_TYPES[typ][:]
octave_shift = 0
index = 0
while (length is None or length > 0):
if index == len(scale_data):
index = 0
octave_shift = octave_shift + 1
result = self.root.transpose(degrees=scale_data[index], octaves=octave_shift)
yield(result)
index = index + 1
if length is not None:
length = length - 1
def __eq__(self, other):
"""
Scales are equal if they are the ... same scale
"""
if other is None:
return False
return self.root == other.root and self.typ == other.typ
def short_name(self):
return "%s %s" % (self.root.short_name(), self.typ)
def __repr__(self):
return "Scale<%s>" % self.short_name()
def scale(input):
"""
    Shortcut: scale('C major') -> Scale object
"""
(root, typ) = input.split()
return Scale(root=note(root), typ=typ)
| apache-2.0 | 5,020,501,145,689,694,000 | 29.766355 | 89 | 0.536148 | false |
ingkebil/trost | scripts/process_xls.py | 1 | 1832 | #!/usr/bin/python
# -*- coding: utf8 -*-
import os
import sys
import math
import xlrd
import data_objects as DO
import cast
""" Excel cell type decides which cast function to use. """
CAST_FUNC = {xlrd.XL_CELL_EMPTY: str,
xlrd.XL_CELL_TEXT: cast.cast_str,
xlrd.XL_CELL_NUMBER: float,
xlrd.XL_CELL_DATE: cast.cast_str,
xlrd.XL_CELL_BOOLEAN: int,
xlrd.XL_CELL_ERROR: int,
xlrd.XL_CELL_BLANK: cast.cast_str}
""" Parcelle information is stored on sheet 3, at least for Golm.xls. """
DEFAULT_PARCELLE_INDEX = 2
""" Treatment/Aliquot relations are stored on sheet 1. """
DEFAULT_TREATMENT_ALIQUOT_INDEX = 0
#
def read_xls_data(fn, sheet_index=0, include_time=False):
data = []
book = xlrd.open_workbook(fn)
sheet = book.sheet_by_index(sheet_index)
col_headers = [str(cell.value.encode('utf8')).replace(' ', '_')
for cell in sheet.row(0)]
for i in xrange(1, sheet.nrows):
row = []
for cell in sheet.row(i):
if cell.ctype == xlrd.XL_CELL_DATE:
# print 'DATE', cell.value
# print xlrd.xldate_as_tuple(cell.value, book.datemode)
cell_date = xlrd.xldate_as_tuple(cell.value, book.datemode)
if not include_time:
row.append('%4i-%02i-%02i ' % cell_date[:3])
else:
row.append('%4i-%02i-%02i %02i:%02i:%02i' % (cell_date[:3] + cell_date[-3:]))
else:
row.append(CAST_FUNC[cell.ctype](cell.value))
# row = [CAST_FUNC[cell.ctype](cell.value) for cell in sheet.row(i)]
data.append(DO.DataObject(col_headers, row))
# print data[-1].__dict__
return data, col_headers
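# NOTE: the original module ends with ``if __name__ == '__main__': main(sys.argv[1:])``
# but never defines main(). The driver below is a minimal, hypothetical sketch added
# so the script can run; it assumes the first argument is the path to an .xls workbook.
def main(argv):
    if not argv:
        print 'usage: process_xls.py <workbook.xls> [sheet_index]'
        return
    sheet_index = int(argv[1]) if len(argv) > 1 else 0
    data, col_headers = read_xls_data(argv[0], sheet_index=sheet_index)
    print 'columns:', col_headers
    for row in data:
        print row.__dict__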
if __name__ == '__main__': main(sys.argv[1:])
| gpl-2.0 | -938,439,755,346,230,800 | 30.050847 | 97 | 0.566048 | false |
induane/stomp.py3 | stomp/test/threading_test.py | 1 | 3873 | try:
from queue import Queue, Empty, Full
except ImportError:
from Queue import Queue, Empty, Full
import threading
import sys
import time
import unittest
import stomp
from testutils import *
class MQ(object):
def __init__(self):
self.connection = stomp.Connection(get_standard_host(), 'admin', 'password')
self.connection.set_listener('', None)
self.connection.start()
self.connection.connect(wait=True)
def send(self, topic, msg, persistent='true', retry=False):
self.connection.send(destination="/topic/%s" % topic, message=msg,
persistent=persistent)
mq = MQ()
class TestThreading(unittest.TestCase):
def setUp(self):
"""Test that mq sends don't wedge their threads.
Starts a number of sender threads, and runs for a set amount of
time. Each thread sends messages as fast as it can, and after each
send, pops from a Queue. Meanwhile, the Queue is filled with one
marker per second. If the Queue fills, the test fails, as that
indicates that all threads are no longer emptying the queue, and thus
must be wedged in their send() calls.
"""
self.Q = Queue(10)
self.Cmd = Queue()
self.Error = Queue()
self.clients = 20
self.threads = []
self.runfor = 20
for i in range(0, self.clients):
t = threading.Thread(name="client %s" % i,
target=self.make_sender(i))
t.setDaemon(1)
self.threads.append(t)
def tearDown(self):
for t in self.threads:
            if not t.is_alive():
print("thread", t, "died")
self.Cmd.put('stop')
for t in self.threads:
t.join()
print()
print()
errs = []
while 1:
try:
errs.append(self.Error.get(block=False))
except Empty:
break
print("Dead threads:", len(errs), "of", self.clients)
etype = {}
for ec, ev, tb in errs:
if ec in etype:
etype[ec] = etype[ec] + 1
else:
etype[ec] = 1
for k in sorted(etype.keys()):
print("%s: %s" % (k, etype[k]))
mq.connection.disconnect()
def make_sender(self, i):
Q = self.Q
Cmd = self.Cmd
Error = self.Error
def send(i=i, Q=Q, Cmd=Cmd, Error=Error):
counter = 0
print("%s starting" % i)
try:
while 1:
# print "%s sending %s" % (i, counter)
try:
mq.send('testclientwedge',
'Message %s:%s' % (i, counter))
except:
Error.put(sys.exc_info())
# thread will die
raise
else:
# print "%s sent %s" % (i, counter)
try:
Q.get(block=False)
except Empty:
pass
try:
if Cmd.get(block=False):
break
except Empty:
pass
counter +=1
finally:
print("final", i, counter)
return send
def test_threads_dont_wedge(self):
for t in self.threads:
t.start()
start = time.time()
while time.time() - start < self.runfor:
try:
self.Q.put(1, False)
time.sleep(1.0)
except Full:
assert False, "Failed: 'request' queue filled up"
print("passed")
| apache-2.0 | 8,511,249,930,972,715,000 | 30.745902 | 84 | 0.46992 | false |
Dziolas/invenio | modules/bibformat/lib/elements/bfe_keywords.py | 1 | 2158 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints keywords
"""
__revision__ = "$Id$"
import cgi
from urllib import quote
from invenio.config import CFG_BASE_URL
def format_element(bfo, keyword_prefix, keyword_suffix, separator=' ; ', link='yes'):
"""
Display keywords of the record.
@param keyword_prefix: a prefix before each keyword
@param keyword_suffix: a suffix after each keyword
@param separator: a separator between keywords
@param link: links the keywords if 'yes' (HTML links)
"""
keywords = bfo.fields('6531_a')
if len(keywords) > 0:
if link == 'yes':
keywords = ['<a href="' + CFG_BASE_URL + '/search?f=keyword&p='+ \
quote('"' + keyword + '"') + \
'&ln='+ bfo.lang+ \
#'">' + cgi.escape(keyword) + '</a>'
'">' + keyword + '</a>'
for keyword in keywords]
#else:
# keywords = [cgi.escape(keyword)
# for keyword in keywords]
keywords = [keyword_prefix + keyword + keyword_suffix
for keyword in keywords]
return separator.join(keywords)
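# Example (hypothetical record with keywords "dark matter" and "cosmology",
# rendered with link='no', keyword_prefix='<i>', keyword_suffix='</i>'):
#
#   <i>dark matter</i> ; <i>cosmology</i>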
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
| gpl-2.0 | 7,402,563,837,806,346,000 | 34.966667 | 85 | 0.616775 | false |
oudalab/phyllo | phyllo/extractors/frodebertusDB.py | 1 | 2443 | import sqlite3
import urllib
from urllib.request import urlopen
from bs4 import BeautifulSoup, NavigableString
import nltk
nltk.download('punkt')
from nltk import sent_tokenize
def parseRes2(soup, title, url, cur, author, date, collectiontitle):
chapter = '-'
sen = ""
num = 1
[e.extract() for e in soup.find_all('br')]
[e.extract() for e in soup.find_all('table')]
[e.extract() for e in soup.find_all('font')]
getp = soup.find_all('p')
#print(getp)
i = 0
for p in getp:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
'internal_navigation']: # these are not part of the main t
continue
except:
pass
if p.b:
chapter = p.b.text
chapter = chapter.strip()
if chapter[0].isdigit():
chapter = chapter[2:]
chapter = chapter.strip()
else:
sen = p.text
sen = sen.strip()
num = 0
if sen != '':
for s in sen.split('\n'):
sentn = s
num += 1
cur.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, collectiontitle, title, 'Latin', author, date, chapter,
num, sentn, url, 'prose'))
def main():
# get proper URLs
siteURL = 'http://www.thelatinlibrary.com'
biggsURL = 'http://www.thelatinlibrary.com/frodebertus.html'
biggsOPEN = urllib.request.urlopen(biggsURL)
biggsSOUP = BeautifulSoup(biggsOPEN, 'html5lib')
textsURL = []
title = 'Frodebertus & Importunus'
author = 'Frodebertus & Importunus'
collectiontitle = 'FRODEBERTUS AND IMPORTUNUS'
date = '-'
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Frodebertus & Importunus'")
parseRes2(biggsSOUP, title, biggsURL, c, author, date, collectiontitle)
if __name__ == '__main__':
main()
| apache-2.0 | 8,485,862,798,001,893,000 | 31.573333 | 103 | 0.548506 | false |
DTOcean/dtocean-core | tests/test_data_definitions_simplepie.py | 1 | 2601 | import pytest
import matplotlib.pyplot as plt
from aneris.control.factory import InterfaceFactory
from dtocean_core.core import (AutoFileInput,
AutoFileOutput,
AutoPlot,
Core)
from dtocean_core.data import CoreMetaData
from dtocean_core.data.definitions import SimplePie
def test_SimplePie_available():
new_core = Core()
all_objs = new_core.control._store._structures
assert "SimplePie" in all_objs.keys()
def test_SimplePie():
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"types": ["float"]})
test = SimplePie()
raw = {"a": 0, "b": 1}
a = test.get_data(raw, meta)
b = test.get_value(a)
assert b["a"] == 0
assert b["b"] == 1
def test_get_None():
test = SimplePie()
result = test.get_value(None)
assert result is None
@pytest.mark.parametrize("fext", [".csv", ".xls", ".xlsx"])
def test_SimplePie_auto_file(tmpdir, fext):
test_path = tmpdir.mkdir("sub").join("test{}".format(fext))
test_path_str = str(test_path)
raw = {"a": 0, "b": 1}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"types": ["float"]})
test = SimplePie()
fout_factory = InterfaceFactory(AutoFileOutput)
FOutCls = fout_factory(meta, test)
fout = FOutCls()
fout._path = test_path_str
fout.data.result = test.get_data(raw, meta)
fout.connect()
assert len(tmpdir.listdir()) == 1
fin_factory = InterfaceFactory(AutoFileInput)
FInCls = fin_factory(meta, test)
fin = FInCls()
fin._path = test_path_str
fin.connect()
result = test.get_data(fin.data.result, meta)
assert result["a"] == 0
assert result["b"] == 1
def test_SimplePie_auto_plot():
raw = {"a": 0, "b": 1}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"types": ["float"]})
test = SimplePie()
fout_factory = InterfaceFactory(AutoPlot)
PlotCls = fout_factory(meta, test)
plot = PlotCls()
plot.data.result = test.get_data(raw, meta)
plot.meta.result = meta
plot.connect()
assert len(plt.get_fignums()) == 1
plt.close("all")
| gpl-3.0 | -4,399,417,363,574,288,400 | 23.308411 | 63 | 0.522107 | false |
anrl/gini3 | frontend/src/gbuilder/UI/Edge.py | 1 | 4871 | """The graphical representation of connections of nodes"""
import math
from PyQt4 import QtCore, QtGui
from Core.Item import *
from Core.globals import options, mainWidgets, defaultOptions
class Edge(QtGui.QGraphicsLineItem, Item):
def __init__(self, startItem, endItem, parent=None, scene=None):
"""
Create an edge between two nodes, linking them together graphically.
"""
QtGui.QGraphicsLineItem.__init__(self, parent, scene)
self.source = startItem
self.dest = endItem
self.sourcePoint = QtCore.QPointF()
self.destPoint = QtCore.QPointF()
self.source.addEdge(self)
self.dest.addEdge(self)
self.properties = {}
self.setProperty("id", "SomeEdge")
self.interfaces = []
self.setPen(QtGui.QPen(QtCore.Qt.black, 2, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin))
self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)
self.adjust()
def boundingRect(self):
"""
Get the bounding rectangle of the edge.
"""
extra = (self.pen().width() + 20) / 2.0
p1 = self.line().p1()
p2 = self.line().p2()
return QtCore.QRectF(p1, QtCore.QSizeF(p2.x() - p1.x(), p2.y() - p1.y())).normalized().adjusted(-extra, -extra, extra, extra)
def sourceNode(self):
"""
Get the source node.
"""
return self.source
def setSourceNode(self, node):
"""
Set the source node.
"""
self.source = node
self.adjust()
def destNode(self):
"""
Get the destination node.
"""
return self.dest
def setDestNode(self, node):
"""
Set the destination node.
"""
self.dest = node
self.adjust()
def shape(self):
"""
Get the shape of the edge.
"""
return QtGui.QGraphicsLineItem.shape(self)
def adjust(self):
"""
Adjust length and angle of edge based on movement of nodes.
"""
if not self.source or not self.dest:
return
line = QtCore.QLineF(self.mapFromItem(self.source, 0, 0), self.mapFromItem(self.dest, 0, 0))
self.setLine(line)
length = line.length()
if length == 0.0:
return
edgeOffset = QtCore.QPointF((line.dx() * 20) / length, (line.dy() * 20) / length)
self.prepareGeometryChange()
self.sourcePoint = line.p1() + edgeOffset
self.destPoint = line.p2() - edgeOffset
def paint(self, painter, option, widget=None):
"""
Draw the representation.
"""
if (self.source.collidesWithItem(self.dest)):
return
painter.setRenderHint(QtGui.QPainter.Antialiasing, options["smoothing"])
if self.device_type == "Wireless_Connection":
pen = QtGui.QPen()
pen.setDashPattern([10,10])
painter.setPen(pen)
painter.drawLine(self.line())
if self.isSelected():
painter.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.DashLine))
baseLine = QtCore.QLineF(0,0,1,0)
myLine = QtCore.QLineF(self.line())
angle = math.radians(myLine.angle(baseLine))
myLine.translate(4.0 * math.sin(angle), 4.0 * math.cos(angle))
painter.drawLine(myLine)
myLine.translate(-8.0 * math.sin(angle), -8.0 * math.cos(angle))
painter.drawLine(myLine)
def delete(self):
"""
Delete the edge and remove it from its nodes.
"""
if mainWidgets["main"].isRunning():
mainWidgets["log"].append("You cannot delete items from a running topology!")
return
from Tutorial import Tutorial
if isinstance(mainWidgets["canvas"], Tutorial):
mainWidgets["log"].append("You cannot delete items from the tutorial!")
return
self.source.removeEdge(self)
self.dest.removeEdge(self)
self.scene().removeItem(self)
def contextMenu(self, pos):
"""
Pop up the context menu on right click.
"""
self.menu = QtGui.QMenu()
self.menu.setPalette(defaultOptions["palette"])
self.menu.addAction("Delete", self.delete)
self.menu.exec_(pos)
def toString(self):
"""
Return a string representation of the graphical edge.
"""
graphical = "edge:(" + self.source.getName() + "," + self.dest.getName() + ")\n"
logical = ""
for prop, value in self.properties.iteritems():
logical += "\t" + prop + ":" + value + "\n"
return graphical + logical
| mit | 1,296,057,378,893,043,700 | 29.836601 | 133 | 0.554917 | false |
seanjh/CanadianInsiderTransactions | __main__.py | 1 | 1156 | """Canadian Insider Transactions.
Usage:
sedi_transactions <issuer_num>...
Options:
-h --help Show this screen.
--version Show version.
"""
import os
from docopt import docopt
from sedi_transactions.transactions import SEDIView
OUTPUT_PATH = os.path.abspath(
os.path.join(os.path.abspath(__file__), '..', 'output')
)
if not os.path.exists(OUTPUT_PATH):
os.mkdir(OUTPUT_PATH)
def write_html(html_text, encoding, filename):
with open(filename, 'w', encoding=encoding) as outfile:
outfile.write(html_text)
def main():
arguments = docopt(__doc__, version='Canadian Insider Transactions 0.1')
sedar_issuers = arguments.get('<issuer_num>')
with SEDIView() as sv:
i = 0
while i < len(sedar_issuers):
html = sv.get_transactions_view(sedar_issuers[i])
filename = os.path.join(OUTPUT_PATH,
('{0}.html').format(sedar_issuers[i]))
if html:
print("Downloading HTML to {0}".format(filename))
write_html(html, sv.encoding, filename)
i += 1
if __name__ == '__main__':
main() | mit | 6,034,782,397,232,671,000 | 25.906977 | 76 | 0.595156 | false |
eyp-developers/statistics | statistics/migrations/0044_topic_overview_link.py | 1 | 2256 | # Generated by Django 2.0.1 on 2018-07-25 07:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('statistics', '0043_merge_20180207_1821'),
]
operations = [
migrations.AddField(
model_name='session',
name='topic_overview_link',
field=models.URLField(blank=True),
),
migrations.AlterField(
model_name='historictopicplace',
name='historic_country',
field=models.CharField(blank=True, choices=[('AL', 'Albania'), ('AM', 'Armenia'), ('AT', 'Austria'), ('AZ', 'Azerbaijan'), ('BY', 'Belarus'), ('BE', 'Belgium'), ('BA', 'Bosnia and Herzegovina'), ('HR', 'Croatia'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DK', 'Denmark'), ('EE', 'Estonia'), ('FI', 'Finland'), ('FR', 'France'), ('GE', 'Georgia'), ('DE', 'Germany'), ('GR', 'Greece'), ('HU', 'Hungary'), ('IE', 'Ireland'), ('IT', 'Italy'), ('XK', 'Kosovo'), ('LV', 'Latvia'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('NL', 'The Netherlands'), ('NO', 'Norway'), ('PL', 'Poland'), ('PT', 'Portugal'), ('RO', 'Romania'), ('RU', 'Russia'), ('RS', 'Serbia'), ('SI', 'Slovenia'), ('SK', 'Slovakia'), ('ES', 'Spain'), ('SE', 'Sweden'), ('CH', 'Switzerland'), ('TR', 'Turkey'), ('UA', 'Ukraine'), ('GB', 'The United Kingdom')], max_length=2, null=True),
),
migrations.AlterField(
model_name='historictopicplace',
name='historic_session_type',
field=models.CharField(blank=True, choices=[('IS', 'International Session'), ('IF', 'International Forum'), ('NS', 'National Session'), ('RS', 'Regional Session'), ('SS', 'Small Scale Session'), ('OE', 'Other Event')], max_length=3, null=True),
),
migrations.AlterField(
model_name='topic',
name='difficulty',
field=models.CharField(blank=True, choices=[('E', 'Easy'), ('I', 'Intermediate'), ('H', 'Hard')], max_length=1, null=True),
),
migrations.AlterField(
model_name='topic',
name='type',
field=models.CharField(blank=True, choices=[('CR', 'Creative'), ('CF', 'Conflict'), ('ST', 'Strategy')], max_length=2, null=True),
),
]
| gpl-3.0 | -440,385,167,275,794,300 | 58.368421 | 864 | 0.542996 | false |
takmid/inasafe | safe_qgis/test_impact_calculator.py | 1 | 8436 | """
InaSAFE Disaster risk assessment tool developed by AusAid -
**Impact calculator test suite.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__version__ = '0.5.0'
__date__ = '10/01/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import sys
import os
# Add PARENT directory to path to make test aware of other modules
pardir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(pardir)
import unittest
from safe_qgis.impact_calculator import ImpactCalculator
from safe_qgis.exceptions import (InsufficientParametersException,
KeywordNotFoundException,
StyleInfoNotFoundException)
from safe_qgis.safe_interface import (readKeywordsFromLayer, getStyleInfo)
from safe.common.testing import HAZDATA, EXPDATA, TESTDATA
# Retired impact function for characterisation
# (need import here if test is run independently)
# pylint: disable=W0611
from safe.engine.impact_functions_for_testing import BNPB_earthquake_guidelines
# pylint: enable=W0611
class ImpactCalculatorTest(unittest.TestCase):
"""Test the InaSAFE plugin stub"""
def setUp(self):
"""Create shared resources that all tests can use"""
self.calculator = ImpactCalculator()
self.vectorPath = os.path.join(TESTDATA, 'Padang_WGS84.shp')
self.rasterShakePath = os.path.join(HAZDATA,
'Shakemap_Padang_2009.asc')
# UTM projected layer
fn = 'tsunami_max_inundation_depth_BB_utm.asc'
self.rasterTsunamiBBPath = os.path.join(TESTDATA, fn)
self.rasterExposureBBPath = os.path.join(TESTDATA,
'tsunami_building_'
'exposure.shp')
self.rasterPopulationPath = os.path.join(EXPDATA, 'glp10ag.asc')
self.calculator.setHazardLayer(self.rasterShakePath)
self.calculator.setExposureLayer(self.vectorPath)
self.calculator.setFunction('Earthquake Guidelines Function')
def tearDown(self):
"""Tear down - destroy the QGIS app"""
pass
def test_properties(self):
"""Test if the properties work as expected."""
myMessage = 'Vector property incorrect.'
assert (self.calculator.exposureLayer() ==
self.vectorPath), myMessage
myMessage = 'Raster property incorrect.'
assert (self.calculator.hazardLayer() ==
self.rasterShakePath), myMessage
myMessage = 'Function property incorrect.'
assert (self.calculator.function() ==
'Earthquake Guidelines Function'), myMessage
def test_run(self):
"""Test that run works as expected in non threading mode"""
try:
myRunner = self.calculator.getRunner()
# run non threaded
myRunner.run()
myMessage = myRunner.result()
myImpactLayer = myRunner.impactLayer()
myFilename = myImpactLayer.get_filename()
assert(myFilename and not myFilename == '')
assert(myMessage and not myMessage == '')
except Exception, e: # pylint: disable=W0703
myMessage = 'Calculator run failed. %s' % str(e)
assert(), myMessage
def test_thread(self):
"""Test that starting it in a thread works as expected."""
try:
myRunner = self.calculator.getRunner()
myRunner.start()
# wait until the thread is done
myRunner.join()
myMessage = myRunner.result()
myImpactLayer = myRunner.impactLayer()
myFilename = myImpactLayer.get_filename()
assert(myFilename and not myFilename == '')
assert(myMessage and not myMessage == '')
except Exception, e: # pylint: disable=W0703
myMessage = 'Calculator run failed:\n' + str(e)
assert(), myMessage
def test_startWithNoParameters(self):
"""Test that run raises an error properly when no parameters defined.
"""
try:
self.calculator.setExposureLayer(None)
self.calculator.setHazardLayer(None)
# Next line should raise an error
myRunner = self.calculator.getRunner()
myRunner.start()
except RuntimeError, e:
myMessage = 'Runtime error encountered: %s' % str(e)
assert(), myMessage
except InsufficientParametersException:
return # expected outcome
except:
myMessage = 'Missing parameters not raised as error.'
assert(), myMessage
myMessage = 'Expected an error, none encountered.'
assert(), myMessage
def test_getKeywordFromImpactLayer(self):
"""Check that we can get keywords from a created impact layer."""
myRunner = self.calculator.getRunner()
myRunner.run()
myImpactLayer = myRunner.impactLayer()
myKeyword = readKeywordsFromLayer(myImpactLayer,
'impact_summary')
myMessage = 'Keyword request returned an empty string'
assert(myKeyword is not ''), myMessage
# Test we get an exception if keyword is not found
try:
myKeyword = readKeywordsFromLayer(
myImpactLayer, 'boguskeyword')
except KeywordNotFoundException:
pass # this is good
except Exception, e:
myMessage = ('Request for bogus keyword raised incorrect '
'exception type: \n %s') % str(e)
assert(), myMessage
def test_issue100(self):
"""Test for issue 100: unhashable type dict"""
exposure_path = os.path.join(TESTDATA,
'OSM_building_polygons_20110905.shp')
hazard_path = os.path.join(HAZDATA,
'Flood_Current_Depth_Jakarta_geographic.asc')
        # Verify relevant metadata is ok
#H = readSafeLayer(hazard_path)
#E = readSafeLayer(exposure_path)
self.calculator.setHazardLayer(hazard_path)
self.calculator.setExposureLayer(exposure_path)
self.calculator.setFunction('Flood Building Impact Function')
try:
myRunner = self.calculator.getRunner()
# Run non threaded
myRunner.run()
myMessage = myRunner.result()
myImpactLayer = myRunner.impactLayer()
myFilename = myImpactLayer.get_filename()
assert(myFilename and not myFilename == '')
assert(myMessage and not myMessage == '')
except Exception, e: # pylint: disable=W0703
myMessage = 'Calculator run failed. %s' % str(e)
assert(), myMessage
def test_getStyleInfo(self):
"""Test that we can get styleInfo data from a vector's keyword file
"""
myRunner = self.calculator.getRunner()
myRunner.start()
myRunner.join()
myImpactLayer = myRunner.impactLayer()
myMessage = ('Incorrect type returned from '
'myRunner.impactlayer(). Expected an impactlayer'
'but received a %s' % type(myImpactLayer))
assert hasattr(myImpactLayer, 'get_style_info'), myMessage
myStyleInfo = getStyleInfo(myImpactLayer)
myMessage = 'Style info request returned an empty string'
assert myStyleInfo is not '', myMessage
#print myStyleInfo
# Test we get an exception if style info is not found
try:
myStyleInfo = getStyleInfo('boguspath')
except StyleInfoNotFoundException:
pass # This is good
except Exception, e:
myMessage = ('StyleInfo request for bogus file raised incorrect' +
' exception type: \n %s') % str(e)
raise StyleInfoNotFoundException(myMessage)
if __name__ == '__main__':
suite = unittest.makeSuite(ImpactCalculatorTest, 'test')
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| gpl-3.0 | -7,854,523,289,256,302,000 | 38.420561 | 79 | 0.615695 | false |
f-prettyland/angr | angr/state_plugins/fast_memory.py | 1 | 8768 | import logging
import claripy
from ..storage.memory import SimMemory
from ..errors import SimFastMemoryError
l = logging.getLogger("angr.state_plugins.fast_memory")
l.setLevel(logging.DEBUG)
class SimFastMemory(SimMemory):
def __init__(self, memory_backer=None, memory_id=None, endness=None, contents=None, width=None, uninitialized_read_handler=None):
SimMemory.__init__(self, endness=endness)
self._contents = { } if contents is None else contents
self.width = width
self._uninitialized_read_handler = uninitialized_read_handler
self.id = memory_id
self._backer = memory_backer
if self._backer is not None:
raise SimFastMemoryError("TODO: support memory backers in SimFastMemory")
# TODO: support backers
#def _get_from_backer(self, missing_addr, size):
# for addr, backer in self._memory_backer.cbackers:
# start_backer = missing_addr - addr
# if start_backer < 0 and abs(start_backer) >= self._page_size: continue
# if start_backer >= len(backer): continue
# snip_start = max(0, start_backer)
# write_start = max(missing_addr, addr + snip_start)
# write_size = self._page_size - write_start%self._page_size
# snip = _ffi.buffer(backer)[snip_start:snip_start+write_size]
# mo = SimMemoryObject(claripy.BVV(snip), write_start)
# self._apply_object_to_page(n*self._page_size, mo, page=new_page)
def set_state(self, state):
super(SimFastMemory, self).set_state(state)
if self.width is None:
self.width = self.state.arch.bytes
def _handle_uninitialized_read(self, addr, inspect=True, events=True):
"""
The default uninitialized read handler. Returns symbolic bytes.
"""
if self._uninitialized_read_handler is None:
v = self.state.se.BVS("%s_%s" % (self.id, addr), self.width*self.state.arch.byte_width, key=self.variable_key_prefix + (addr,), inspect=inspect, events=events)
return v.reversed if self.endness == "Iend_LE" else v
else:
return self._uninitialized_read_handler(self, addr, inspect=inspect, events=events)
def _translate_addr(self, a): #pylint:disable=no-self-use
"""
Resolves this address.
"""
if isinstance(a, claripy.ast.Base) and not a.singlevalued:
raise SimFastMemoryError("address not supported")
return self.state.se.eval(a)
def _translate_data(self, d): #pylint:disable=no-self-use
"""
        Checks whether this data can be supported by FastMemory.
"""
return d
def _translate_size(self, s): #pylint:disable=no-self-use
"""
        Checks whether this size can be supported by FastMemory.
"""
if isinstance(s, claripy.ast.Base) and not s.singlevalued:
raise SimFastMemoryError("size not supported")
if s is None:
return s
return self.state.se.eval(s)
def _translate_cond(self, c): #pylint:disable=no-self-use
"""
        Checks whether this condition can be supported by FastMemory.
"""
if isinstance(c, claripy.ast.Base) and not c.singlevalued:
            raise SimFastMemoryError("condition not supported")
if c is None:
return True
else:
return self.state.se.eval_upto(c, 1)[0]
def _resolve_access(self, addr, size):
"""
Resolves a memory access of a certain size. Returns a sequence of the bases, offsets, and sizes of the accesses required
to fulfil this.
"""
# if we fit in one word
first_offset = addr % self.width
first_base = addr - first_offset
if first_offset + size <= self.width:
return [ (first_base, first_offset, size) ]
last_size = (addr + size) % self.width
last_base = addr + size - last_size
accesses = [ ]
accesses.append((first_base, first_offset, self.width - first_offset))
accesses.extend((a, 0, self.width) for a in range(first_base+self.width, last_base, self.width))
if last_size != 0:
accesses.append((last_base, 0, last_size))
return accesses
def _single_load(self, addr, offset, size, inspect=True, events=True):
"""
Performs a single load.
"""
try:
d = self._contents[addr]
except KeyError:
d = self._handle_uninitialized_read(addr, inspect=inspect, events=events)
self._contents[addr] = d
if offset == 0 and size == self.width:
return d
else:
return d.get_bytes(offset, size)
def _single_store(self, addr, offset, size, data):
"""
Performs a single store.
"""
if offset == 0 and size == self.width:
self._contents[addr] = data
elif offset == 0:
cur = self._single_load(addr, size, self.width - size)
self._contents[addr] = data.concat(cur)
elif offset + size == self.width:
cur = self._single_load(addr, 0, offset)
self._contents[addr] = cur.concat(data)
else:
cur = self._single_load(addr, 0, self.width)
start = cur.get_bytes(0, offset)
end = cur.get_bytes(offset+size, self.width-offset-size)
self._contents[addr] = start.concat(data, end)
def _store(self, req):
data = self._translate_data(req.data) if self._translate_cond(req.condition) else self._translate_data(req.fallback)
if data is None:
l.debug("Received false condition. Returning.")
req.completed = False
req.actual_addresses = [ req.addr ]
return
if req.endness == "Iend_LE" or (req.endness is None and self.endness == "Iend_LE"):
data = data.reversed
addr = self._translate_addr(req.addr)
        size = self._translate_size(req.size) if req.size is not None else data.length/self.state.arch.byte_width
#
# simplify
#
if (self.category == 'mem' and options.SIMPLIFY_MEMORY_WRITES in self.state.options) or \
(self.category == 'reg' and options.SIMPLIFY_REGISTER_WRITES in self.state.options):
data = self.state.se.simplify(data)
accesses = self._resolve_access(addr, size)
if len(accesses) == 1:
# simple case
a,o,s = accesses[0]
self._single_store(a, o, s, data)
else:
cur_offset = 0
for a,o,s in accesses:
portion = data.get_bytes(cur_offset, s)
cur_offset += s
self._single_store(a, o, s, portion)
# fill out the request
req.completed = True
req.actual_addresses = [ req.addr ]
req.stored_values = [ data ]
return req
def _load(self, addr, size, condition=None, fallback=None,
inspect=True, events=True, ret_on_segv=False):
if not self._translate_cond(condition):
l.debug("Received false condition. Returning fallback.")
return fallback
addr = self._translate_addr(addr)
        size = self._translate_size(size)
accesses = self._resolve_access(addr, size)
if len(accesses) == 1:
a,o,s = accesses[0]
return [addr], self._single_load(a, o, s, inspect=inspect, events=events), []
else:
return [addr], claripy.Concat(*[self._single_load(a, o, s) for a,o,s in accesses]), []
def _find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None, step=1):
raise SimFastMemoryError("find unsupported")
def _copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None):
raise SimFastMemoryError("copy unsupported")
def copy(self):
return SimFastMemory(
endness=self.endness,
contents=dict(self._contents),
width=self.width,
uninitialized_read_handler=self._uninitialized_read_handler,
memory_id=self.id
)
def changed_bytes(self, other):
"""
Gets the set of changed bytes between self and other.
"""
changes = set()
l.warning("FastMemory.changed_bytes(): This implementation is very slow and only for debug purposes.")
for addr,v in self._contents.iteritems():
for i in range(self.width):
other_byte = other.load(addr+i, 1)
our_byte = v.get_byte(i)
if other_byte is our_byte:
changes.add(addr+i)
return changes
from .. import sim_options as options
| bsd-2-clause | -5,351,947,909,110,622,000 | 37.28821 | 171 | 0.591811 | false |
JohnLunzer/flexx | flexx/event/__init__.py | 1 | 11458 | """
The event module provides a simple system for properties and events,
to let different components of an application react to each-other and
to user input.
In short:
* The :class:`HasEvents <flexx.event.HasEvents>` class provides objects
that have properties and can emit events.
* There are three decorators to create :func:`properties <flexx.event.prop>`,
:func:`readonlies <flexx.event.readonly>` and
:func:`emitters <flexx.event.emitter>`.
* There is a decorator to :func:`connect <flexx.event.connect>` a method
to an event.
Event
-----
An event is something that has occurred at a certain moment in time,
such as the mouse being pressed down or a property changing its value.
In this framework events are represented with dictionary objects that
provide information about the event (such as what button was pressed,
or the old and new value of a property). A custom :class:`Dict <flexx.event.Dict>`
class is used that inherits from ``dict`` but allows attribute access,
e.g. ``ev.button`` as an alternative to ``ev['button']``.
The HasEvents class
-------------------
The :class:`HasEvents <flexx.event.HasEvents>` class provides a base
class for objects that have properties and/or emit events. E.g. a
``flexx.ui.Widget`` inherits from ``flexx.app.Model``, which inherits
from ``flexx.event.HasEvents``.
Events are emitted using the :func:`emit() <flexx.event.HasEvents.emit>`
method, which accepts a name for the type of the event, and optionally a dict,
e.g. ``emitter.emit('mouse_down', dict(button=1, x=103, y=211))``.
The HasEvents object will add two attributes to the event: ``source``,
a reference to the HasEvents object itself, and ``type``, a string
indicating the type of the event.
As a user, you generally do not need to emit events explicitly; events are
automatically emitted, e.g. when setting a property.
Handler
-------
A handler is an object that can handle events. Handlers can be created
using the :func:`connect <flexx.event.connect>` decorator:
.. code-block:: python
from flexx import event
class MyObject(event.HasEvents):
@event.connect('foo')
def handle_foo(self, *events):
print(events)
ob = MyObject()
ob.emit('foo', dict(value=42)) # will invoke handle_foo()
This example demonstrates a few concepts. Firstly, the handler is
connected via a *connection-string* that specifies the type of the
event; in this case the handler is connected to the event-type "foo"
of the object. This connection-string can also be a path, e.g.
"sub.subsub.event_type". This allows for some powerful mechanics, as
discussed in the section on dynamism.
One can also see that the handler function accepts ``*events`` argument.
This is because handlers can be passed zero or more events. If a handler
is called manually (e.g. ``ob.handle_foo()``) it will have zero events.
When called by the event system, it will have at least 1 event. When
e.g. a property is set twice, the handler function is called
just once, with multiple events, in the next event loop iteration. It
is up to the programmer to determine whether only one action is
required, or whether all events need processing. In the latter case,
just use ``for ev in events: ...``.
In most cases, you will connect to events that are known beforehand,
like those they correspond to properties, readonlies and emitters.
If you connect to an event that is not known (as in the example above)
it might be a typo and Flexx will display a warning. Use `'!foo'` as a
connection string (i.e. prepend an exclamation mark) to suppress such
warnings.
Another useful feature of the event system is that a handler can connect to
multiple events at once:
.. code-block:: python
class MyObject(event.HasEvents):
@event.connect('foo', 'bar')
def handle_foo_and_bar(self, *events):
print(events)
To create a handler from a normal function, use the
:func:`HasEvents.connect() <flexx.event.HasEvents.connect>` method:
.. code-block:: python
h = event.HasEvents()
# Using a decorator
@h.connect('foo', 'bar')
    def handle_func1(*events):
        print(events)
    # Explicit notation
    def handle_func2(*events):
print(events)
h.connect(handle_func2, 'foo', 'bar')
Event emitters
--------------
Apart from using :func:`emit() <flexx.event.HasEvents.emit>` there are
certain attributes of ``HasEvents`` instances that generate events.
Properties
==========
Settable properties can be created easiliy using the
:func:`prop <flexx.event.prop>` decorator:
.. code-block:: python
class MyObject(event.HasEvents):
@event.prop
def foo(self, v=0):
''' This is a float indicating bla bla ...
'''
return float(v)
The function that is decorated is essentially the setter function, and
should have one argument (the new value for the property), which can
have a default value (representing the initial value). The function
body is used to validate and normalize the provided input. In this case
the input is simply cast to a float. The docstring of the function will
be the docstring of the property (e.g. for Sphinx docs).
An alternative initial value for a property can be provided upon instantiation:
.. code-block:: python
m = MyObject(foo=3)
Readonly
========
Readonly properties are created with the
:func:`readonly <flexx.event.readonly>` decorator. The value of a
readonly property can be set internally using the
:func:`_set_prop() <flexx.event.HasEvents._set_prop>` method:.
.. code-block:: python
class MyObject(event.HasEvents):
@event.readonly
def foo(self, v=0):
''' This is a float indicating bla.
'''
return float(v)
def _somewhere(self):
self._set_prop('foo', 42)
Emitter
=======
Emitter attributes make it easy to generate events, and function as a
placeholder to document events on a class. They are created with the
:func:`emitter <flexx.event.emitter>` decorator.
.. code-block:: python
class MyObject(event.HasEvents):
@event.emitter
def mouse_down(self, js_event):
''' Event emitted when the mouse is pressed down.
'''
return dict(button=js_event.button)
Emitters can have any number of arguments and should return a dictionary,
which will get emitted as an event, with the event type matching the name
of the emitter.
Labels
------
Labels are a feature that makes it possible to influence the order in
which event handlers are called, and provide a means to disconnect
specific (groups of) handlers. The label is part of the connection
string: 'foo.bar:label'.
.. code-block:: python
class MyObject(event.HasEvents):
@event.connect('foo')
def given_foo_handler(*events):
...
@event.connect('foo:aa')
def my_foo_handler(*events):
# This one is called first: 'aa' < 'given_f...'
...
When an event is emitted, the event is added to the pending events of
the handlers in the order of a key, which is the label if present, and
otherwise the name of the handler. Note that this does not guarantee
the order in case a handler has multiple connections: a handler can be
scheduled to handle its events due to another event, and a handler
always handles all its pending events at once.
The label can also be used in the
:func:`disconnect() <flexx.event.HasEvents.disconnect>` method:
.. code-block:: python
@h.connect('foo:mylabel')
def handle_foo(*events):
...
...
h.disconnect('foo:mylabel') # don't need reference to handle_foo
Dynamism
--------
Dynamism is a concept that allows one to connect to events for which
the source can change. For the following example, assume that ``Node``
is a ``HasEvents`` subclass that has properties ``parent`` and
``children``.
.. code-block:: python
main = Node()
main.parent = Node()
main.children = Node(), Node()
@main.connect('parent.foo')
def parent_foo_handler(*events):
...
@main.connect('children*.foo')
def children_foo_handler(*events):
...
The ``parent_foo_handler`` gets invoked when the "foo" event gets
emitted on the parent of main. Similarly, the ``children_foo_handler``
gets invoked when any of the children emits its "foo" event. Note that
in some cases you might also want to connect to changes of the ``parent``
or ``children`` property itself.
The event system automatically reconnects handlers when necessary. This
concept makes it very easy to connect to the right events without the
need for a lot of boilerplate code.
Note that the above example would also work if ``parent`` would be a
regular attribute instead of a property, but the handler would not be
automatically reconnected when it changed.
Patterns
--------
This event system is quite flexible and designed to cover the needs
of a variety of event/messaging mechanisms. This section discusses
how this system relates to some common patterns, and how these can be
implemented.
Observer pattern
================
The idea of the observer pattern is that observers keep track of (the
state of) an object, and that object is agnostic about what it's tracked by.
For example, in a music player, instead of writing code to update the
window-title inside the function that starts a song, there would be a
concept of a "current song", and the window would listen for changes to
the current song to update the title when it changes.
In ``flexx.event``, a ``HasEvents`` object keeps track of its observers
(handlers) and notifies them when there are changes. In our music player
example, there would be a property "current_song", and a handler to
take action when it changes.
As is common in the observer pattern, the handlers keep track of the
objects that they observe. Therefore both handlers and ``HasEvents``
objects have a ``dispose()`` method for cleaning up.
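To make this concrete, here is a minimal sketch of the music player
example (the ``Player`` class and its ``current_song`` property are
invented for illustration; it is assumed that property-change events
carry a ``new_value`` attribute):
.. code-block:: python
    class Player(event.HasEvents):
        @event.prop
        def current_song(self, v=''):
            ''' The song that is currently playing. '''
            return str(v)
    player = Player()
    @player.connect('current_song')
    def update_window_title(*events):
        # only the latest change matters for the window title
        print('Playing: ' + events[-1].new_value)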
Signals and slots
=================
The Qt GUI toolkit makes use of a mechanism called "signals and slots" as
an easy way to connect different components of an application. In
``flexx.event`` signals translate to readonly properties, and slots to
the handlers that connect to them.
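A rough sketch of the analogy (the ``Slider`` class is invented here;
the readonly ``value`` plays the role of the signal, and the connected
handler plays the role of the slot):
.. code-block:: python
    class Slider(event.HasEvents):
        @event.readonly
        def value(self, v=0):
            ''' The current slider value (the "signal"). '''
            return float(v)
        def _on_drag(self, pos):
            self._set_prop('value', pos)  # "emit" the signal
    slider = Slider()
    @slider.connect('value')
    def show_value(*events):  # the "slot"
        print('slider value: %f' % events[-1].new_value)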
Overloadable event handlers
===========================
In Qt, the "event system" consists of methods that handle an event, which
can be overloaded in subclasses to handle an event differently. In
``flexx.event``, handlers can similarly be re-implemented in subclasses,
and these can call the original handler using ``super()`` if needed.
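For example (a sketch: ``BaseWidget`` and the ``!update`` event are
invented for illustration, and it is assumed here that the override
repeats the decorator):
.. code-block:: python
    class BaseWidget(event.HasEvents):
        @event.connect('!update')
        def on_update(self, *events):
            print('base handles update')
    class MyWidget(BaseWidget):
        @event.connect('!update')
        def on_update(self, *events):
            super().on_update(*events)  # call the original handler
            print('subclass does more')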
Publish-subscribe pattern
==========================
In pub-sub, publishers generate messages identified by a 'topic', and
subscribers can subscribe to such topics. There can be zero or more publishers
and zero or more subscribers to any topic.
In ``flexx.event`` a `HasEvents` object can play the role of a broker.
Publishers can simply emit events. The event type represents the message
topic. Subscribers are represented by handlers.
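A minimal sketch of this broker pattern (the topic name ``news`` is
arbitrary; the exclamation mark suppresses the unknown-event warning
discussed earlier):
.. code-block:: python
    broker = event.HasEvents()
    # a subscriber
    @broker.connect('!news')
    def on_news(*events):
        for ev in events:
            print(ev.title)
    # a publisher
    broker.emit('news', dict(title='something happened'))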
"""
import logging
logger = logging.getLogger(__name__)
del logging
# flake8: noqa
from ._dict import Dict
from ._loop import loop
from ._handler import Handler, connect
from ._emitters import prop, readonly, emitter
from ._hasevents import HasEvents
# from ._hasevents import new_type, with_metaclass
| bsd-2-clause | 7,249,233,369,221,251,000 | 32.211594 | 82 | 0.710159 | false |
timothycrosley/thedom | thedom/document.py | 1 | 6794 | '''
Document.py
Provides elements that define the html document being served to the client-side
Copyright (C) 2015 Timothy Edmund Crosley
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from . import Base, Factory
from .MethodUtils import CallBack
from .MultiplePythonSupport import *
from .Resources import ResourceFile
Factory = Factory.Factory("Document")
DOCTYPE_XHTML_TRANSITIONAL = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">')
DOCTYPE_XHTML_STRICT = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">')
DOCTYPE_XHTML_FRAMESET = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">')
DOCTYPE_HTML4_TRANSITIONAL = ('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" '
'"http://www.w3.org/TR/REC-html40/loose.dtd">')
DOCTYPE_HTML4_STRICT = ('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"'
'"http://www.w3.org/TR/html4/strict.dtd">')
DOCTYPE_HTML4_FRAMESET = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">')
DOCTYPE_HTML5 = "<!DOCTYPE html>"
class MetaData(Base.Node):
"""
A webelement implementation of the meta tag
"""
__slots__ = ()
tagName = "meta"
displayable = False
properties = Base.Node.properties.copy()
properties['value'] = {'action':'setValue'}
properties['name'] = {'action':'setName'}
properties['http-equiv'] = {'action':'attribute'}
def _create(self, id=None, name=None, parent=None, **kwargs):
Base.Node._create(self)
def value(self):
"""
Returns the meta tags value
"""
return self.attributes.get('content')
def setValue(self, value):
"""
Sets the meta tags value
"""
self.attributes['content'] = value
def getName(self):
"""
Returns the name of the meta tag
"""
return self.name
def setName(self, name):
"""
Sets the name of the meta tag
"""
self.name = name
def shown(self):
"""
Meta tags are never visible
"""
return False
Factory.addProduct(MetaData)
class HTTPHeader(MetaData):
"""
A webelement that represents an http header meta tag
"""
__slots__ = ()
def getName(self):
"""
Returns the headers name
"""
return self.attributes.get('http-equiv')
def setName(self, name):
"""
Sets the headers name
"""
self.attributes['http-equiv'] = name
Factory.addProduct(HTTPHeader)
class Document(Base.Node):
"""
A Node representation of the overall document that fills a single page
"""
__slots__ = ('head', 'body', 'title', 'contentType')
doctype = DOCTYPE_HTML5
tagName = "html"
properties = Base.Node.properties.copy()
properties['doctype'] = {'action':'classAttribute'}
properties['title'] = {'action':'title.setText'}
properties['contentType'] = {'action':'contentType.setValue'}
properties['xmlns'] = {'action':'attribute'}
class Head(Base.Node):
"""
Documents Head
"""
tagName = "head"
class Body(Base.Node):
"""
Documents Body
"""
tagName = "body"
class Title(Base.Node):
"""
Documents Title
"""
tagName = "title"
def _create(self, id=None, name=None, parent=None, **kwargs):
Base.Node._create(self, id=id, name=name, parent=parent)
self._textNode = self.add(Base.TextNode())
def setText(self, text):
"""
Sets the document title
"""
self._textNode.setText(text)
def text(self):
"""
Returns the document title
"""
            return self._textNode.text()
def _create(self, id=None, name=None, parent=None, **kwargs):
Base.Node._create(self)
self.head = self.add(self.Head())
self.body = self.add(self.Body())
self.title = self.head.add(self.Title())
self.contentType = self.addHeader('Content-Type', 'text/html; charset=UTF-8')
def addMetaData(self, name=None, value="", **kwargs):
"""
Will add a meta tag based on name+value pair
"""
metaTag = self.head.add(MetaData(**kwargs))
metaTag.setName(name)
metaTag.setValue(value)
return metaTag
def addHeader(self, name, value):
"""
Will add an HTTP header pair based on name + value pair
"""
header = self.head.add(HTTPHeader())
header.setName(name)
header.setValue(value)
return header
def toHTML(self, formatted=False, *args, **kwargs):
"""
Overrides toHTML to include the doctype definition before the open tag.
"""
return self.doctype + "\n" + Base.Node.toHTML(self, formatted, *args, **kwargs)
def add(self, childElement, ensureUnique=True):
"""
Overrides add to place header elements and resources in the head
and all others in the body.
"""
if type(childElement) in [self.Head, self.Body]:
return Base.Node.add(self, childElement, ensureUnique)
elif type(childElement) == ResourceFile or childElement._tagName in ['title', 'base', 'link',
'meta', 'script', 'style']:
return self.head.add(childElement, ensureUnique)
else:
return self.body.add(childElement, ensureUnique)
Head = Document.Head
Body = Document.Body
Title = Document.Title
Factory.addProduct(Document)
| gpl-2.0 | 7,022,688,719,562,299,000 | 31.507177 | 104 | 0.584781 | false |
jiyfeng/RSTParser | model.py | 1 | 3945 | ## model.py
## Author: Yangfeng Ji
## Date: 09-09-2014
## Time-stamp: <yangfeng 11/05/2014 20:44:25>
## Last changed: umashanthi 11/19/2014
""" As a parsing model, it includes the following functions
1, Mini-batch training on the data generated by the Data class
2, Shift-Reduce RST parsing for a given text sequence
3, Save/load parsing model
"""
from sklearn.svm import LinearSVC
from cPickle import load, dump
from parser import SRParser
from feature import FeatureGenerator
from tree import RSTTree
from util import *
from datastructure import ActionError
import gzip, sys
import numpy as np
class ParsingModel(object):
def __init__(self, vocab=None, idxlabelmap=None, clf=None):
""" Initialization
:type vocab: dict
        :param vocab: mapping from feature templates to feature indices
        :type idxlabelmap: dict
        :param idxlabelmap: mapping from parsing action indices to
parsing actions
:type clf: LinearSVC
:param clf: an multiclass classifier from sklearn
"""
self.vocab = vocab
# print labelmap
self.labelmap = idxlabelmap
        if clf is None:
            self.clf = LinearSVC()
        else:
            self.clf = clf
def train(self, trnM, trnL):
""" Perform batch-learning on parsing model
"""
self.clf.fit(trnM, trnL)
def predict(self, features):
""" Predict parsing actions for a given set
of features
:type features: list
:param features: feature list generated by
FeatureGenerator
"""
vec = vectorize(features, self.vocab)
predicted_output = self.clf.decision_function(vec)
idxs = np.argsort(predicted_output[0])[::-1]
possible_labels = []
for index in idxs:
possible_labels.append(self.labelmap[index])
return possible_labels
def savemodel(self, fname):
""" Save model and vocab
"""
if not fname.endswith('.gz'):
fname += '.gz'
D = {'clf':self.clf, 'vocab':self.vocab,
'idxlabelmap':self.labelmap}
with gzip.open(fname, 'w') as fout:
dump(D, fout)
print 'Save model into file: {}'.format(fname)
def loadmodel(self, fname):
""" Load model
"""
with gzip.open(fname, 'r') as fin:
D = load(fin)
self.clf = D['clf']
self.vocab = D['vocab']
self.labelmap = D['idxlabelmap']
print 'Load model from file: {}'.format(fname)
def sr_parse(self, texts):
""" Shift-reduce RST parsing based on model prediction
:type texts: list of string
:param texts: list of EDUs for parsing
"""
# Initialize parser
srparser = SRParser([],[])
srparser.init(texts)
# Parsing
while not srparser.endparsing():
# Generate features
stack, queue = srparser.getstatus()
            # Make sure to call the generator with the
            # same arguments as in the data generation part
fg = FeatureGenerator(stack, queue)
features = fg.features()
labels = self.predict(features)
            # Enumerate through all possible actions ranked based on prediction scores
for i,label in enumerate(labels):
action = label2action(label)
try:
srparser.operate(action)
break # if legal action, end the loop
except ActionError:
                    if i < len(labels) - 1: # if not a legal action, try the next possible action
continue
else:
print "Parsing action error with {}".format(action)
sys.exit()
tree = srparser.getparsetree()
rst = RSTTree(tree=tree)
return rst
| mit | -4,449,401,833,782,390,000 | 30.56 | 93 | 0.570089 | false |
sherpaman/MolToolPy | bin/hbond_stat.py | 1 | 1064 | #!/usr/bin/env python
from sys import argv,stderr
# Takes as input the name of a file containing residue-pair data for each frame.
# Each line has the following format:
# frame atom1_id res1_name res1_id atom1_name atom2_id res2_name res2_id atom2_name ...........
# 0 8661 T 273 N3 8577 T 271 O2P 0.287049 4.688220
# The output is a dictionary:
# diz[(res1,res2)] = frequency
def group_values(filename):
hbond={}
local={}
resname={}
prev_frame=-1
tot_frame=0
	f = open(filename)
	for line in f:
flags=line.split()
frame=int(flags[0])
res1 =int(flags[3])
res2 =int(flags[7])
resname[res1]=flags[2]
resname[res2]=flags[6]
if frame<>prev_frame:
prev_frame=frame
tot_frame+=1
for k in local.keys():
try:
hbond[k]+=1
except KeyError:
hbond[k]=1
local={}
stderr.write("\rframe %d " %(frame))
if res1<=res2:
local[res1,res2]=1
		else:
			local[res2,res1]=1
	# count the bonds found in the last frame, which the loop above never flushes
	for k in local.keys():
		try:
			hbond[k]+=1
		except KeyError:
			hbond[k]=1
	f.close()
	stderr.write("\n")
	return hbond
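# Minimal usage sketch (assumption: the data file is passed as the first
# command-line argument, matching the otherwise unused `argv` import above).
if __name__ == "__main__":
	hb = group_values(argv[1])
	for (r1, r2), freq in sorted(hb.iteritems()):
		print "%d %d %d" % (r1, r2, freq)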
| gpl-2.0 | -7,142,410,232,880,668,000 | 23.159091 | 102 | 0.575729 | false |
release-engineering/releng-sop | tests/test_koji_clone_tag.py | 1 | 4609 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests of KojiCloneTagForReleaseMilestone script.
"""
import unittest
import os
import sys
from mock import Mock, patch
DIR = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(DIR, ".."))
from releng_sop.common import Environment, Release # noqa
from releng_sop.koji_clone_tag_for_release_milestone import get_parser, KojiCloneTagForReleaseMilestone # noqa
from tests.common import ParserTestBase # noqa
class TestKojiCloneTag(unittest.TestCase):
"""Tests of methods from KojiCloneTagForReleaseMilestone class."""
env_spec = {
'name': 'default',
'config_data': {
'koji_profile': 'test'
},
'__getitem__': lambda self, item: self.config_data[item]
}
release_spec = {
'name': 'test-release',
'config_data': {
'koji': {
'tag_release': 'test',
'tag_compose': 'test-compose'
}
},
'__getitem__': lambda self, item: self.config_data[item]
}
milestone = "Beta-1.0"
milestone_tag = '{0}-{1}-set'.format(
release_spec['config_data']['koji']['tag_release'],
milestone.lower().split(".")[0])
# Expected details text
details = """Cloning package set for a release milestone
* koji profile: {env[config_data][koji_profile]}
* release_id: {release[name]}
* milestone: {milestone}
* compose tag (source): {release[config_data][koji][tag_compose]}
* milestone tag (target): {milestone_tag}
""".format(env=env_spec, release=release_spec, milestone=milestone, milestone_tag=milestone_tag)
# Expected command
cmd = "koji --profile={profile} clone-tag --verbose {tag_compose} {milestone_tag}".format(
profile=env_spec['config_data']['koji_profile'],
tag_compose=release_spec['config_data']['koji']['tag_compose'],
milestone_tag=milestone_tag).split()
@classmethod
def setUpClass(cls):
"""Set up variables before tests."""
cls.env = Mock(spec_set=list(cls.env_spec.keys()))
cls.env.configure_mock(**cls.env_spec)
cls.release = Mock(spec_set=list(cls.release_spec.keys()))
cls.release.configure_mock(**cls.release_spec)
with patch('releng_sop.koji_clone_tag_for_release_milestone.verify_milestone') as verify_milestone:
verify_milestone.return_value = cls.milestone
cls.clone = KojiCloneTagForReleaseMilestone(cls.env, cls.release, cls.milestone)
def test_details_no_commit(self):
"""Get details, while not commiting."""
actual = self.clone.details()
expected = self.details + "*** TEST MODE ***"
self.assertEqual(actual, expected, self.test_details_no_commit.__doc__)
def test_details_with_commit(self):
"""Get details when commiting."""
actual = self.clone.details(commit=True)
expected = self.details
self.assertEqual(actual, expected, self.test_details_with_commit.__doc__)
def test_get_cmd_no_commit(self):
"""Get command, while not commiting."""
actual = self.clone.get_cmd()
expected = self.cmd + ["--test"]
self.assertEqual(actual, expected, self.test_get_cmd_no_commit.__doc__)
def test_get_cmd_with_commit(self):
"""Get command when commiting."""
actual = self.clone.get_cmd(commit=True)
expected = self.cmd
self.assertEqual(actual, expected, self.test_get_cmd_with_commit.__doc__)
def test_invalid_milestone(self):
"""Test invalid milestone."""
with patch('releng_sop.koji_clone_tag_for_release_milestone.verify_milestone') as verify_milestone:
verify_milestone.side_effect = ValueError
self.assertRaises(ValueError, KojiCloneTagForReleaseMilestone, self.env, self.release, self.milestone)
class TestKojiCloneTagParser(ParserTestBase, unittest.TestCase):
"""Set Arguments and Parser for Test generator."""
ARGUMENTS = {
'envHelp': {
'arg': '--env ENV',
'env_default': ['fedora-24', 'bash'],
'env_set': ['fedora-24', 'bash', "--env", "some_env"],
},
'commitHelp': {
'arg': '--commit',
'commit_default': ['fedora-24', 'bash'],
'commit_set': ['fedora-24', 'bash', "--commit"],
},
'helpReleaseId': {
'arg': 'RELEASE_ID',
},
'helpMilestone': {
'arg': 'MILESTONE',
},
}
PARSER = get_parser()
if __name__ == "__main__":
unittest.main()
| mit | -7,335,552,442,738,275,000 | 33.395522 | 114 | 0.602734 | false |
sebp/scikit-survival | sksurv/preprocessing.py | 1 | 3945 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from .column import encode_categorical
__all__ = ['OneHotEncoder']
def check_columns_exist(actual, expected):
missing_features = expected.difference(actual)
if len(missing_features) != 0:
raise ValueError("%d features are missing from data: %s" % (
len(missing_features), missing_features.tolist()
))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical columns with `M` categories into `M-1` columns according
to the one-hot scheme.
The order of non-categorical columns is preserved, encoded columns are inserted
inplace of the original column.
Parameters
----------
allow_drop : boolean, optional, default: True
Whether to allow dropping categorical columns that only consist
of a single category.
Attributes
----------
feature_names_ : pandas.Index
List of encoded columns.
categories_ : dict
Categories of encoded columns.
encoded_columns_ : list
Name of columns after encoding.
Includes names of non-categorical columns.
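    Examples
    --------
    A minimal sketch (column names of the encoded output depend on
    :func:`sksurv.column.encode_categorical` and are not shown here):
    >>> import pandas as pd
    >>> data = pd.DataFrame({"rank": pd.Categorical(["low", "high", "low"])})
    >>> encoded = OneHotEncoder().fit_transform(data)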
"""
def __init__(self, allow_drop=True):
self.allow_drop = allow_drop
def fit(self, X, y=None): # pylint: disable=unused-argument
"""Retrieve categorical columns.
Parameters
----------
X : pandas.DataFrame
Data to encode.
y :
Ignored. For compatibility with Pipeline.
Returns
-------
self : object
Returns self
"""
self.fit_transform(X)
return self
def _encode(self, X, columns_to_encode):
return encode_categorical(X, columns=columns_to_encode, allow_drop=self.allow_drop)
def fit_transform(self, X, y=None, **fit_params): # pylint: disable=unused-argument
"""Convert categorical columns to numeric values.
Parameters
----------
X : pandas.DataFrame
Data to encode.
y :
Ignored. For compatibility with TransformerMixin.
fit_params :
Ignored. For compatibility with TransformerMixin.
Returns
-------
Xt : pandas.DataFrame
Encoded data.
"""
columns_to_encode = X.select_dtypes(include=["object", "category"]).columns
x_dummy = self._encode(X, columns_to_encode)
self.feature_names_ = columns_to_encode
self.categories_ = {k: X[k].cat.categories for k in columns_to_encode}
self.encoded_columns_ = x_dummy.columns
return x_dummy
def transform(self, X):
"""Convert categorical columns to numeric values.
Parameters
----------
X : pandas.DataFrame
Data to encode.
Returns
-------
Xt : pandas.DataFrame
Encoded data.
"""
check_is_fitted(self, "encoded_columns_")
check_columns_exist(X.columns, self.feature_names_)
Xt = X.copy()
for col, cat in self.categories_.items():
Xt[col].cat.set_categories(cat, inplace=True)
new_data = self._encode(Xt, self.feature_names_)
return new_data.loc[:, self.encoded_columns_]
| gpl-3.0 | -6,087,449,575,147,389,000 | 31.073171 | 91 | 0.628897 | false |
mikesname/ehri-collections | ehriportal/portal/migrations/0012_auto__add_field_authority_languages__add_field_authority_scripts.py | 1 | 17152 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Authority.languages'
db.add_column('portal_authority', 'languages', self.gf('jsonfield.fields.JSONField')(default='[]'), keep_default=False)
# Adding field 'Authority.scripts'
db.add_column('portal_authority', 'scripts', self.gf('jsonfield.fields.JSONField')(default='[]'), keep_default=False)
def backwards(self, orm):
# Deleting field 'Authority.languages'
db.delete_column('portal_authority', 'languages')
# Deleting field 'Authority.scripts'
db.delete_column('portal_authority', 'scripts')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'portal.authority': {
'Meta': {'object_name': 'Authority', '_ormbases': ['portal.Resource']},
'dates_of_existence': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'functions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'general_context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'institution_responsible_identifier': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'internal_structures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'languages': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}),
'legal_status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'lod': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mandates': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'places': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['portal.Resource']", 'unique': 'True', 'primary_key': 'True'}),
'revision_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scripts': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'db_index': 'True'}),
'sources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'type_of_entity': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'portal.collection': {
'Meta': {'object_name': 'Collection', '_ormbases': ['portal.Resource']},
'access_conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'accruals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'acquisition': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'alternate_title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'appraisal': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'archival_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'arrangement': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Authority']", 'null': 'True', 'blank': 'True'}),
'edition': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extent_and_medium': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'finding_aids': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'institution_responsible_identifier': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'languages': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}),
'languages_of_description': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}),
'location_of_copies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'location_of_originals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'lod': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'physical_characteristics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'related_units_of_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Repository']"}),
'reproduction_conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['portal.Resource']", 'unique': 'True', 'primary_key': 'True'}),
'revision_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rules': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scope_and_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scripts': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}),
'scripts_of_description': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'db_index': 'True'}),
'sources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'portal.contact': {
'Meta': {'object_name': 'Contact'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Repository']"}),
'street_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'portal.fuzzydate': {
'Meta': {'object_name': 'FuzzyDate'},
'circa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'date_set'", 'to': "orm['portal.Collection']"}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'end_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'precision': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'start_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'})
},
'portal.othername': {
'Meta': {'object_name': 'OtherName'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'portal.place': {
'Meta': {'object_name': 'Place'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"})
},
'portal.property': {
'Meta': {'object_name': 'Property'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'portal.relation': {
'Meta': {'object_name': 'Relation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['portal.Resource']"}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['portal.Resource']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'portal.repository': {
'Meta': {'object_name': 'Repository', '_ormbases': ['portal.Resource']},
'access_conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'buildings': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'collecting_policies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dates_of_existence': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disabled_access': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'finding_aids': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'functions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'general_context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geocultural_context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'holdings': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'internal_structures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'languages': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}),
'legal_status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'lod': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'logo': ('portal.thumbs.ImageWithThumbsField', [], {'name': "'logo'", 'sizes': '((100, 100), (300, 300))', 'max_length': '100', 'blank': 'True', 'null': 'True'}),
'maintenance_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'mandates': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'opening_times': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'places': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reproduction_services': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'research_services': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['portal.Resource']", 'unique': 'True', 'primary_key': 'True'}),
'rules': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scripts': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'db_index': 'True'}),
'sources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'type_of_entity': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'portal.resource': {
'Meta': {'object_name': 'Resource'},
'created_on': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'portal.resourceimage': {
'Meta': {'object_name': 'ResourceImage'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('portal.thumbs.ImageWithThumbsField', [], {'max_length': '100', 'name': "'image'", 'sizes': '((100, 100), (300, 300))'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['portal']
| mit | 2,309,109,972,575,588,000 | 80.67619 | 174 | 0.543027 | false |
jtbattle/wangemu | wvdutil/wvdHandler_base.py | 1 | 4196 | # Purpose: template class for the file handlers used by the wvdutil.py program
# Author: Jim Battle
#
# Version: 1.0, 2018/09/15, JTB
# massive restructuring of the old wvdutil code base
# Version: 1.1, 2021/06/19, JTB
# get rid of bilingualism (aka python2 support);
# convert to inline type hints instead of type hint pragma comments
# Version: 1.2, 2021/06/20, JTB
# declare and use type aliases Sector and SectorList for clarity
from typing import List, Dict, Any, Tuple # pylint: disable=unused-import
from wvdTypes import Sector, SectorList, Options
class WvdHandler_base(object): # pylint: disable=useless-object-inheritance
def __init__(self):
self._errors: List[str] = []
self._warnings: List[str] = []
self._firsterr: int = 0 # which was the first sector with an error
self._firstwarn: int = 0 # which was the first sector with a warning
@staticmethod
def name() -> str:
return 'short description'
@staticmethod
def nameLong() -> str:
# optional: override with longer description if useful
return WvdHandler_base.name()
# return either "P "(rogram) or "D "(ata)
@staticmethod
def fileType() -> str:
return 'D '
# pylint: disable=unused-argument, no-self-use
def checkBlocks(self, blocks: SectorList, opts: Options) -> Dict[str, Any]:
# the opts dictionary can contain these keys:
# 'sector' = <number> -- the absolute address of the first sector
# 'used' = <number> -- the "used" field from the catalog, if it is known
# 'warnlimit' = <number> -- stop when the number of warnings is exceeded
# the return dict contains these keys:
# 'failed' = bool -- True if any errors or warnings
# 'errors' = [str] -- list of error messages
# 'warnings' = [str] -- list of warning messages
# 'lastsec' = <number> -- last valid sector before giving up
        return {'failed': False, 'errors': [], 'warnings': [], 'lastsec': 0}
# the bool is True if this is a terminating block
# pylint: disable=unused-argument, no-self-use
def listOneBlock(self, blk: Sector, opts: Options) -> Tuple[bool, List[str]]:
# the opts dictionary can contain these keys:
# 'sector' = <number> -- the absolute address of the first sector
# 'used' = <number> -- the "used" field from the catalog, if it is known
# 'warnlimit' = <number> -- stop when the number of warnings is exceeded
return (True, [])
    # If the file type doesn't have context which crosses sectors, then
    # the default method will just repeatedly use listOneBlock.
def listBlocks(self, blocks: SectorList, opts: Options) -> List[str]:
# same opts as listOneBlock
listing = []
opt = dict(opts)
for offset, blk in enumerate(blocks):
opt['secnum'] = opts['sector'] + offset
done, morelines = self.listOneBlock(blk, opt)
listing.extend(morelines)
if done: break
return listing
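
    # Illustrative sketch only: the handler name and listing format below are
    # made up, not taken from a real wvdutil handler. A concrete subclass
    # mainly overrides name(), fileType(), and listOneBlock():
    #
    #   class WvdHandler_dump(WvdHandler_base):
    #       @staticmethod
    #       def name() -> str:
    #           return 'hex dump'
    #       def listOneBlock(self, blk, opts):
    #           line = ' '.join('%02x' % b for b in blk[0:16])
    #           return (False, [line])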
# utilities to be used by derived classes
def clearErrors(self) -> None:
self._errors = []
self._warnings = []
self._firsterr = 0
self._firstwarn = 0
def error(self, secnum: int, text: str) -> None:
if (not self._errors) or (secnum < self._firsterr):
self._firsterr = secnum
self._errors.append(text)
def warning(self, secnum: int, text: str) -> None:
if (not self._warnings) or (secnum < self._firstwarn):
self._firstwarn = secnum
self._warnings.append(text)
def status(self, sec: int, opts: Options) -> Dict[str, Any]:
failed = (len(self._errors) > 0) or (len(self._warnings) > opts['warnlimit'])
if self._errors:
last_good_sector = self._firsterr-1
elif self._warnings:
last_good_sector = self._firstwarn-1
else:
last_good_sector = sec
return { 'failed': failed,
'errors': self._errors,
'warnings': self._warnings,
'lastsec': last_good_sector }
| mit | -7,556,717,784,087,207,000 | 39.346154 | 87 | 0.602717 | false |
endlessm/chromium-browser | build/win/reorder-imports.py | 4 | 4054 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import optparse
import os
import shutil
import subprocess
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..',
'third_party', 'pefile'))
import pefile
def reorder_imports(input_dir, output_dir, architecture):
"""Swap chrome_elf.dll to be the first import of chrome.exe.
Also copy over any related files that might be needed
(pdbs, manifests etc.).
"""
# TODO(thakis): See if there is a reliable way to write the
# correct executable in the first place, so that this script
# only needs to verify that and not write a whole new exe.
input_image = os.path.join(input_dir, 'chrome.exe')
output_image = os.path.join(output_dir, 'chrome.exe')
# pefile mmap()s the whole executable, and then parses parts of
# it into python data structures for ease of processing.
# To write the file again, only the mmap'd data is written back,
# so modifying the parsed python objects generally has no effect.
# However, parsed raw data ends up in pe.Structure instances,
# and these all get serialized back when the file gets written.
# So things that are in a Structure must have their data set
# through the Structure, while other data must bet set through
# the set_bytes_*() methods.
pe = pefile.PE(input_image, fast_load=True)
if architecture == 'x64' or architecture == 'arm64':
assert pe.PE_TYPE == pefile.OPTIONAL_HEADER_MAGIC_PE_PLUS
else:
assert pe.PE_TYPE == pefile.OPTIONAL_HEADER_MAGIC_PE
pe.parse_data_directories(directories=[
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']])
found_elf = False
for i, peimport in enumerate(pe.DIRECTORY_ENTRY_IMPORT):
if peimport.dll.lower() == 'chrome_elf.dll':
assert not found_elf, 'only one chrome_elf.dll import expected'
found_elf = True
if i > 0:
swap = pe.DIRECTORY_ENTRY_IMPORT[0]
# Morally we want to swap peimport.struct and swap.struct here,
# but the pe module doesn't expose a public method on Structure
# to get all data of a Structure without explicitly listing all
# field names.
# NB: OriginalFirstThunk and Characteristics are an union both at
# offset 0, handling just one of them is enough.
peimport.struct.OriginalFirstThunk, swap.struct.OriginalFirstThunk = \
swap.struct.OriginalFirstThunk, peimport.struct.OriginalFirstThunk
peimport.struct.TimeDateStamp, swap.struct.TimeDateStamp = \
swap.struct.TimeDateStamp, peimport.struct.TimeDateStamp
peimport.struct.ForwarderChain, swap.struct.ForwarderChain = \
swap.struct.ForwarderChain, peimport.struct.ForwarderChain
peimport.struct.Name, swap.struct.Name = \
swap.struct.Name, peimport.struct.Name
peimport.struct.FirstThunk, swap.struct.FirstThunk = \
swap.struct.FirstThunk, peimport.struct.FirstThunk
assert found_elf, 'chrome_elf.dll import not found'
pe.write(filename=output_image)
for fname in glob.iglob(os.path.join(input_dir, 'chrome.exe.*')):
shutil.copy(fname, os.path.join(output_dir, os.path.basename(fname)))
return 0
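
# Example invocation (directory names are placeholders; see the usage string
# in main() below):
#   python reorder-imports.py -i out/Release -o out/Release/reordered -a x64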
def main(argv):
usage = 'reorder_imports.py -i <input_dir> -o <output_dir> -a <target_arch>'
parser = optparse.OptionParser(usage=usage)
parser.add_option('-i', '--input', help='reorder chrome.exe in DIR',
metavar='DIR')
parser.add_option('-o', '--output', help='write new chrome.exe to DIR',
metavar='DIR')
parser.add_option('-a', '--arch', help='architecture of build (optional)',
default='ia32')
opts, args = parser.parse_args()
if not opts.input or not opts.output:
    parser.error('Please provide an input and output directory')
return reorder_imports(opts.input, opts.output, opts.arch)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | 5,867,402,285,721,175,000 | 41.229167 | 78 | 0.694869 | false |
qedsoftware/commcare-hq | corehq/apps/callcenter/fixturegenerators.py | 1 | 4228 | from xml.etree import ElementTree
from datetime import datetime
import pytz
from corehq.apps.callcenter.app_parser import get_call_center_config_from_app
from casexml.apps.phone.models import OTARestoreUser
from corehq.util.soft_assert import soft_assert
from corehq.util.timezones.conversions import ServerTime
from dimagi.utils.logging import notify_exception
utc = pytz.utc
def should_sync(domain, last_sync, utcnow=None):
# definitely sync if we haven't synced before
if not last_sync or not last_sync.date:
return True
# utcnow only used in tests to mock other times
utcnow = utcnow or datetime.utcnow()
try:
timezone = domain.get_default_timezone()
except pytz.UnknownTimeZoneError:
timezone = utc
last_sync_utc = last_sync.date
# check if user has already synced today (in local timezone).
# Indicators only change daily.
last_sync_local = ServerTime(last_sync_utc).user_time(timezone).done()
current_date_local = ServerTime(utcnow).user_time(timezone).done()
if current_date_local.date() != last_sync_local.date():
return True
return False
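
# Illustrative example of the rule above (timestamps are made up): for a
# domain in UTC, a last sync at 2016-01-01 23:59 and a utcnow of
# 2016-01-02 00:01 fall on different local dates, so should_sync() returns
# True; a second sync later on the same local date returns False.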
class IndicatorsFixturesProvider(object):
id = 'indicators'
def __call__(self, restore_user, version, last_sync=None, app=None):
assert isinstance(restore_user, OTARestoreUser)
domain = restore_user.project
fixtures = []
if self._should_return_no_fixtures(domain, last_sync):
return fixtures
config = None
if app:
try:
config = get_call_center_config_from_app(app)
            except Exception:
notify_exception(None, "Error getting call center config from app", details={
'domain': app.domain,
'app_id': app.get_id
})
if config:
_assert = soft_assert(['skelly_at_dimagi_dot_com'.replace('_at_', '@').replace('_dot_', '.')])
_assert(not config.includes_legacy(), 'Domain still using legacy call center indicators', {
'domain': domain.name,
'config': config.to_json()
})
try:
fixtures.append(gen_fixture(restore_user, restore_user.get_call_center_indicators(config)))
except Exception: # blanket exception catching intended
notify_exception(None, 'problem generating callcenter fixture', details={
'user_id': restore_user.user_id,
'domain': restore_user.domain
})
return fixtures
@staticmethod
def _should_return_no_fixtures(domain, last_sync):
        if not domain:
            return True
        config = domain.call_center_config
        return (
            not (config.fixtures_are_active() and config.config_is_valid()) or
            not should_sync(domain, last_sync)
        )
indicators_fixture_generator = IndicatorsFixturesProvider()
def gen_fixture(restore_user, indicator_set):
"""
Generate the fixture from the indicator data.
:param user: The user.
:param indicator_set: A subclass of SqlIndicatorSet
"""
"""
Example output:
indicator_set.name = 'demo'
indicator_set.get_data() = {'user_case1': {'indicator_a': 1, 'indicator_b': 2}}
<fixture id="indicators:demo" user_id="...">
<indicators>
<case id="user_case1">
<indicator_a>1</indicator_a>
                <indicator_b>2</indicator_b>
</case>
</indicators>
</fixture>
"""
if indicator_set is None:
return []
name = indicator_set.name
data = indicator_set.get_data()
fixture = ElementTree.Element('fixture', attrib={
'id': ':'.join((IndicatorsFixturesProvider.id, name)),
'user_id': restore_user.user_id,
'date': indicator_set.reference_date.isoformat()
})
indicators_node = ElementTree.SubElement(fixture, 'indicators')
for case_id, indicators in data.iteritems():
group = ElementTree.SubElement(indicators_node, 'case', attrib={'id': case_id})
for name, value in indicators.items():
indicator = ElementTree.SubElement(group, name)
indicator.text = str(value)
return fixture
| bsd-3-clause | 1,972,465,104,134,683,000 | 31.274809 | 106 | 0.62228 | false |
mikemhenry/arcade | examples/sprite_tiled_map.py | 1 | 6561 | """
Load a map stored in csv format, as exported by the program 'Tiled.'
Artwork from http://kenney.nl
"""
import arcade
SPRITE_SCALING = 0.5
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
# How many pixels to keep as a minimum margin between the character
# and the edge of the screen.
VIEWPORT_MARGIN = 40
RIGHT_MARGIN = 150
# Physics
MOVEMENT_SPEED = 5
JUMP_SPEED = 14
GRAVITY = 0.5
def get_map():
    map_array = []
    with open("map.csv") as map_file:
        for line in map_file:
            line = line.strip()
            map_row = line.split(",")
            for index, item in enumerate(map_row):
                map_row[index] = int(item)
            map_array.append(map_row)
    return map_array
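
# A minimal sketch of what map.csv is expected to contain. The tile codes are
# assumptions inferred from setup() below: -1 = empty, 0 = crate,
# 1/2/3 = grass left/mid/right.
#
#   -1,-1,-1,-1,-1
#   1,2,2,2,3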
class MyApplication(arcade.Window):
""" Main application class. """
def __init__(self, width, height):
"""
Initializer
:param width:
:param height:
"""
super().__init__(width, height)
# Sprite lists
self.all_sprites_list = None
self.coin_list = None
# Set up the player
self.score = 0
self.player_sprite = None
self.wall_list = None
self.physics_engine = None
self.view_left = 0
self.view_bottom = 0
self.game_over = False
def setup(self):
""" Set up the game and initialize the variables. """
# Sprite lists
self.all_sprites_list = arcade.SpriteList()
self.wall_list = arcade.SpriteList()
# Set up the player
self.score = 0
self.player_sprite = arcade.Sprite("images/character.png",
SPRITE_SCALING)
self.player_sprite.center_x = 64
self.player_sprite.center_y = 270
self.all_sprites_list.append(self.player_sprite)
map_array = get_map()
for row_index, row in enumerate(map_array):
for column_index, item in enumerate(row):
if item == -1:
continue
elif item == 0:
wall = arcade.Sprite("images/boxCrate_double.png",
SPRITE_SCALING)
elif item == 1:
wall = arcade.Sprite("images/grassLeft.png",
SPRITE_SCALING)
elif item == 2:
wall = arcade.Sprite("images/grassMid.png",
SPRITE_SCALING)
                elif item == 3:
                    wall = arcade.Sprite("images/grassRight.png",
                                         SPRITE_SCALING)
                else:
                    # Unrecognized tile code: skip it so 'wall' is never
                    # referenced before assignment below.
                    continue
wall.right = column_index * 64
wall.top = (7 - row_index) * 64
self.all_sprites_list.append(wall)
self.wall_list.append(wall)
self.physics_engine = \
arcade.PhysicsEnginePlatformer(self.player_sprite,
self.wall_list,
gravity_constant=GRAVITY)
# Set the background color
arcade.set_background_color(arcade.color.AMAZON)
# Set the viewport boundaries
# These numbers set where we have 'scrolled' to.
self.view_left = 0
self.view_bottom = 0
self.game_over = False
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw all the sprites.
self.all_sprites_list.draw()
# Put the text on the screen.
# Adjust the text position based on the viewport so that we don't
# scroll the text too.
distance = self.view_left + self.player_sprite.right
output = "Distance: {}".format(distance)
arcade.draw_text(output, self.view_left + 10, self.view_bottom + 20,
arcade.color.WHITE, 14)
if self.game_over:
output = "Game Over"
arcade.draw_text(output, self.view_left + 200,
self.view_bottom + 200,
arcade.color.WHITE, 30)
def on_key_press(self, key, modifiers):
"""
        Called whenever a key is pressed.
"""
if key == arcade.key.UP:
if self.physics_engine.can_jump():
self.player_sprite.change_y = JUMP_SPEED
elif key == arcade.key.LEFT:
self.player_sprite.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player_sprite.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
        Called when the user releases a key.
"""
if key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player_sprite.change_x = 0
def animate(self, delta_time):
""" Movement and game logic """
if self.view_left + self.player_sprite.right >= 5630:
self.game_over = True
# Call update on all sprites (The sprites don't do much in this
# example though.)
if not self.game_over:
self.physics_engine.update()
# --- Manage Scrolling ---
# Track if we need to change the viewport
changed = False
# Scroll left
left_bndry = self.view_left + VIEWPORT_MARGIN
if self.player_sprite.left < left_bndry:
self.view_left -= left_bndry - self.player_sprite.left
changed = True
# Scroll right
right_bndry = self.view_left + SCREEN_WIDTH - RIGHT_MARGIN
if self.player_sprite.right > right_bndry:
self.view_left += self.player_sprite.right - right_bndry
changed = True
# Scroll up
top_bndry = self.view_bottom + SCREEN_HEIGHT - VIEWPORT_MARGIN
if self.player_sprite.top > top_bndry:
self.view_bottom += self.player_sprite.top - top_bndry
changed = True
# Scroll down
bottom_bndry = self.view_bottom + VIEWPORT_MARGIN
if self.player_sprite.bottom < bottom_bndry:
self.view_bottom -= bottom_bndry - self.player_sprite.bottom
changed = True
# If we need to scroll, go ahead and do it.
if changed:
arcade.set_viewport(self.view_left,
SCREEN_WIDTH + self.view_left,
self.view_bottom,
SCREEN_HEIGHT + self.view_bottom)
window = MyApplication(SCREEN_WIDTH, SCREEN_HEIGHT)
window.setup()
arcade.run()
| mit | -5,013,089,162,620,566,000 | 30.242857 | 76 | 0.53757 | false |
scottrice/Ice | tests/steam_grid_updater_tests.py | 1 | 2915 |
import os
import tempfile
import unittest
from mockito import *
from pysteam import grid
from pysteam import shortcuts
from ice import model
from ice import roms
from ice import steam_grid_updater
from testinfra import fixtures
class SteamGridUpdaterTests(unittest.TestCase):
def setUp(self):
self.steam_fixture = fixtures.SteamFixture()
self.user_fixture = fixtures.UserFixture(self.steam_fixture)
self.mock_provider = mock()
self.updater = steam_grid_updater.SteamGridUpdater(
self.mock_provider,
)
def tearDown(self):
self.user_fixture.tearDown()
self.steam_fixture.tearDown()
def test_updater_sets_image_if_provider_has_one(self):
rom = model.ROM(name = 'Game1', path = '/Path/to/game1', console = fixtures.consoles.flagged)
shortcut = roms.rom_to_shortcut(rom)
(handle, path) = tempfile.mkstemp('.png')
when(self.mock_provider).image_for_rom(rom).thenReturn(path)
self.assertFalse(grid.has_custom_image(self.user_fixture.get_context(), shortcuts.shortcut_app_id(shortcut)))
self.updater.update_rom_artwork(self.user_fixture.get_context(), rom)
self.assertTrue(grid.has_custom_image(self.user_fixture.get_context(), shortcuts.shortcut_app_id(shortcut)))
os.remove(path)
def test_updater_does_nothing_if_provider_has_no_image(self):
rom = model.ROM(name = 'Game1', path = '/Path/to/game1', console = fixtures.consoles.flagged)
shortcut = roms.rom_to_shortcut(rom)
when(self.mock_provider).image_for_rom(rom).thenReturn(None)
self.assertFalse(grid.has_custom_image(self.user_fixture.get_context(), shortcuts.shortcut_app_id(shortcut)))
self.updater.update_rom_artwork(self.user_fixture.get_context(), rom)
self.assertFalse(grid.has_custom_image(self.user_fixture.get_context(), shortcuts.shortcut_app_id(shortcut)))
def test_updater_keeps_image_if_already_exists(self):
rom = model.ROM(name = 'Game1', path = '/Path/to/game1', console = fixtures.consoles.flagged)
shortcut = roms.rom_to_shortcut(rom)
# Start with a custom image, say a .png
(handle, path) = tempfile.mkstemp('.png')
grid.set_custom_image(self.user_fixture.get_context(), shortcuts.shortcut_app_id(shortcut), path)
os.remove(path)
# Make the provider return a .jpg
(handle, path) = tempfile.mkstemp('.jpg')
when(self.mock_provider).image_for_rom(rom).thenReturn(path)
self.assertTrue(grid.has_custom_image(self.user_fixture.get_context(), shortcuts.shortcut_app_id(shortcut)))
self.updater.update_rom_artwork(self.user_fixture.get_context(), rom)
self.assertTrue(grid.has_custom_image(self.user_fixture.get_context(), shortcuts.shortcut_app_id(shortcut)))
# Ensure that we are still using the .png, not the .jpg
(_, ext) = os.path.splitext(grid.get_custom_image(self.user_fixture.get_context(), shortcuts.shortcut_app_id(shortcut)))
self.assertEqual(ext, '.png')
| mit | 7,511,073,260,580,224,000 | 37.866667 | 124 | 0.723499 | false |
anderspitman/scikit-bio | skbio/sequence/distance.py | 1 | 5233 | """
Sequence distance metrics (:mod:`skbio.sequence.distance`)
==========================================================
.. currentmodule:: skbio.sequence.distance
This module contains functions for computing distances between scikit-bio
``Sequence`` objects. These functions can be used directly or supplied to other
parts of the scikit-bio API that accept a sequence distance metric as input,
such as :meth:`skbio.sequence.Sequence.distance` and
:meth:`skbio.stats.distance.DistanceMatrix.from_iterable`.
Functions
---------
.. autosummary::
:toctree: generated/
hamming
kmer_distance
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import scipy.spatial.distance
import skbio
from skbio.util._decorator import experimental
@experimental(as_of='0.4.2')
def hamming(seq1, seq2):
"""Compute Hamming distance between two sequences.
The Hamming distance between two equal-length sequences is the proportion
of differing characters.
Parameters
----------
seq1, seq2 : Sequence
Sequences to compute Hamming distance between.
Returns
-------
float
Hamming distance between `seq1` and `seq2`.
Raises
------
TypeError
If `seq1` and `seq2` are not ``Sequence`` instances.
TypeError
If `seq1` and `seq2` are not the same type.
ValueError
If `seq1` and `seq2` are not the same length.
See Also
--------
scipy.spatial.distance.hamming
Notes
-----
``np.nan`` will be returned if the sequences do not contain any characters.
This function does not make assumptions about the sequence alphabet in use.
Each sequence object's underlying sequence of characters are used to
compute Hamming distance. Characters that may be considered equivalent in
certain contexts (e.g., `-` and `.` as gap characters) are treated as
distinct characters when computing Hamming distance.
Examples
--------
>>> from skbio import Sequence
>>> from skbio.sequence.distance import hamming
>>> seq1 = Sequence('AGGGTA')
>>> seq2 = Sequence('CGTTTA')
>>> hamming(seq1, seq2)
0.5
"""
_check_seqs(seq1, seq2)
# Hamming requires equal length sequences. We are checking this here
# because the error you would get otherwise is cryptic.
if len(seq1) != len(seq2):
raise ValueError(
"Hamming distance can only be computed between sequences of equal "
"length (%d != %d)" % (len(seq1), len(seq2)))
# scipy throws a RuntimeWarning when computing Hamming distance on length 0
# input.
if not seq1:
distance = np.nan
else:
distance = scipy.spatial.distance.hamming(seq1.values, seq2.values)
return float(distance)
@experimental(as_of='0.4.2-dev')
def kmer_distance(seq1, seq2, k, overlap=True):
"""Compute the kmer distance between a pair of sequences
The kmer distance between two sequences is the fraction of kmers that are
unique to either sequence.
Parameters
----------
seq1, seq2 : Sequence
Sequences to compute kmer distance between.
k : int
The kmer length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
Returns
-------
float
kmer distance between `seq1` and `seq2`.
Raises
------
ValueError
If `k` is less than 1.
TypeError
If `seq1` and `seq2` are not ``Sequence`` instances.
TypeError
If `seq1` and `seq2` are not the same type.
Notes
-----
kmer counts are not incorporated in this distance metric.
``np.nan`` will be returned if there are no kmers defined for the
sequences.
Examples
--------
>>> from skbio import Sequence
>>> seq1 = Sequence('ATCGGCGAT')
>>> seq2 = Sequence('GCAGATGTG')
>>> kmer_distance(seq1, seq2, 3) # doctest: +ELLIPSIS
0.9230769230...
"""
_check_seqs(seq1, seq2)
seq1_kmers = set(map(str, seq1.iter_kmers(k, overlap=overlap)))
seq2_kmers = set(map(str, seq2.iter_kmers(k, overlap=overlap)))
all_kmers = seq1_kmers | seq2_kmers
if not all_kmers:
return np.nan
shared_kmers = seq1_kmers & seq2_kmers
number_unique = len(all_kmers) - len(shared_kmers)
fraction_unique = number_unique / len(all_kmers)
return fraction_unique
def _check_seqs(seq1, seq2):
    # Ensure both sequences are skbio.Sequence instances
for seq in seq1, seq2:
if not isinstance(seq, skbio.Sequence):
raise TypeError(
"`seq1` and `seq2` must be Sequence instances, not %r"
% type(seq).__name__)
    # Ensure the sequences have matching types
if type(seq1) is not type(seq2):
raise TypeError(
"Sequences must have matching type. Type %r does not match type %r"
% (type(seq1).__name__, type(seq2).__name__))
| bsd-3-clause | -1,355,334,489,428,729,900 | 28.398876 | 79 | 0.623543 | false |
kennedyshead/home-assistant | homeassistant/components/media_player/__init__.py | 1 | 39109 | """Component to interface with various media players."""
from __future__ import annotations
import asyncio
import base64
import collections
from contextlib import suppress
import datetime as dt
import functools as ft
import hashlib
import logging
import secrets
from typing import final
from urllib.parse import urlparse
from aiohttp import web
from aiohttp.hdrs import CACHE_CONTROL, CONTENT_TYPE
from aiohttp.typedefs import LooseHeaders
import async_timeout
import voluptuous as vol
from yarl import URL
from homeassistant.components import websocket_api
from homeassistant.components.http import KEY_AUTHENTICATED, HomeAssistantView
from homeassistant.components.websocket_api.const import (
ERR_NOT_FOUND,
ERR_NOT_SUPPORTED,
ERR_UNKNOWN_ERROR,
)
from homeassistant.const import (
HTTP_INTERNAL_SERVER_ERROR,
HTTP_NOT_FOUND,
HTTP_OK,
HTTP_UNAUTHORIZED,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_SEEK,
SERVICE_MEDIA_STOP,
SERVICE_REPEAT_SET,
SERVICE_SHUFFLE_SET,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_IDLE,
STATE_OFF,
STATE_PLAYING,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
datetime,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.network import get_url
from homeassistant.loader import bind_hass
from .const import (
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_GROUP_MEMBERS,
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_ENQUEUE,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_EXTRA,
ATTR_MEDIA_PLAYLIST,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_REPEAT,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_SOUND_MODE,
ATTR_SOUND_MODE_LIST,
DOMAIN,
MEDIA_CLASS_DIRECTORY,
REPEAT_MODES,
SERVICE_CLEAR_PLAYLIST,
SERVICE_JOIN,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOUND_MODE,
SERVICE_SELECT_SOURCE,
SERVICE_UNJOIN,
SUPPORT_BROWSE_MEDIA,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_GROUPING,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_REPEAT_SET,
SUPPORT_SEEK,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from .errors import BrowseError
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CACHE_IMAGES = "images"
CACHE_MAXSIZE = "maxsize"
CACHE_LOCK = "lock"
CACHE_URL = "url"
CACHE_CONTENT = "content"
ENTITY_IMAGE_CACHE = {CACHE_IMAGES: collections.OrderedDict(), CACHE_MAXSIZE: 16}
SCAN_INTERVAL = dt.timedelta(seconds=10)
DEVICE_CLASS_TV = "tv"
DEVICE_CLASS_SPEAKER = "speaker"
DEVICE_CLASS_RECEIVER = "receiver"
DEVICE_CLASSES = [DEVICE_CLASS_TV, DEVICE_CLASS_SPEAKER, DEVICE_CLASS_RECEIVER]
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))
MEDIA_PLAYER_PLAY_MEDIA_SCHEMA = {
vol.Required(ATTR_MEDIA_CONTENT_TYPE): cv.string,
vol.Required(ATTR_MEDIA_CONTENT_ID): cv.string,
vol.Optional(ATTR_MEDIA_ENQUEUE): cv.boolean,
vol.Optional(ATTR_MEDIA_EXTRA, default={}): dict,
}
ATTR_TO_PROPERTY = [
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_PLAYLIST,
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_INPUT_SOURCE,
ATTR_SOUND_MODE,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_REPEAT,
]
@bind_hass
def is_on(hass, entity_id=None):
"""
    Return true if the specified media player entity_id is on.

    Check all media players if no entity_id is specified.
"""
entity_ids = [entity_id] if entity_id else hass.states.entity_ids(DOMAIN)
return any(
not hass.states.is_state(entity_id, STATE_OFF) for entity_id in entity_ids
)
def _rename_keys(**keys):
"""Create validator that renames keys.
Necessary because the service schema names do not match the command parameters.
Async friendly.
"""
def rename(value):
for to_key, from_key in keys.items():
if from_key in value:
value[to_key] = value.pop(from_key)
return value
return rename
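
# For example (illustrative values only):
# _rename_keys(volume=ATTR_MEDIA_VOLUME_LEVEL) returns a validator that turns
# {"volume_level": 0.5} into {"volume": 0.5}, i.e. the service schema key is
# renamed to the keyword argument expected by async_set_volume_level.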
async def async_setup(hass, config):
"""Track states and offer events for media_players."""
component = hass.data[DOMAIN] = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
)
hass.components.websocket_api.async_register_command(websocket_handle_thumbnail)
hass.components.websocket_api.async_register_command(websocket_browse_media)
hass.http.register_view(MediaPlayerImageView(component))
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_TURN_ON, {}, "async_turn_on", [SUPPORT_TURN_ON]
)
component.async_register_entity_service(
SERVICE_TURN_OFF, {}, "async_turn_off", [SUPPORT_TURN_OFF]
)
component.async_register_entity_service(
SERVICE_TOGGLE, {}, "async_toggle", [SUPPORT_TURN_OFF | SUPPORT_TURN_ON]
)
component.async_register_entity_service(
SERVICE_VOLUME_UP,
{},
"async_volume_up",
[SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP],
)
component.async_register_entity_service(
SERVICE_VOLUME_DOWN,
{},
"async_volume_down",
[SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP],
)
component.async_register_entity_service(
SERVICE_MEDIA_PLAY_PAUSE,
{},
"async_media_play_pause",
[SUPPORT_PLAY | SUPPORT_PAUSE],
)
component.async_register_entity_service(
SERVICE_MEDIA_PLAY, {}, "async_media_play", [SUPPORT_PLAY]
)
component.async_register_entity_service(
SERVICE_MEDIA_PAUSE, {}, "async_media_pause", [SUPPORT_PAUSE]
)
component.async_register_entity_service(
SERVICE_MEDIA_STOP, {}, "async_media_stop", [SUPPORT_STOP]
)
component.async_register_entity_service(
SERVICE_MEDIA_NEXT_TRACK, {}, "async_media_next_track", [SUPPORT_NEXT_TRACK]
)
component.async_register_entity_service(
SERVICE_MEDIA_PREVIOUS_TRACK,
{},
"async_media_previous_track",
[SUPPORT_PREVIOUS_TRACK],
)
component.async_register_entity_service(
SERVICE_CLEAR_PLAYLIST, {}, "async_clear_playlist", [SUPPORT_CLEAR_PLAYLIST]
)
component.async_register_entity_service(
SERVICE_VOLUME_SET,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_VOLUME_LEVEL): cv.small_float}
),
_rename_keys(volume=ATTR_MEDIA_VOLUME_LEVEL),
),
"async_set_volume_level",
[SUPPORT_VOLUME_SET],
)
component.async_register_entity_service(
SERVICE_VOLUME_MUTE,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_VOLUME_MUTED): cv.boolean}
),
_rename_keys(mute=ATTR_MEDIA_VOLUME_MUTED),
),
"async_mute_volume",
[SUPPORT_VOLUME_MUTE],
)
component.async_register_entity_service(
SERVICE_MEDIA_SEEK,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_SEEK_POSITION): cv.positive_float}
),
_rename_keys(position=ATTR_MEDIA_SEEK_POSITION),
),
"async_media_seek",
[SUPPORT_SEEK],
)
component.async_register_entity_service(
SERVICE_JOIN,
{vol.Required(ATTR_GROUP_MEMBERS): vol.All(cv.ensure_list, [cv.entity_id])},
"async_join_players",
[SUPPORT_GROUPING],
)
component.async_register_entity_service(
SERVICE_SELECT_SOURCE,
{vol.Required(ATTR_INPUT_SOURCE): cv.string},
"async_select_source",
[SUPPORT_SELECT_SOURCE],
)
component.async_register_entity_service(
SERVICE_SELECT_SOUND_MODE,
{vol.Required(ATTR_SOUND_MODE): cv.string},
"async_select_sound_mode",
[SUPPORT_SELECT_SOUND_MODE],
)
component.async_register_entity_service(
SERVICE_PLAY_MEDIA,
vol.All(
cv.make_entity_service_schema(MEDIA_PLAYER_PLAY_MEDIA_SCHEMA),
_rename_keys(
media_type=ATTR_MEDIA_CONTENT_TYPE,
media_id=ATTR_MEDIA_CONTENT_ID,
enqueue=ATTR_MEDIA_ENQUEUE,
),
),
"async_play_media",
[SUPPORT_PLAY_MEDIA],
)
component.async_register_entity_service(
SERVICE_SHUFFLE_SET,
{vol.Required(ATTR_MEDIA_SHUFFLE): cv.boolean},
"async_set_shuffle",
[SUPPORT_SHUFFLE_SET],
)
component.async_register_entity_service(
SERVICE_UNJOIN, {}, "async_unjoin_player", [SUPPORT_GROUPING]
)
component.async_register_entity_service(
SERVICE_REPEAT_SET,
{vol.Required(ATTR_MEDIA_REPEAT): vol.In(REPEAT_MODES)},
"async_set_repeat",
[SUPPORT_REPEAT_SET],
)
return True
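
# Example service call handled by the registrations above (YAML, values are
# illustrative):
#
#   service: media_player.volume_set
#   target:
#     entity_id: media_player.living_room
#   data:
#     volume_level: 0.5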
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class MediaPlayerEntity(Entity):
"""ABC for media player entities."""
_access_token: str | None = None
_attr_app_id: str | None = None
_attr_app_name: str | None = None
_attr_group_members: list[str] | None = None
_attr_is_volume_muted: bool | None = None
_attr_media_album_artist: str | None = None
_attr_media_album_name: str | None = None
_attr_media_artist: str | None = None
_attr_media_channel: str | None = None
_attr_media_content_id: str | None = None
_attr_media_content_type: str | None = None
_attr_media_duration: int | None = None
_attr_media_episode: str | None = None
_attr_media_image_hash: str | None
_attr_media_image_remotely_accessible: bool = False
_attr_media_image_url: str | None = None
_attr_media_playlist: str | None = None
_attr_media_position_updated_at: dt.datetime | None = None
_attr_media_position: int | None = None
_attr_media_season: str | None = None
_attr_media_series_title: str | None = None
_attr_media_title: str | None = None
_attr_media_track: int | None = None
_attr_repeat: str | None = None
_attr_shuffle: bool | None = None
_attr_sound_mode_list: list[str] | None = None
_attr_sound_mode: str | None = None
_attr_source_list: list[str] | None = None
_attr_source: str | None = None
_attr_state: str | None = None
_attr_supported_features: int = 0
_attr_volume_level: float | None = None
# Implement these for your media player
@property
def state(self) -> str | None:
"""State of the player."""
return self._attr_state
@property
def access_token(self) -> str:
"""Access token for this media player."""
if self._access_token is None:
self._access_token = secrets.token_hex(32)
return self._access_token
@property
def volume_level(self) -> float | None:
"""Volume level of the media player (0..1)."""
return self._attr_volume_level
@property
def is_volume_muted(self) -> bool | None:
"""Boolean if volume is currently muted."""
return self._attr_is_volume_muted
@property
def media_content_id(self) -> str | None:
"""Content ID of current playing media."""
return self._attr_media_content_id
@property
def media_content_type(self) -> str | None:
"""Content type of current playing media."""
return self._attr_media_content_type
@property
def media_duration(self) -> int | None:
"""Duration of current playing media in seconds."""
return self._attr_media_duration
@property
def media_position(self) -> int | None:
"""Position of current playing media in seconds."""
return self._attr_media_position
@property
def media_position_updated_at(self) -> dt.datetime | None:
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
return self._attr_media_position_updated_at
@property
def media_image_url(self) -> str | None:
"""Image url of current playing media."""
return self._attr_media_image_url
@property
def media_image_remotely_accessible(self) -> bool:
"""If the image url is remotely accessible."""
return self._attr_media_image_remotely_accessible
@property
def media_image_hash(self) -> str | None:
"""Hash value for media image."""
if hasattr(self, "_attr_media_image_hash"):
return self._attr_media_image_hash
url = self.media_image_url
if url is not None:
return hashlib.sha256(url.encode("utf-8")).hexdigest()[:16]
return None
async def async_get_media_image(self):
"""Fetch media image of current playing image."""
url = self.media_image_url
if url is None:
return None, None
return await self._async_fetch_image_from_cache(url)
async def async_get_browse_image(
self,
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> tuple[str | None, str | None]:
"""
Optionally fetch internally accessible image for media browser.
Must be implemented by integration.
"""
return None, None
@property
def media_title(self) -> str | None:
"""Title of current playing media."""
return self._attr_media_title
@property
def media_artist(self) -> str | None:
"""Artist of current playing media, music track only."""
return self._attr_media_artist
@property
def media_album_name(self) -> str | None:
"""Album name of current playing media, music track only."""
return self._attr_media_album_name
@property
def media_album_artist(self) -> str | None:
"""Album artist of current playing media, music track only."""
return self._attr_media_album_artist
@property
def media_track(self) -> int | None:
"""Track number of current playing media, music track only."""
return self._attr_media_track
@property
def media_series_title(self) -> str | None:
"""Title of series of current playing media, TV show only."""
return self._attr_media_series_title
@property
def media_season(self) -> str | None:
"""Season of current playing media, TV show only."""
return self._attr_media_season
@property
def media_episode(self) -> str | None:
"""Episode of current playing media, TV show only."""
return self._attr_media_episode
@property
def media_channel(self) -> str | None:
"""Channel currently playing."""
return self._attr_media_channel
@property
def media_playlist(self) -> str | None:
"""Title of Playlist currently playing."""
return self._attr_media_playlist
@property
def app_id(self) -> str | None:
"""ID of the current running app."""
return self._attr_app_id
@property
def app_name(self) -> str | None:
"""Name of the current running app."""
return self._attr_app_name
@property
def source(self) -> str | None:
"""Name of the current input source."""
return self._attr_source
@property
def source_list(self) -> list[str] | None:
"""List of available input sources."""
return self._attr_source_list
@property
def sound_mode(self) -> str | None:
"""Name of the current sound mode."""
return self._attr_sound_mode
@property
def sound_mode_list(self) -> list[str] | None:
"""List of available sound modes."""
return self._attr_sound_mode_list
@property
def shuffle(self) -> bool | None:
"""Boolean if shuffle is enabled."""
return self._attr_shuffle
@property
def repeat(self) -> str | None:
"""Return current repeat mode."""
return self._attr_repeat
@property
def group_members(self) -> list[str] | None:
"""List of members which are currently grouped together."""
return self._attr_group_members
@property
def supported_features(self) -> int:
"""Flag media player features that are supported."""
return self._attr_supported_features
def turn_on(self):
"""Turn the media player on."""
raise NotImplementedError()
async def async_turn_on(self):
"""Turn the media player on."""
await self.hass.async_add_executor_job(self.turn_on)
def turn_off(self):
"""Turn the media player off."""
raise NotImplementedError()
async def async_turn_off(self):
"""Turn the media player off."""
await self.hass.async_add_executor_job(self.turn_off)
def mute_volume(self, mute):
"""Mute the volume."""
raise NotImplementedError()
async def async_mute_volume(self, mute):
"""Mute the volume."""
await self.hass.async_add_executor_job(self.mute_volume, mute)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
raise NotImplementedError()
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self.hass.async_add_executor_job(self.set_volume_level, volume)
def media_play(self):
"""Send play command."""
raise NotImplementedError()
async def async_media_play(self):
"""Send play command."""
await self.hass.async_add_executor_job(self.media_play)
def media_pause(self):
"""Send pause command."""
raise NotImplementedError()
async def async_media_pause(self):
"""Send pause command."""
await self.hass.async_add_executor_job(self.media_pause)
def media_stop(self):
"""Send stop command."""
raise NotImplementedError()
async def async_media_stop(self):
"""Send stop command."""
await self.hass.async_add_executor_job(self.media_stop)
def media_previous_track(self):
"""Send previous track command."""
raise NotImplementedError()
async def async_media_previous_track(self):
"""Send previous track command."""
await self.hass.async_add_executor_job(self.media_previous_track)
def media_next_track(self):
"""Send next track command."""
raise NotImplementedError()
async def async_media_next_track(self):
"""Send next track command."""
await self.hass.async_add_executor_job(self.media_next_track)
def media_seek(self, position):
"""Send seek command."""
raise NotImplementedError()
async def async_media_seek(self, position):
"""Send seek command."""
await self.hass.async_add_executor_job(self.media_seek, position)
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
raise NotImplementedError()
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
await self.hass.async_add_executor_job(
ft.partial(self.play_media, media_type, media_id, **kwargs)
)
def select_source(self, source):
"""Select input source."""
raise NotImplementedError()
async def async_select_source(self, source):
"""Select input source."""
await self.hass.async_add_executor_job(self.select_source, source)
def select_sound_mode(self, sound_mode):
"""Select sound mode."""
raise NotImplementedError()
async def async_select_sound_mode(self, sound_mode):
"""Select sound mode."""
await self.hass.async_add_executor_job(self.select_sound_mode, sound_mode)
def clear_playlist(self):
"""Clear players playlist."""
raise NotImplementedError()
async def async_clear_playlist(self):
"""Clear players playlist."""
await self.hass.async_add_executor_job(self.clear_playlist)
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
raise NotImplementedError()
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
await self.hass.async_add_executor_job(self.set_shuffle, shuffle)
def set_repeat(self, repeat):
"""Set repeat mode."""
raise NotImplementedError()
async def async_set_repeat(self, repeat):
"""Set repeat mode."""
await self.hass.async_add_executor_job(self.set_repeat, repeat)
# No need to overwrite these.
@property
def support_play(self):
"""Boolean if play is supported."""
return bool(self.supported_features & SUPPORT_PLAY)
@property
def support_pause(self):
"""Boolean if pause is supported."""
return bool(self.supported_features & SUPPORT_PAUSE)
@property
def support_stop(self):
"""Boolean if stop is supported."""
return bool(self.supported_features & SUPPORT_STOP)
@property
def support_seek(self):
"""Boolean if seek is supported."""
return bool(self.supported_features & SUPPORT_SEEK)
@property
def support_volume_set(self):
"""Boolean if setting volume is supported."""
return bool(self.supported_features & SUPPORT_VOLUME_SET)
@property
def support_volume_mute(self):
"""Boolean if muting volume is supported."""
return bool(self.supported_features & SUPPORT_VOLUME_MUTE)
@property
def support_previous_track(self):
"""Boolean if previous track command supported."""
return bool(self.supported_features & SUPPORT_PREVIOUS_TRACK)
@property
def support_next_track(self):
"""Boolean if next track command supported."""
return bool(self.supported_features & SUPPORT_NEXT_TRACK)
@property
def support_play_media(self):
"""Boolean if play media command supported."""
return bool(self.supported_features & SUPPORT_PLAY_MEDIA)
@property
def support_select_source(self):
"""Boolean if select source command supported."""
return bool(self.supported_features & SUPPORT_SELECT_SOURCE)
@property
def support_select_sound_mode(self):
"""Boolean if select sound mode command supported."""
return bool(self.supported_features & SUPPORT_SELECT_SOUND_MODE)
@property
def support_clear_playlist(self):
"""Boolean if clear playlist command supported."""
return bool(self.supported_features & SUPPORT_CLEAR_PLAYLIST)
@property
def support_shuffle_set(self):
"""Boolean if shuffle is supported."""
return bool(self.supported_features & SUPPORT_SHUFFLE_SET)
@property
def support_grouping(self):
"""Boolean if player grouping is supported."""
return bool(self.supported_features & SUPPORT_GROUPING)
async def async_toggle(self):
"""Toggle the power on the media player."""
if hasattr(self, "toggle"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.toggle)
return
if self.state in [STATE_OFF, STATE_IDLE]:
await self.async_turn_on()
else:
await self.async_turn_off()
async def async_volume_up(self):
"""Turn volume up for media player.
This method is a coroutine.
"""
if hasattr(self, "volume_up"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.volume_up)
return
        if (
            self.volume_level is not None
            and self.volume_level < 1
            and self.supported_features & SUPPORT_VOLUME_SET
        ):
            await self.async_set_volume_level(min(1, self.volume_level + 0.1))
async def async_volume_down(self):
"""Turn volume down for media player.
This method is a coroutine.
"""
if hasattr(self, "volume_down"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.volume_down)
return
        if (
            self.volume_level is not None
            and self.volume_level > 0
            and self.supported_features & SUPPORT_VOLUME_SET
        ):
            await self.async_set_volume_level(max(0, self.volume_level - 0.1))
async def async_media_play_pause(self):
"""Play or pause the media player."""
if hasattr(self, "media_play_pause"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.media_play_pause)
return
if self.state == STATE_PLAYING:
await self.async_media_pause()
else:
await self.async_media_play()
@property
def entity_picture(self):
"""Return image of the media playing."""
if self.state == STATE_OFF:
return None
if self.media_image_remotely_accessible:
return self.media_image_url
return self.media_image_local
@property
def media_image_local(self):
"""Return local url to media image."""
image_hash = self.media_image_hash
if image_hash is None:
return None
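        # Illustrative shape of the generated URL (values are placeholders):
        #   /api/media_player_proxy/media_player.living_room?token=<hex>&cache=<image_hash>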
return (
f"/api/media_player_proxy/{self.entity_id}?"
f"token={self.access_token}&cache={image_hash}"
)
@property
def capability_attributes(self):
"""Return capability attributes."""
supported_features = self.supported_features or 0
data = {}
if supported_features & SUPPORT_SELECT_SOURCE:
source_list = self.source_list
if source_list:
data[ATTR_INPUT_SOURCE_LIST] = source_list
if supported_features & SUPPORT_SELECT_SOUND_MODE:
sound_mode_list = self.sound_mode_list
if sound_mode_list:
data[ATTR_SOUND_MODE_LIST] = sound_mode_list
return data
@final
@property
def state_attributes(self):
"""Return the state attributes."""
if self.state == STATE_OFF:
return None
state_attr = {}
for attr in ATTR_TO_PROPERTY:
value = getattr(self, attr)
if value is not None:
state_attr[attr] = value
if self.media_image_remotely_accessible:
state_attr["entity_picture_local"] = self.media_image_local
if self.support_grouping:
state_attr[ATTR_GROUP_MEMBERS] = self.group_members
return state_attr
async def async_browse_media(
self,
media_content_type: str | None = None,
media_content_id: str | None = None,
) -> BrowseMedia:
"""Return a BrowseMedia instance.
The BrowseMedia instance will be used by the
"media_player/browse_media" websocket command.
"""
raise NotImplementedError()
def join_players(self, group_members):
"""Join `group_members` as a player group with the current player."""
raise NotImplementedError()
async def async_join_players(self, group_members):
"""Join `group_members` as a player group with the current player."""
await self.hass.async_add_executor_job(self.join_players, group_members)
def unjoin_player(self):
"""Remove this player from any group."""
raise NotImplementedError()
async def async_unjoin_player(self):
"""Remove this player from any group."""
await self.hass.async_add_executor_job(self.unjoin_player)
async def _async_fetch_image_from_cache(self, url):
"""Fetch image.
Images are cached in memory (the images are typically 10-100kB in size).
"""
cache_images = ENTITY_IMAGE_CACHE[CACHE_IMAGES]
cache_maxsize = ENTITY_IMAGE_CACHE[CACHE_MAXSIZE]
if urlparse(url).hostname is None:
url = f"{get_url(self.hass)}{url}"
if url not in cache_images:
cache_images[url] = {CACHE_LOCK: asyncio.Lock()}
async with cache_images[url][CACHE_LOCK]:
if CACHE_CONTENT in cache_images[url]:
return cache_images[url][CACHE_CONTENT]
(content, content_type) = await self._async_fetch_image(url)
async with cache_images[url][CACHE_LOCK]:
cache_images[url][CACHE_CONTENT] = content, content_type
while len(cache_images) > cache_maxsize:
cache_images.popitem(last=False)
return content, content_type
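
    # Note on the cache above: ENTITY_IMAGE_CACHE[CACHE_IMAGES] is an
    # OrderedDict, so popitem(last=False) evicts the oldest-inserted URL
    # first (FIFO), capping memory use at CACHE_MAXSIZE (16) images.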
async def _async_fetch_image(self, url):
"""Retrieve an image."""
content, content_type = (None, None)
websession = async_get_clientsession(self.hass)
with suppress(asyncio.TimeoutError), async_timeout.timeout(10):
response = await websession.get(url)
if response.status == HTTP_OK:
content = await response.read()
content_type = response.headers.get(CONTENT_TYPE)
if content_type:
content_type = content_type.split(";")[0]
if content is None:
_LOGGER.warning("Error retrieving proxied image from %s", url)
return content, content_type
def get_browse_image_url(
self,
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> str:
"""Generate an url for a media browser image."""
url_path = (
f"/api/media_player_proxy/{self.entity_id}/browse_media"
f"/{media_content_type}/{media_content_id}"
)
url_query = {"token": self.access_token}
if media_image_id:
url_query["media_image_id"] = media_image_id
return str(URL(url_path).with_query(url_query))
class MediaPlayerImageView(HomeAssistantView):
"""Media player view to serve an image."""
requires_auth = False
url = "/api/media_player_proxy/{entity_id}"
name = "api:media_player:image"
extra_urls = [
url + "/browse_media/{media_content_type}/{media_content_id}",
]
def __init__(self, component):
"""Initialize a media player view."""
self.component = component
async def get(
self,
request: web.Request,
entity_id: str,
media_content_type: str | None = None,
media_content_id: str | None = None,
) -> web.Response:
"""Start a get request."""
player = self.component.get_entity(entity_id)
if player is None:
status = HTTP_NOT_FOUND if request[KEY_AUTHENTICATED] else HTTP_UNAUTHORIZED
return web.Response(status=status)
authenticated = (
request[KEY_AUTHENTICATED]
or request.query.get("token") == player.access_token
)
if not authenticated:
return web.Response(status=HTTP_UNAUTHORIZED)
if media_content_type and media_content_id:
media_image_id = request.query.get("media_image_id")
data, content_type = await player.async_get_browse_image(
media_content_type, media_content_id, media_image_id
)
else:
data, content_type = await player.async_get_media_image()
if data is None:
return web.Response(status=HTTP_INTERNAL_SERVER_ERROR)
headers: LooseHeaders = {CACHE_CONTROL: "max-age=3600"}
return web.Response(body=data, content_type=content_type, headers=headers)
@websocket_api.websocket_command(
{
vol.Required("type"): "media_player_thumbnail",
vol.Required("entity_id"): cv.entity_id,
}
)
@websocket_api.async_response
async def websocket_handle_thumbnail(hass, connection, msg):
"""Handle get media player cover command.
Async friendly.
"""
component = hass.data[DOMAIN]
player = component.get_entity(msg["entity_id"])
if player is None:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
_LOGGER.warning(
"The websocket command media_player_thumbnail is deprecated. Use /api/media_player_proxy instead"
)
data, content_type = await player.async_get_media_image()
if data is None:
connection.send_message(
websocket_api.error_message(
msg["id"], "thumbnail_fetch_failed", "Failed to fetch thumbnail"
)
)
return
await connection.send_big_result(
msg["id"],
{
"content_type": content_type,
"content": base64.b64encode(data).decode("utf-8"),
},
)
@websocket_api.websocket_command(
{
vol.Required("type"): "media_player/browse_media",
vol.Required("entity_id"): cv.entity_id,
vol.Inclusive(
ATTR_MEDIA_CONTENT_TYPE,
"media_ids",
"media_content_type and media_content_id must be provided together",
): str,
vol.Inclusive(
ATTR_MEDIA_CONTENT_ID,
"media_ids",
"media_content_type and media_content_id must be provided together",
): str,
}
)
@websocket_api.async_response
async def websocket_browse_media(hass, connection, msg):
"""
Browse media available to the media_player entity.
To use, media_player integrations can implement MediaPlayerEntity.async_browse_media()
"""
component = hass.data[DOMAIN]
player: MediaPlayerDevice | None = component.get_entity(msg["entity_id"])
if player is None:
connection.send_error(msg["id"], "entity_not_found", "Entity not found")
return
if not player.supported_features & SUPPORT_BROWSE_MEDIA:
connection.send_message(
websocket_api.error_message(
msg["id"], ERR_NOT_SUPPORTED, "Player does not support browsing media"
)
)
return
media_content_type = msg.get(ATTR_MEDIA_CONTENT_TYPE)
media_content_id = msg.get(ATTR_MEDIA_CONTENT_ID)
try:
payload = await player.async_browse_media(media_content_type, media_content_id)
except NotImplementedError:
_LOGGER.error(
"%s allows media browsing but its integration (%s) does not",
player.entity_id,
player.platform.platform_name,
)
connection.send_message(
websocket_api.error_message(
msg["id"],
ERR_NOT_SUPPORTED,
"Integration does not support browsing media",
)
)
return
except BrowseError as err:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_UNKNOWN_ERROR, str(err))
)
return
# For backwards compat
if isinstance(payload, BrowseMedia):
payload = payload.as_dict()
else:
_LOGGER.warning("Browse Media should use new BrowseMedia class")
connection.send_result(msg["id"], payload)
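# Example browse request (illustrative values). Note the vol.Inclusive schema
# above: media_content_type and media_content_id must be given together.
#
#   -> {"id": 7, "type": "media_player/browse_media",
#       "entity_id": "media_player.living_room",
#       "media_content_type": "album", "media_content_id": "abc123"}
#   <- result payload is BrowseMedia.as_dict() (see the class below)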
class MediaPlayerDevice(MediaPlayerEntity):
"""ABC for media player devices (for backwards compatibility)."""
def __init_subclass__(cls, **kwargs):
"""Print deprecation warning."""
super().__init_subclass__(**kwargs)
_LOGGER.warning(
"MediaPlayerDevice is deprecated, modify %s to extend MediaPlayerEntity",
cls.__name__,
)
class BrowseMedia:
"""Represent a browsable media file."""
def __init__(
self,
*,
media_class: str,
media_content_id: str,
media_content_type: str,
title: str,
can_play: bool,
can_expand: bool,
children: list[BrowseMedia] | None = None,
children_media_class: str | None = None,
thumbnail: str | None = None,
) -> None:
"""Initialize browse media item."""
self.media_class = media_class
self.media_content_id = media_content_id
self.media_content_type = media_content_type
self.title = title
self.can_play = can_play
self.can_expand = can_expand
self.children = children
self.children_media_class = children_media_class
self.thumbnail = thumbnail
def as_dict(self, *, parent: bool = True) -> dict:
"""Convert Media class to browse media dictionary."""
if self.children_media_class is None:
self.calculate_children_class()
response = {
"title": self.title,
"media_class": self.media_class,
"media_content_type": self.media_content_type,
"media_content_id": self.media_content_id,
"can_play": self.can_play,
"can_expand": self.can_expand,
"children_media_class": self.children_media_class,
"thumbnail": self.thumbnail,
}
if not parent:
return response
if self.children:
response["children"] = [
child.as_dict(parent=False) for child in self.children
]
else:
response["children"] = []
return response
def calculate_children_class(self) -> None:
"""Count the children media classes and calculate the correct class."""
if self.children is None or len(self.children) == 0:
return
self.children_media_class = MEDIA_CLASS_DIRECTORY
proposed_class = self.children[0].media_class
if all(child.media_class == proposed_class for child in self.children):
self.children_media_class = proposed_class
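# Minimal usage sketch (not part of the original module, values made up):
# build a directory node with two playable children and serialize it.
#
#   root = BrowseMedia(
#       media_class=MEDIA_CLASS_DIRECTORY, media_content_id="lib",
#       media_content_type="library", title="Library",
#       can_play=False, can_expand=True,
#       children=[
#           BrowseMedia(media_class="track", media_content_id="t1",
#                       media_content_type="music", title="Track 1",
#                       can_play=True, can_expand=False),
#           BrowseMedia(media_class="track", media_content_id="t2",
#                       media_content_type="music", title="Track 2",
#                       can_play=True, can_expand=False),
#       ])
#   payload = root.as_dict()
#   # calculate_children_class() sets children_media_class to "track" here,
#   # because every child shares that media_class.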
| apache-2.0 | 7,419,448,609,892,268,000 | 30.31225 | 105 | 0.620727 | false |
Bdanilko/EdPy | src/lib/program.py | 1 | 21793 | #!/usr/bin/env python2
# * **************************************************************** **
# File: program.py
# Requires: Python 2.7+ (but not Python 3.0+)
# Note: For history, changes and dates for this file, consult git.
# Author: Brian Danilko, Likeable Software ([email protected])
# Copyright 2015-2017 Microbric Pty Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License (in the doc/licenses directory)
# for more details.
#
# * **************************************************************** */
""" Module contains Objects that represent the Ed.Py program """
from __future__ import print_function
from __future__ import absolute_import
class EdPyError(Exception):
def __init__(self):
pass
class ParseError(EdPyError):
def __init__(self, rawmsg=""):
self.rawmsg = rawmsg
class OptError(EdPyError):
def __init__(self, rawmsg=""):
self.rawmsg = rawmsg
class CompileError(EdPyError):
def __init__(self, rawmsg=""):
self.rawmsg = rawmsg
class AssemblerError(EdPyError):
def __init__(self, rawmsg=""):
self.rawmsg = rawmsg
class UnclassifiedError(Exception):
def __init__(self, rawmsg):
self.rawmsg = rawmsg
class Marker(object):
"""Mark each source line (but not worrying about column number)"""
def __init__(self, line, col=None):
self.kind = "Marker"
self.line = line
self.col = col
def GetValues(self):
return []
def GetTarget(self):
return None
def __repr__(self):
return "<program.Marker source line:{0}>".format(self.line)
class ControlMarker(object):
"""Marks start/else/end of If structures, While loops, For loops
and Boolean Checks (for short-circuit evaluation). This marks a
series of locations that tests can jump to."""
def __init__(self, markerNumber, name, end="start"):
self.kind = "ControlMarker"
self.num = markerNumber
self.name = name # string - type of loop: "If", "While", "For", "Or", "And"
self.end = end # a string - one of "start", "else", "end"
self.CheckData()
def GetNumber(self):
return self.num
def CheckData(self):
if (self.name not in ("If", "While", "For", "Or", "And")):
raise UnclassifiedError("Invalid program.ControlMarker() name.")
if (self.end not in ("start", "else", "end")):
raise UnclassifiedError("Invalid program.ControlMarker() end.")
def GetValues(self):
return []
def GetTarget(self):
return None
def __repr__(self):
msg = "<program.ControlMarker marker:{0} {1} {2}>".format(self.num, self.name, self.end)
return msg
class LoopControl(object):
"""Used at the top of If and While loops (where a test needs to be evaluated).
The markerNumber is the same as used in ControlMarkers, so jumps to locations
marked by the corresponding ControlMarker will be done."""
def __init__(self, markerNumber, name=None, test=None):
self.kind = "LoopControl"
self.num = markerNumber
self.name = name # a string "If", "While"
self.test = test # a Value object. if evaluates to 0 then False, else True
def GetValues(self):
return [self.test]
def GetTarget(self):
return None
def __repr__(self):
msg = "<program.LoopControl {0}, name:{1}, test:{2}>".format(
self.num, self.name, self.test)
return msg
class LoopModifier(object):
"""Mark, inside ControlMarkers, Breaks and Continues. As the markerNumber
is the same as the corresponding ControlMarker markerNumber, jumps to the
"start" or "end" is easy."""
def __init__(self, markerNumber, name=None):
self.kind = "LoopModifier"
self.num = markerNumber
self.name = name # a string "Pass", "Break", "Continue"
def GetValues(self):
return []
def GetTarget(self):
return None
def __repr__(self):
msg = "<program.LoopModifier {0}, name:{1}>".format(
self.num, self.name)
return msg
class ForControl(object):
"""In a for loop, this will check that arrayValue is still inside
the array. If not a jump to the "end" of the corresponding ControlMarker
will be made."""
def __init__(self, markerNumber, arrayValue=None,
constantLimit=None, currentValue=None):
self.kind = "ForControl"
self.num = markerNumber
self.arrayValue = arrayValue # a value with name and iVariable
self.constantLimit = constantLimit # a value
self.currentValue = currentValue # a value
if ((self.arrayValue is None and self.constantLimit is None) or
(self.arrayValue is not None and self.constantLimit is not None) or
(self.currentValue is None and self.constantLimit is not None) or
(self.currentValue is not None and self.constantLimit is None)):
raise UnclassifiedError("Invalid program.ForControl() condition.")
def IsRange(self):
return self.constantLimit is not None
def IsArray(self):
return self.arrayValue is not None
def GetValues(self):
if (self.IsArray()):
return [self.arrayValue]
else:
return [self.constantLimit, self.currentValue]
def GetTarget(self):
return None
def __repr__(self):
msg = "<program.ForControl {0}, ".format(self.num)
if (self.IsArray()):
msg += "arrayValue:{0}>".format(self.arrayValue)
else:
msg += "constantLimit:{0}, currentValue:{1}>".format(self.constantLimit, self.currentValue)
return msg
class BoolCheck(object):
"""In a BoolOp, there is a need to short-curcuit evaluation on pass (or) or
failure (and). This object is used in each location where a value is
checked, and possible short-curcuit eval. may require a jump to the
"end" of the corresponding ControlMarker"""
def __init__(self, markerNumber, op=None, value=None, target=None):
"""An binary operation on constants or variables, assigned to a variable"""
self.kind = "BoolCheck"
self.num = markerNumber
self.op = op # a string - the boolean op ("Or", "And", "Done")
# Done signifies to put the non-shortcircuit value in target
self.value = value # a Value object which has the left result of the op
self.target = target # a Value object which gets the result on short-circuit
def GetValues(self):
return [self.value]
def GetTarget(self):
return self.target
def __repr__(self):
return "<program.BoolCheck {0} {1} check:{2}, target{3}>".format(
self.num, self.op, self.value, self.target)
class Value(object):
"""Stores an integer variable or constant or string constant, and depending on where it is used
in the other objects, can represent a STORE or a LOAD. Note that for a
    STORE, this object cannot represent a constant."""
def __init__(self, constant=None, name=None, iConstant=None, iVariable=None,
strConst=None, listConst=None,
tsRef=None, listRef=None, objectRef=None):
self.kind = "Value"
self.name = name # The name of the variable
self.indexConstant = iConstant # if not None, then the value is a slice at this index
self.indexVariable = iVariable
self.constant = constant # if not None, then this is the value (integer)
self.strConst = strConst # if not None, then a string
self.listConst = listConst # if not None, then a list
self.tsRef = tsRef # if not None, then a reference to a tunestring variable
self.listRef = listRef # if not None, then a reference to a list variable
self.objectRef = objectRef # if not None, then a reference to an object variable
self.loopTempStart = 9999 # All temps above this number are loop control temps
# check that the object has been created consistently
if (((self.IsIntConst()) and
((self.name is not None) or self.IsSlice() or
self.IsStrConst() or self.IsListConst() or self.IsRef())) or
((self.IsStrConst()) and
((self.name is not None) or self.IsSlice() or self.IsRef() or
self.IsListConst() or self.IsIntConst())) or
((self.IsListConst()) and
((self.name is not None) or self.IsSlice() or self.IsRef() or
self.IsStrConst() or self.IsIntConst())) or
(self.IsRef() and
((self.name is not None) or self.IsSlice() or
self.IsStrConst() or self.IsListConst() or self.IsIntConst())) or
((self.indexConstant is not None) and (self.indexVariable is not None)) or
((self.indexConstant is not None) and (self.name is None)) or
((self.indexVariable is not None) and (self.name is None)) or
((self.tsRef is not None) and
((self.listRef is not None) or (self.objectRef is not None))) or
((self.listRef is not None) and
((self.tsRef is not None) or (self.objectRef is not None))) or
((self.objectRef is not None) and
((self.listRef is not None) or (self.tsRef is not None)))):
raise UnclassifiedError("Invalid program.Value() constructor arguments")
def IsIntConst(self):
return self.constant is not None
def IsStrConst(self):
return (self.strConst is not None)
def IsListConst(self):
return (self.listConst is not None)
def IsTSRef(self):
return self.tsRef is not None
def IsListRef(self):
return self.listRef is not None
def IsObjRef(self):
return self.objectRef is not None
def IsRef(self):
return self.IsTSRef() or self.IsListRef() or self.IsObjRef()
def IsConstant(self):
return self.IsIntConst() or self.IsStrConst() or self.IsListConst()
def IsSimpleVar(self):
return (not (self.IsConstant() or self.IsSlice() or self.IsRef()))
def IsSlice(self):
return self.indexConstant is not None or self.indexVariable is not None
def IsDotted(self):
if (not self.IsTemp()):
            left, sep, right = self.name.partition('.')
if (right != ""):
return True
return False
def IsTemp(self):
if self.IsSimpleVar():
if type(self.name) is int:
return True
return False
def IsSimpleTemp(self):
return self.IsTemp() and (self.name < self.loopTempStart)
def IsSliceWithSimpleTempIndex(self):
return (self.IsSlice() and self.indexVariable is not None and
type(self.indexVariable) is int and (self.indexVariable < self.loopTempStart))
def IsSliceWithVarIndex(self):
return self.IsSlice() and self.indexVariable is not None and type(self.indexVariable) is not int
def IsAssignable(self):
return not (self.IsRef() or self.IsConstant())
def UsesValue(self, otherValue):
if (otherValue.IsSimpleVar()):
if ((self.IsSimpleVar() and self.name == otherValue.name) or
(self.IsSlice() and self.indexVariable == otherValue.name)):
return True
elif (otherValue.IsSlice()):
return self == otherValue
return False
def Name(self):
if self.IsConstant():
return "????"
elif not self.IsSlice():
if type(self.name) is int:
return "TEMP-" + str(self.name)
else:
return self.name
elif self.indexConstant is not None:
return self.name + "[" + str(self.indexConstant) + "]"
elif type(self.indexVariable) is int:
return self.name + "[TEMP-" + str(self.indexVariable) + "]"
else:
return self.name + "[" + self.indexVariable + "]"
def __eq__(self, rhs):
return ((self.kind == rhs.kind) and
(self.name == rhs.name) and
(self.indexConstant == rhs.indexConstant) and
(self.indexVariable == rhs.indexVariable) and
(self.constant == rhs.constant) and
(self.strConst == rhs.strConst) and
(self.listConst == rhs.listConst) and
(self.tsRef == rhs.tsRef) and
(self.listRef == rhs.listRef) and
(self.objectRef == rhs.objectRef))
def GetValues(self):
return [self]
def GetTarget(self):
return None
def __repr__(self):
if self.constant is not None:
return "<program.Value const:{0}>".format(self.constant)
elif self.IsStrConst():
return "<program.Value const:\"{0}\">".format(self.strConst)
elif self.IsListConst():
return "<program.Value const:{0}>".format(self.listConst)
elif self.IsTSRef():
return "<program.Value T_Ref:{0}>".format(self.tsRef)
elif self.IsListRef():
return "<program.Value L_Ref:{0}>".format(self.listRef)
elif self.IsObjRef():
return "<program.Value O_Ref:{0}>".format(self.objectRef)
else:
return "<program.Value name:{0}>".format(self.Name())
class UAssign(object):
"""Represent an Unary Op with assignment to a variable (target)"""
def __init__(self, target=None, op=None, operand=None):
"""A unary operation on constants or variables, assigned to a variable"""
self.kind = "UAssign"
self.target = target # a value object
        self.operation = op # a unary operation (could be UAdd for identity)
self.operand = operand # (used for binary op or unary op) if used then a Value object
def GetValues(self):
if (self.operand is None):
return []
else:
return [self.operand]
def GetTarget(self):
return self.target
def __repr__(self):
msg = "<program.UAssign {0} = ".format(self.target)
msg += "{0} {1}>".format(self.operation, self.operand)
return msg
class BAssign(object):
"""Represent a Binary Op (including logical tests) with assignment to
a variable (target)"""
def __init__(self, target=None, left=None, op=None, right=None):
"""An binary operation on constants or variables, assigned to a variable"""
self.kind = "BAssign"
self.target = target # a value object
self.left = left # a Value object
self.operation = op # binary operation
self.right = right # a Value object
def GetValues(self):
return [self.left, self.right]
def GetTarget(self):
return self.target
def __repr__(self):
msg = "<program.BAssign {0} = ".format(self.target)
msg += "{0} {1} {2}>".format(self.left, self.operation, self.right)
return msg
class Call(object):
"""Calling a function, optionally assigning the result to a variable
(if self.target is not None)."""
def __init__(self, target=None, funcName=None, args=[]):
self.kind = "Call"
self.target = target # a Value object OR CAN BE NONE!
self.funcName = funcName # a String
self.args = args # each arg is a Value object
def GetValues(self):
return self.args
def GetTarget(self):
if (self.target is None):
return None
else:
return self.target
def __repr__(self):
msg = "<program.Call "
if (self.target is not None):
msg += "{0} = ".format(self.target)
msg += "name:{0} with args:{1}>".format(self.funcName, self.args)
return msg
class Return(object):
"""Return an explicit value (an int) or nothing from the function"""
def __init__(self, returnValue=None):
self.kind = "Return"
self.returnValue = returnValue
def IsVoidReturn(self):
return self.returnValue is None
def GetValues(self):
if self.returnValue is None:
return []
else:
return [self.returnValue]
def GetTarget(self):
return None
def __repr__(self):
return "<program.Return {0}>".format(self.returnValue)
# ######## Top level objects ##############################
class Function(object):
def __init__(self, name, internalFunc = False):
self.kind = "Function"
self.name = name
self.docString = ""
self.internalFunction = internalFunc
self.globalAccess = [] # Global variable names can write too
self.localVar = {} # local variable types (including temps)
self.args = []
self.callsTo = [] # functions called from this function
self.maxSimpleTemps = 0 # Number of integer temps needed,
# they will be from 0 - (maxSimpleTemps - 1).
self.body = [] # contains objects of type 'Op', 'Call'
self.returnsValue = False # explicit return with a value
self.returnsNone = False # explicit return but with no value
def __repr__(self):
msg = "<program.Function name:{0}, doc:|{1}|, ".format(
self.name, self.docString)
msg += "args:{0}, lclVars:{1}, glbWriteVars:{2}, maxSimpleTemps:{3}, internal:{4}".format(
self.args, self.localVar, self.globalAccess, self.maxSimpleTemps, self.internalFunction)
return msg + "returnsValue:{0}, calls:{1}, body:{2}>".format(
self.returnsValue, self.callsTo, self.body)
def IsInternalFunction(self):
return self.internalFunction
class Class(object):
def __init__(self, name):
self.kind = "Class"
self.name = name
self.docString = ""
self.funcNames = []
def __repr__(self):
return "<program.Class name:{}, doc:|{}|, funcNames:{}>".format(
self.name, self.docString, self.funcNames)
class Program(object):
def __init__(self):
self.kind = "Program"
self.EdVariables = {}
self.Import = []
mainFunction = Function("__main__")
self.Function = {"__main__": mainFunction}
self.FunctionSigDict = {}
self.EventHandlers = {}
self.globalVar = {}
self.GlobalTypeDict = {}
self.Class = {}
self.indent = 0
def __repr__(self):
return "<program.Program Import:{}, Global:{}, Function:{}, Class:{}>".format(
self.Import, self.globalVar, self.Function, self.Class)
def Print(self, prefix="", *vars):
if (prefix == "" and len(vars) == 0):
print()
else:
if (prefix.startswith('\n')):
print()
prefix = prefix[1:]
indentSpaces = " " * (self.indent)
if (prefix):
print(indentSpaces, prefix, sep='', end='')
else:
print(indentSpaces, end='')
for v in vars:
print(' ', v, sep='', end='')
print()
def Dump(self, filterOutInternals=True):
"""Dump the full program"""
self.Print("Program")
self.Print("\Edison variables:", self.EdVariables)
self.Print("\nImports:", self.Import)
self.Print("\nGlobals:", self.globalVar)
self.Print("\nClasses:", self.Class)
self.Print("\nFunctions:", self.Function.keys())
self.Print("\nFunction Sigs:", self.FunctionSigDict)
self.Print("\nEvent Handlers:", self.EventHandlers)
self.Print("\nFunction Details:")
self.indent += 2
sigsPrinted = []
for i in self.Function:
if (filterOutInternals and self.Function[i].IsInternalFunction()):
continue
self.Print()
f = self.Function[i]
if (f.IsInternalFunction()):
name = "{}-internal".format(i)
else:
name = i
self.Print("", name)
self.indent += 2
self.Print("Args:", f.args)
if (i in self.FunctionSigDict):
sigsPrinted.append(i)
self.Print("Signature:", self.FunctionSigDict[i])
self.Print("Globals can write:", f.globalAccess)
self.Print("Local vars:", f.localVar)
self.Print("Max simple temps:", f.maxSimpleTemps)
self.Print("Functions called:", f.callsTo)
self.indent += 2
for l in f.body:
if (l.kind == "Marker"):
self.Print()
self.Print("", l)
self.indent -= 4
self.indent -= 2
# header = "\nExternal functions:"
# for i in self.FunctionSigDict:
# if (i not in sigsPrinted):
# if header:
# self.Print(header)
# header = None
# self.Print("External function:", i)
# self.indent += 2
# self.Print("Signature:", self.FunctionSigDict[i])
# self.indent -= 2
| gpl-2.0 | -59,243,137,877,478,696 | 33.757576 | 104 | 0.581976 | false |
kevinkellyspacey/standalone-dell-recovery | Dell/recovery_xml.py | 1 | 5532 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# «recovery_xml» - Helper Class for parsing and using a bto.xml
#
# Copyright (C) 2010-2011, Dell Inc.
#
# Author:
# - Mario Limonciello <[email protected]>
#
# This is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this application; if not, write to the Free Software Foundation, Inc., 51
# Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##################################################################################
import xml.dom.minidom
import codecs
import os
import sys
if sys.version >= '3':
text_type = str
binary_type = bytes
else:
text_type = unicode
binary_type = str
def utf8str(old):
if isinstance(old, text_type):
return old
else:
return text_type(binary_type(old), 'utf-8', errors='ignore')
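# Behaviour sketch (illustrative): utf8str normalises both Python 2 and
# Python 3 inputs to the text type, silently dropping undecodable bytes.
#
#   utf8str(b"caf\xc3\xa9")  # -> u"café"
#   utf8str(u"café")         # -> u"café" (returned unchanged)
#   utf8str(b"\xff")         # -> u"" (invalid UTF-8 is ignored)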
class BTOxml:
def __init__(self):
self.dom = None
self.new = False
self.load_bto_xml()
def set_base(self, name, md5=''):
"""Sets the base image"""
self.replace_node_contents('base', name)
if md5:
self.dom.getElementsByTagName('base')[0].setAttribute('md5', md5)
def append_fish(self, fish_type, name, md5='', srv=''):
"""Appends a fish package"""
elements = self.dom.getElementsByTagName('fish')
new_element = self.dom.createElement(fish_type)
if md5:
new_element.setAttribute('md5', md5)
if srv:
new_element.setAttribute('srv', srv)
new_node = self.dom.createTextNode(name)
new_element.appendChild(new_node)
elements[0].appendChild(new_element)
def fetch_node_contents(self, tag):
"""Fetches all children of a tag"""
elements = self.dom.getElementsByTagName(tag)
values = text_type('')
if len(elements) > 1:
values = []
if elements:
for element in elements:
child = element.firstChild
if child:
if len(elements) > 1:
values.append(child.nodeValue.strip())
else:
values = child.nodeValue.strip()
return values
def replace_node_contents(self, tag, new):
"""Replaces a node contents (that we assume exists)"""
elements = self.dom.getElementsByTagName(tag)
if not elements:
print("Missing elements for tag")
return
if elements[0].hasChildNodes():
for node in elements[0].childNodes:
elements[0].removeChild(node)
noob = self.dom.createTextNode(utf8str(new))
elements[0].appendChild(noob)
def load_bto_xml(self, fname=None):
"""Initialize an XML file into memory"""
def create_top_level(dom):
"""Initializes a top level document"""
element = dom.createElement('bto')
dom.appendChild(element)
return element
def create_tag(dom, tag, append_to):
"""Create a subtag as necessary"""
element = dom.getElementsByTagName(tag)
if element:
element = element[0]
else:
element = dom.createElement(tag)
append_to.appendChild(element)
return element
if fname:
self.new = False
try:
if os.path.exists(fname):
with open(fname, 'rb') as f:
fname = f.read()
self.dom = xml.dom.minidom.parseString(utf8str(fname))
except xml.parsers.expat.ExpatError:
print("Damaged XML file, regenerating")
if not (fname and self.dom):
self.new = True
self.dom = xml.dom.minidom.Document()
        # test for top-level bto object
if self.dom.firstChild and self.dom.firstChild.localName != 'bto':
self.dom.removeChild(self.dom.firstChild)
if not self.dom.firstChild:
bto = create_top_level(self.dom)
else:
bto = self.dom.getElementsByTagName('bto')[0]
#create all our second and third level tags that are supported
for tag in ['date', 'versions', 'base', 'fid', 'fish', 'logs']:
element = create_tag(self.dom, tag, bto)
subtags = []
if tag == 'versions':
subtags = ['os', 'iso', 'generator', 'bootstrap', 'ubiquity']
elif tag == 'fid':
subtags = ['git_tag', 'deb_archive']
elif tag == 'logs':
subtags = ['syslog', 'debug']
for subtag in subtags:
create_tag(self.dom, subtag, element)
def write_xml(self, fname):
"""Writes out a BTO XML file based on the current data"""
with codecs.open(fname, 'w', 'utf-8') as wfd:
if self.new:
self.dom.writexml(wfd, "", " ", "\n", encoding='utf-8')
else:
self.dom.writexml(wfd, encoding='utf-8')
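# Usage sketch (illustrative; file names and values made up): create a fresh
# bto.xml skeleton, record the base image plus one fish package, and save it.
#
#   bto = BTOxml()
#   bto.set_base('base.squashfs', md5='d41d8cd98f00b204e9800998ecf8427e')
#   bto.append_fish('driver', 'fish-pkg.fish', md5='...', srv='SRV123')
#   bto.replace_node_contents('os', 'Ubuntu 12.04')
#   bto.write_xml('/tmp/bto.xml')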
| gpl-2.0 | -9,137,183,234,520,056,000 | 34.677419 | 82 | 0.565099 | false |
BarrelfishOS/barrelfish | tools/harness/machines/gem5.py | 1 | 5282 | ##########################################################################
# Copyright (c) 2012-2016 ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstr 6, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
# Quirks:
# * this only runs in single-core mode, since bootarm=0 is
#   used in the above-mentioned menu.lst
import os, signal, tempfile, subprocess, shutil, time
import debug, machines
from machines import ARMSimulatorBase, MachineFactory, ARMSimulatorOperations
GEM5_PATH = '/home/netos/tools/gem5/gem5-stable-1804'
# gem5 takes quite a while to come up. If we return right away,
# telnet will be opened too early and fails to connect
#
# SG, 2016-10-07: If this is too high, however, and we have an
# early-boot bug gem5 will exit before telnet connects, and we do
# not get the gem5 output at all
GEM5_START_TIMEOUT = 1 # in seconds
class Gem5MachineBase(ARMSimulatorBase):
imagename = "armv7_a15ve_gem5_image"
def __init__(self, options, operations, **kwargs):
super(Gem5MachineBase, self).__init__(options, operations, **kwargs)
def get_buildall_target(self):
return "VExpressEMM-A15"
def get_boot_timeout(self):
# we set this to 10 mins since gem5 is very slow
return 600
def get_test_timeout(self):
# give gem5 tests enough time to complete: skb initialization takes
# about 10 minutes, so set timeout to 25 minutes.
# RH, 2018-08-08 newer version of gem5 is even slower ...
# increased to 50 mins
return 50 * 60
class Gem5MachineBaseOperations(ARMSimulatorOperations):
def __init__(self, machine):
super(Gem5MachineBaseOperations, self).__init__(machine)
self.simulator_start_timeout = GEM5_START_TIMEOUT
# menu.lst template for gem5 is special
# XXX: current template does not work because gem5 coreboot NYI
self.menulst_template = "menu.lst.armv7_a15ve_gem5"
def get_tftp_dir(self):
if self.tftp_dir is None:
debug.verbose('creating temporary directory for Gem5 files')
self.tftp_dir = tempfile.mkdtemp(prefix='harness_gem5_')
debug.verbose('Gem5 install directory is %s' % self.tftp_dir)
return self.tftp_dir
def reboot(self):
self._kill_child()
cmd = self._get_cmdline()
self.telnet_port = 3456
debug.verbose('starting "%s" in gem5.py:reboot' % ' '.join(cmd))
devnull = open('/dev/null', 'w')
# remove ubuntu chroot from environment to make sure gem5 finds the
# right shared libraries
env = dict(os.environ)
if 'LD_LIBRARY_PATH' in env:
del env['LD_LIBRARY_PATH']
self.child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=devnull, env=env)
time.sleep(GEM5_START_TIMEOUT)
class Gem5MachineARM(Gem5MachineBase):
def __init__(self, options, operations, **kwargs):
super(Gem5MachineARM, self).__init__(options, operations, **kwargs)
def get_bootarch(self):
return 'armv7'
def get_platform(self):
return 'a15ve'
class Gem5MachineARMOperations(Gem5MachineBaseOperations):
def set_bootmodules(self, modules):
# write menu.lst in build directory
debug.verbose("writing menu.lst in build directory")
menulst_fullpath = os.path.join(self._machine.options.builds[0].build_dir,
"platforms", "arm", self.menulst_template)
debug.verbose("writing menu.lst in build directory: %s" %
menulst_fullpath)
self._machine._write_menu_lst(modules.get_menu_data("/"), menulst_fullpath)
debug.verbose("building proper gem5 image")
debug.checkcmd(["make", self._machine.imagename],
cwd=self._machine.options.builds[0].build_dir)
# SK: did not test this yet, but should work
# @machines.add_machine
# class Gem5MachineARMSingleCore(Gem5MachineARM):
# name = 'gem5_arm_1'
# def get_ncores(self):
# return 1
# def _get_cmdline(self):
# script_path = os.path.join(self.options.sourcedir, 'tools/arm_gem5', 'gem5script.py')
# return (['gem5.fast', script_path, '--kernel=%s'%self.kernel_img, '--n=%s'%self.get_ncores()]
# + GEM5_CACHES_ENABLE)
class Gem5MachineARMSingleCore(Gem5MachineARM):
name = 'armv7_gem5'
def __init__(self, options, **kwargs):
super(Gem5MachineARMSingleCore, self).__init__(options, Gem5MachineARMSingleCoreOperations(self), **kwargs)
class Gem5MachineARMSingleCoreOperations(Gem5MachineARMOperations):
def _get_cmdline(self):
self.get_free_port()
script_path = \
os.path.join(self._machine.options.sourcedir, 'tools/arm_gem5',
'boot_gem5.sh')
return ([script_path, 'VExpress_EMM', self._machine.kernel_img, GEM5_PATH,
str(self.telnet_port)])
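    # For illustration (paths depend on the local checkout): the command line
    # built above is roughly
    #   ['<sourcedir>/tools/arm_gem5/boot_gem5.sh', 'VExpress_EMM',
    #    '<kernel_img>', GEM5_PATH, '<port chosen by get_free_port()>']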
MachineFactory.addMachine(Gem5MachineARMSingleCore.name, Gem5MachineARMSingleCore,
bootarch="armv7",
platform="a15ve")
| mit | 70,575,520,908,050,710 | 37.554745 | 115 | 0.639152 | false |
sbc100/yapf | yapf/yapflib/format_decision_state.py | 1 | 38486 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a format decision state object that manages whitespace decisions.
Each token is processed one at a time, at which point its whitespace formatting
decisions are made. A graph of potential whitespace formattings is created,
where each node in the graph is a format decision state object. The heuristic
tries formatting the token with and without a newline before it to determine
which one has the least penalty. Therefore, the format decision state object for
each decision needs to be its own unique copy.
Once the heuristic determines the best formatting, it makes a non-dry run pass
through the code to commit the whitespace formatting.
FormatDecisionState: main class exported by this module.
"""
from yapf.yapflib import format_token
from yapf.yapflib import object_state
from yapf.yapflib import split_penalty
from yapf.yapflib import style
from yapf.yapflib import unwrapped_line
class FormatDecisionState(object):
"""The current state when indenting an unwrapped line.
The FormatDecisionState object is meant to be copied instead of referenced.
Attributes:
first_indent: The indent of the first token.
column: The number of used columns in the current line.
next_token: The next token to be formatted.
paren_level: The level of nesting inside (), [], and {}.
lowest_level_on_line: The lowest paren_level on the current line.
newline: Indicates if a newline is added along the edge to this format
decision state node.
previous: The previous format decision state in the decision tree.
stack: A stack (of _ParenState) keeping track of properties applying to
parenthesis levels.
comp_stack: A stack (of ComprehensionState) keeping track of properties
applying to comprehensions.
ignore_stack_for_comparison: Ignore the stack of _ParenState for state
comparison.
"""
def __init__(self, line, first_indent):
"""Initializer.
Initializes to the state after placing the first token from 'line' at
'first_indent'.
Arguments:
line: (UnwrappedLine) The unwrapped line we're currently processing.
first_indent: (int) The indent of the first token.
"""
self.next_token = line.first
self.column = first_indent
self.line = line
self.paren_level = 0
self.lowest_level_on_line = 0
self.ignore_stack_for_comparison = False
self.stack = [_ParenState(first_indent, first_indent)]
self.comp_stack = []
self.first_indent = first_indent
self.newline = False
self.previous = None
self.column_limit = style.Get('COLUMN_LIMIT')
def Clone(self):
"""Clones a FormatDecisionState object."""
new = FormatDecisionState(self.line, self.first_indent)
new.next_token = self.next_token
new.column = self.column
new.line = self.line
new.paren_level = self.paren_level
new.line.depth = self.line.depth
new.lowest_level_on_line = self.lowest_level_on_line
new.ignore_stack_for_comparison = self.ignore_stack_for_comparison
new.first_indent = self.first_indent
new.newline = self.newline
new.previous = self.previous
new.stack = [state.Clone() for state in self.stack]
new.comp_stack = [state.Clone() for state in self.comp_stack]
return new
def __eq__(self, other):
# Note: 'first_indent' is implicit in the stack. Also, we ignore 'previous',
# because it shouldn't have a bearing on this comparison. (I.e., it will
# report equal if 'next_token' does.)
return (self.next_token == other.next_token and
self.column == other.column and
self.paren_level == other.paren_level and
self.line.depth == other.line.depth and
self.lowest_level_on_line == other.lowest_level_on_line and
(self.ignore_stack_for_comparison or
other.ignore_stack_for_comparison or
self.stack == other.stack and self.comp_stack == other.comp_stack))
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.next_token, self.column, self.paren_level,
self.line.depth, self.lowest_level_on_line))
def __repr__(self):
return ('column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' %
(self.column, repr(self.next_token), self.paren_level,
'\n\t'.join(repr(s) for s in self.stack) + ']'))
def CanSplit(self, must_split):
"""Determine if we can split before the next token.
Arguments:
must_split: (bool) A newline was required before this token.
Returns:
True if the line can be split before the next token.
"""
current = self.next_token
previous = current.previous_token
if current.is_pseudo_paren:
return False
if (not must_split and
format_token.Subtype.DICTIONARY_KEY_PART in current.subtypes and
format_token.Subtype.DICTIONARY_KEY not in current.subtypes and
not style.Get('ALLOW_MULTILINE_DICTIONARY_KEYS')):
# In some situations, a dictionary may be multiline, but pylint doesn't
# like it. So don't allow it unless forced to.
return False
if (not must_split and
format_token.Subtype.DICTIONARY_VALUE in current.subtypes and
not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE')):
return False
if previous and previous.value == '(' and current.value == ')':
# Don't split an empty function call list if we aren't splitting before
# dict values.
token = previous.previous_token
while token:
prev = token.previous_token
if not prev or prev.name not in {'NAME', 'DOT'}:
break
token = token.previous_token
if token and format_token.Subtype.DICTIONARY_VALUE in token.subtypes:
if not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE'):
return False
if previous and previous.value == '.' and current.value == '.':
return False
return current.can_break_before
def MustSplit(self):
"""Returns True if the line must split before the next token."""
current = self.next_token
previous = current.previous_token
if current.is_pseudo_paren:
return False
if current.must_break_before:
return True
if not previous:
return False
if style.Get('SPLIT_ALL_COMMA_SEPARATED_VALUES') and previous.value == ',':
return True
if (self.stack[-1].split_before_closing_bracket and
current.value in '}]' and style.Get('SPLIT_BEFORE_CLOSING_BRACKET')):
# Split before the closing bracket if we can.
return current.node_split_penalty != split_penalty.UNBREAKABLE
if (current.value == ')' and previous.value == ',' and
not _IsSingleElementTuple(current.matching_bracket)):
return True
# Prevent splitting before the first argument in compound statements
# with the exception of function declarations.
if (style.Get('SPLIT_BEFORE_FIRST_ARGUMENT') and
_IsCompoundStatement(self.line.first) and
not _IsFunctionDef(self.line.first)):
return False
###########################################################################
# List Splitting
if (style.Get('DEDENT_CLOSING_BRACKETS') or
style.Get('SPLIT_BEFORE_FIRST_ARGUMENT')):
bracket = current if current.ClosesScope() else previous
if format_token.Subtype.SUBSCRIPT_BRACKET not in bracket.subtypes:
if bracket.OpensScope():
if style.Get('COALESCE_BRACKETS'):
if current.OpensScope():
# Prefer to keep all opening brackets together.
return False
if (not _IsLastScopeInLine(bracket) or
unwrapped_line.IsSurroundedByBrackets(bracket)):
last_token = bracket.matching_bracket
else:
last_token = _LastTokenInLine(bracket.matching_bracket)
if not self._FitsOnLine(bracket, last_token):
# Split before the first element if the whole list can't fit on a
# single line.
self.stack[-1].split_before_closing_bracket = True
return True
elif style.Get('DEDENT_CLOSING_BRACKETS') and current.ClosesScope():
# Split before and dedent the closing bracket.
return self.stack[-1].split_before_closing_bracket
if (style.Get('SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN') and
current.is_name):
# An expression that's surrounded by parens gets split after the opening
# parenthesis.
def SurroundedByParens(token):
"""Check if it's an expression surrounded by parentheses."""
while token:
if token.value == ',':
return False
if token.value == ')':
return not token.next_token
if token.OpensScope():
token = token.matching_bracket.next_token
else:
token = token.next_token
return False
if (previous.value == '(' and not previous.is_pseudo_paren and
not unwrapped_line.IsSurroundedByBrackets(previous)):
pptoken = previous.previous_token
if (pptoken and not pptoken.is_name and not pptoken.is_keyword and
SurroundedByParens(current)):
return True
if (current.is_name or current.is_string) and previous.value == ',':
# If the list has function calls in it and the full list itself cannot
# fit on the line, then we want to split. Otherwise, we'll get something
# like this:
#
# X = [
# Bar(xxx='some string',
# yyy='another long string',
# zzz='a third long string'), Bar(
# xxx='some string',
# yyy='another long string',
# zzz='a third long string')
# ]
#
# or when a string formatting syntax.
func_call_or_string_format = False
tok = current.next_token
if current.is_name:
while tok and (tok.is_name or tok.value == '.'):
tok = tok.next_token
func_call_or_string_format = tok and tok.value == '('
elif current.is_string:
while tok and tok.is_string:
tok = tok.next_token
func_call_or_string_format = tok and tok.value == '%'
if func_call_or_string_format:
open_bracket = unwrapped_line.IsSurroundedByBrackets(current)
if open_bracket:
if open_bracket.value in '[{':
if not self._FitsOnLine(open_bracket,
open_bracket.matching_bracket):
return True
elif tok.value == '(':
if not self._FitsOnLine(current, tok.matching_bracket):
return True
###########################################################################
# Dict/Set Splitting
if (style.Get('EACH_DICT_ENTRY_ON_SEPARATE_LINE') and
format_token.Subtype.DICTIONARY_KEY in current.subtypes and
not current.is_comment):
# Place each dictionary entry onto its own line.
if previous.value == '{' and previous.previous_token:
opening = _GetOpeningBracket(previous.previous_token)
if (opening and opening.value == '(' and opening.previous_token and
opening.previous_token.is_name):
# This is a dictionary that's an argument to a function.
if (self._FitsOnLine(previous, previous.matching_bracket) and
previous.matching_bracket.next_token and
(not opening.matching_bracket.next_token or
opening.matching_bracket.next_token.value != '.') and
_ScopeHasNoCommas(previous)):
# Don't split before the key if:
# - The dictionary fits on a line, and
# - The function call isn't part of a builder-style call and
# - The dictionary has one entry and no trailing comma
return False
return True
if (style.Get('SPLIT_BEFORE_DICT_SET_GENERATOR') and
format_token.Subtype.DICT_SET_GENERATOR in current.subtypes):
# Split before a dict/set generator.
return True
if (format_token.Subtype.DICTIONARY_VALUE in current.subtypes or
(previous.is_pseudo_paren and previous.value == '(' and
not current.is_comment)):
# Split before the dictionary value if we can't fit every dictionary
# entry on its own line.
if not current.OpensScope():
opening = _GetOpeningBracket(current)
if not self._EachDictEntryFitsOnOneLine(opening):
return style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE')
if previous.value == '{':
# Split if the dict/set cannot fit on one line and ends in a comma.
closing = previous.matching_bracket
if (not self._FitsOnLine(previous, closing) and
closing.previous_token.value == ','):
self.stack[-1].split_before_closing_bracket = True
return True
###########################################################################
# Argument List Splitting
if (style.Get('SPLIT_BEFORE_NAMED_ASSIGNS') and not current.is_comment and
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in
current.subtypes):
if (previous.value not in {'=', ':', '*', '**'} and
current.value not in ':=,)' and not _IsFunctionDefinition(previous)):
# If we're going to split the lines because of named arguments, then we
# want to split after the opening bracket as well. But not when this is
# part of a function definition.
if previous.value == '(':
# Make sure we don't split after the opening bracket if the
# continuation indent is greater than the opening bracket:
#
# a(
# b=1,
# c=2)
if (self._FitsOnLine(previous, previous.matching_bracket) and
unwrapped_line.IsSurroundedByBrackets(previous)):
# An argument to a function is a function call with named
# assigns.
return False
column = self.column - self.stack[-1].last_space
return column > style.Get('CONTINUATION_INDENT_WIDTH')
opening = _GetOpeningBracket(current)
if opening:
arglist_length = (
opening.matching_bracket.total_length - opening.total_length +
self.stack[-1].indent)
return arglist_length > self.column_limit
if (current.value not in '{)' and previous.value == '(' and
self._ArgumentListHasDictionaryEntry(current)):
return True
if style.Get('SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED'):
# Split before arguments in a function call or definition if the
# arguments are terminated by a comma.
opening = _GetOpeningBracket(current)
if opening and opening.previous_token and opening.previous_token.is_name:
if previous.value in '(,':
if opening.matching_bracket.previous_token.value == ',':
return True
if ((current.is_name or current.value in {'*', '**'}) and
previous.value == ','):
# If we have a function call within an argument list and it won't fit on
# the remaining line, but it will fit on a line by itself, then go ahead
# and split before the call.
opening = _GetOpeningBracket(current)
if (opening and opening.value == '(' and opening.previous_token and
(opening.previous_token.is_name or
opening.previous_token.value in {'*', '**'})):
is_func_call = False
opening = current
while opening:
if opening.value == '(':
is_func_call = True
break
if (not (opening.is_name or opening.value in {'*', '**'}) and
opening.value != '.'):
break
opening = opening.next_token
if is_func_call:
if (not self._FitsOnLine(current, opening.matching_bracket) or
(opening.matching_bracket.next_token and
opening.matching_bracket.next_token.value != ',' and
not opening.matching_bracket.next_token.ClosesScope())):
return True
pprevious = previous.previous_token
if (current.is_name and pprevious and pprevious.is_name and
previous.value == '('):
if (not self._FitsOnLine(previous, previous.matching_bracket) and
_IsFunctionCallWithArguments(current)):
# There is a function call, with more than 1 argument, where the first
# argument is itself a function call with arguments. In this specific
# case, if we split after the first argument's opening '(', then the
# formatting will look bad for the rest of the arguments. E.g.:
#
# outer_function_call(inner_function_call(
# inner_arg1, inner_arg2),
# outer_arg1, outer_arg2)
#
# Instead, enforce a split before that argument to keep things looking
# good.
return True
if (previous.OpensScope() and not current.OpensScope() and
not current.is_comment and
format_token.Subtype.SUBSCRIPT_BRACKET not in previous.subtypes):
if pprevious and not pprevious.is_keyword and not pprevious.is_name:
# We want to split if there's a comment in the container.
token = current
while token != previous.matching_bracket:
if token.is_comment:
return True
token = token.next_token
if previous.value == '(':
pptoken = previous.previous_token
if not pptoken or not pptoken.is_name:
# Split after the opening of a tuple if it doesn't fit on the current
# line and it's not a function call.
if self._FitsOnLine(previous, previous.matching_bracket):
return False
elif not self._FitsOnLine(previous, previous.matching_bracket):
if len(previous.container_elements) == 1:
return False
elements = previous.container_elements + [previous.matching_bracket]
i = 1
while i < len(elements):
if (not elements[i - 1].OpensScope() and
not self._FitsOnLine(elements[i - 1], elements[i])):
return True
i += 1
if (self.column_limit - self.column) / float(self.column_limit) < 0.3:
# Try not to squish all of the arguments off to the right.
return True
else:
# Split after the opening of a container if it doesn't fit on the
# current line.
if not self._FitsOnLine(previous, previous.matching_bracket):
return True
###########################################################################
# Original Formatting Splitting
# These checks rely upon the original formatting. This is in order to
# attempt to keep hand-written code in the same condition as it was before.
# However, this may cause the formatter to fail to be idempotent.
if (style.Get('SPLIT_BEFORE_BITWISE_OPERATOR') and current.value in '&|' and
previous.lineno < current.lineno):
# Retain the split before a bitwise operator.
return True
if (current.is_comment and
previous.lineno < current.lineno - current.value.count('\n')):
# If a comment comes in the middle of an unwrapped line (like an if
# conditional with comments interspersed), then we want to split if the
# original comments were on a separate line.
return True
return False
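  # Illustrative must-split case (assumed style settings): with
  # SPLIT_ALL_COMMA_SEPARATED_VALUES enabled,
  #   foo(a, b, c)
  # is forced to become
  #   foo(a,
  #       b,
  #       c)
  # because the check above returns True whenever the previous token is ','.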
def AddTokenToState(self, newline, dry_run, must_split=False):
"""Add a token to the format decision state.
Allow the heuristic to try out adding the token with and without a newline.
Later on, the algorithm will determine which one has the lowest penalty.
Arguments:
newline: (bool) Add the token on a new line if True.
dry_run: (bool) Don't commit whitespace changes to the FormatToken if
True.
must_split: (bool) A newline was required before this token.
Returns:
The penalty of splitting after the current token.
"""
penalty = 0
if newline:
penalty = self._AddTokenOnNewline(dry_run, must_split)
else:
self._AddTokenOnCurrentLine(dry_run)
penalty += self._CalculateComprehensionState(newline)
return self.MoveStateToNextToken() + penalty
def _AddTokenOnCurrentLine(self, dry_run):
"""Puts the token on the current line.
Appends the next token to the state and updates information necessary for
indentation.
Arguments:
dry_run: (bool) Commit whitespace changes to the FormatToken if True.
"""
current = self.next_token
previous = current.previous_token
spaces = current.spaces_required_before
if not dry_run:
current.AddWhitespacePrefix(newlines_before=0, spaces=spaces)
if previous.OpensScope():
if not current.is_comment:
# Align closing scopes that are on a newline with the opening scope:
#
# foo = [a,
# b,
# ]
self.stack[-1].closing_scope_indent = self.column - 1
if style.Get('ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'):
self.stack[-1].closing_scope_indent += 1
self.stack[-1].indent = self.column + spaces
else:
self.stack[-1].closing_scope_indent = (
self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH'))
self.column += spaces
def _AddTokenOnNewline(self, dry_run, must_split):
"""Adds a line break and necessary indentation.
Appends the next token to the state and updates information necessary for
indentation.
Arguments:
dry_run: (bool) Don't commit whitespace changes to the FormatToken if
True.
must_split: (bool) A newline was required before this token.
Returns:
The split penalty for splitting after the current state.
"""
current = self.next_token
previous = current.previous_token
self.column = self._GetNewlineColumn()
if not dry_run:
indent_level = self.line.depth
spaces = self.column
if spaces:
spaces -= indent_level * style.Get('INDENT_WIDTH')
current.AddWhitespacePrefix(
newlines_before=1, spaces=spaces, indent_level=indent_level)
if not current.is_comment:
self.stack[-1].last_space = self.column
self.lowest_level_on_line = self.paren_level
if (previous.OpensScope() or
(previous.is_comment and previous.previous_token is not None and
previous.previous_token.OpensScope())):
self.stack[-1].closing_scope_indent = max(
0, self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH'))
self.stack[-1].split_before_closing_bracket = True
# Calculate the split penalty.
penalty = current.split_penalty
if must_split:
# Don't penalize for a must split.
return penalty
if previous.is_pseudo_paren and previous.value == '(':
# Small penalty for splitting after a pseudo paren.
penalty += 50
# Add a penalty for each increasing newline we add, but don't penalize for
# splitting before an if-expression or list comprehension.
if current.value not in {'if', 'for'}:
last = self.stack[-1]
last.num_line_splits += 1
penalty += (
style.Get('SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT') *
last.num_line_splits)
if current.OpensScope() and previous.OpensScope():
# Prefer to keep opening brackets coalesced (unless it's at the beginning
# of a function call).
pprev = previous.previous_token
if not pprev or not pprev.is_name:
penalty += 10
return penalty + 10
def MoveStateToNextToken(self):
"""Calculate format decision state information and move onto the next token.
Before moving onto the next token, we first calculate the format decision
state given the current token and its formatting decisions. Then the format
decision state is set up so that the next token can be added.
Returns:
The penalty for the number of characters over the column limit.
"""
current = self.next_token
if not current.OpensScope() and not current.ClosesScope():
self.lowest_level_on_line = min(self.lowest_level_on_line,
self.paren_level)
# If we encounter an opening bracket, we add a level to our stack to prepare
# for the subsequent tokens.
if current.OpensScope():
last = self.stack[-1]
new_indent = style.Get('CONTINUATION_INDENT_WIDTH') + last.last_space
self.stack.append(_ParenState(new_indent, self.stack[-1].last_space))
self.paren_level += 1
# If we encounter a closing bracket, we can remove a level from our
# parenthesis stack.
if len(self.stack) > 1 and current.ClosesScope():
if format_token.Subtype.DICTIONARY_KEY_PART in current.subtypes:
self.stack[-2].last_space = self.stack[-2].indent
else:
self.stack[-2].last_space = self.stack[-1].last_space
self.stack.pop()
self.paren_level -= 1
is_multiline_string = current.is_string and '\n' in current.value
if is_multiline_string:
# This is a multiline string. Only look at the first line.
self.column += len(current.value.split('\n')[0])
elif not current.is_pseudo_paren:
self.column += len(current.value)
self.next_token = self.next_token.next_token
# Calculate the penalty for overflowing the column limit.
penalty = 0
if (not current.is_pylint_comment and not current.is_pytype_comment and
self.column > self.column_limit):
excess_characters = self.column - self.column_limit
penalty += style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters
if is_multiline_string:
# If this is a multiline string, the column is actually the
# end of the last line in the string.
self.column = len(current.value.split('\n')[-1])
return penalty
def _CalculateComprehensionState(self, newline):
"""Makes required changes to comprehension state.
Args:
newline: Whether the current token is to be added on a newline.
Returns:
The penalty for the token-newline combination given the current
comprehension state.
"""
current = self.next_token
previous = current.previous_token
top_of_stack = self.comp_stack[-1] if self.comp_stack else None
penalty = 0
if top_of_stack is not None:
# Check if the token terminates the current comprehension.
if current == top_of_stack.closing_bracket:
last = self.comp_stack.pop()
# Lightly penalize comprehensions that are split across multiple lines.
if last.has_interior_split:
penalty += style.Get('SPLIT_PENALTY_COMPREHENSION')
return penalty
if newline:
top_of_stack.has_interior_split = True
if (format_token.Subtype.COMP_EXPR in current.subtypes and
format_token.Subtype.COMP_EXPR not in previous.subtypes):
self.comp_stack.append(object_state.ComprehensionState(current))
return penalty
if (current.value == 'for' and
format_token.Subtype.COMP_FOR in current.subtypes):
if top_of_stack.for_token is not None:
# Treat nested comprehensions like normal comp_if expressions.
# Example:
# my_comp = [
# a.qux + b.qux
# for a in foo
# --> for b in bar <--
# if a.zut + b.zut
# ]
if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and
top_of_stack.has_split_at_for != newline and
(top_of_stack.has_split_at_for or
not top_of_stack.HasTrivialExpr())):
penalty += split_penalty.UNBREAKABLE
else:
top_of_stack.for_token = current
top_of_stack.has_split_at_for = newline
# Try to keep trivial expressions on the same line as the comp_for.
if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and newline and
top_of_stack.HasTrivialExpr()):
penalty += split_penalty.CONNECTED
if (format_token.Subtype.COMP_IF in current.subtypes and
format_token.Subtype.COMP_IF not in previous.subtypes):
# Penalize breaking at comp_if when it doesn't match the newline structure
# in the rest of the comprehension.
if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and
top_of_stack.has_split_at_for != newline and
(top_of_stack.has_split_at_for or not top_of_stack.HasTrivialExpr())):
penalty += split_penalty.UNBREAKABLE
return penalty
def _GetNewlineColumn(self):
"""Return the new column on the newline."""
current = self.next_token
previous = current.previous_token
top_of_stack = self.stack[-1]
if current.spaces_required_before > 2 or self.line.disable:
return current.spaces_required_before
if current.OpensScope():
return top_of_stack.indent if self.paren_level else self.first_indent
if current.ClosesScope():
if (previous.OpensScope() or
(previous.is_comment and previous.previous_token is not None and
previous.previous_token.OpensScope())):
return max(0,
top_of_stack.indent - style.Get('CONTINUATION_INDENT_WIDTH'))
return top_of_stack.closing_scope_indent
if (previous and previous.is_string and current.is_string and
format_token.Subtype.DICTIONARY_VALUE in current.subtypes):
return previous.column
if style.Get('INDENT_DICTIONARY_VALUE'):
if previous and (previous.value == ':' or previous.is_pseudo_paren):
if format_token.Subtype.DICTIONARY_VALUE in current.subtypes:
return top_of_stack.indent
if (_IsCompoundStatement(self.line.first) and
(not style.Get('DEDENT_CLOSING_BRACKETS') or
style.Get('SPLIT_BEFORE_FIRST_ARGUMENT'))):
token_indent = (
len(self.line.first.whitespace_prefix.split('\n')[-1]) +
style.Get('INDENT_WIDTH'))
if token_indent == top_of_stack.indent:
return top_of_stack.indent + style.Get('CONTINUATION_INDENT_WIDTH')
return top_of_stack.indent
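  # Illustrative sketch (assumed behavior, not from the original source):
  # with INDENT_DICTIONARY_VALUE enabled, a value that must move to its own
  # line is indented past its key rather than aligned with it:
  #
  #   d = {
  #       'key':
  #           some_rather_long_value_expression,
  #   }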
def _FitsOnLine(self, start, end):
"""Determines if line between start and end can fit on the current line."""
length = end.total_length - start.total_length
if not start.is_pseudo_paren:
length += len(start.value)
return length + self.column <= self.column_limit
def _EachDictEntryFitsOnOneLine(self, opening):
"""Determine if each dict elems can fit on one line."""
def PreviousNonCommentToken(tok):
tok = tok.previous_token
while tok.is_comment:
tok = tok.previous_token
return tok
def ImplicitStringConcatenation(tok):
num_strings = 0
if tok.is_pseudo_paren:
tok = tok.next_token
while tok.is_string:
num_strings += 1
tok = tok.next_token
return num_strings > 1
closing = opening.matching_bracket
entry_start = opening.next_token
current = opening.next_token.next_token
while current and current != closing:
if format_token.Subtype.DICTIONARY_KEY in current.subtypes:
prev = PreviousNonCommentToken(current)
length = prev.total_length - entry_start.total_length
length += len(entry_start.value)
if length + self.stack[-2].indent >= self.column_limit:
return False
entry_start = current
if current.OpensScope():
if ((current.value == '{' or
(current.is_pseudo_paren and current.next_token.value == '{') and
format_token.Subtype.DICTIONARY_VALUE in current.subtypes) or
ImplicitStringConcatenation(current)):
# A dictionary entry that cannot fit on a single line shouldn't matter
# to this calculation. If it can't fit on a single line, then the
# opening should be on the same line as the key and the rest on
# newlines after it. But the other entries should be on single lines
# if possible.
if current.matching_bracket:
current = current.matching_bracket
while current:
if current == closing:
return True
if format_token.Subtype.DICTIONARY_KEY in current.subtypes:
entry_start = current
break
current = current.next_token
else:
current = current.matching_bracket
else:
current = current.next_token
    # At this point, current is the closing bracket. Go back one to get the
    # end of the dictionary entry.
current = PreviousNonCommentToken(current)
length = current.total_length - entry_start.total_length
length += len(entry_start.value)
return length + self.stack[-2].indent <= self.column_limit
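  # Illustrative sketch (not part of the original source): given
  #
  #   {'alpha': 1, 'beta': compute(2), 'gamma': [3, 4]}
  #
  # each entry is measured from its key to the token before the next key;
  # nested brackets (e.g. the [3, 4] value) are skipped via matching_bracket,
  # so only the outer dictionary's entries are checked against the limit.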
def _ArgumentListHasDictionaryEntry(self, token):
"""Check if the function argument list has a dictionary as an arg."""
if _IsArgumentToFunction(token):
while token:
if token.value == '{':
length = token.matching_bracket.total_length - token.total_length
return length + self.stack[-2].indent > self.column_limit
if token.ClosesScope():
break
if token.OpensScope():
token = token.matching_bracket
token = token.next_token
return False
_COMPOUND_STMTS = frozenset(
{'for', 'while', 'if', 'elif', 'with', 'except', 'def', 'class'})
def _IsCompoundStatement(token):
if token.value == 'async':
token = token.next_token
return token.value in _COMPOUND_STMTS
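# Illustrative examples (assumed behavior, derived from the code above): the
# first token of "for x in y:" or of "async def f():" yields True, while the
# first token of "x = 1" yields False.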
def _IsFunctionDef(token):
if token.value == 'async':
token = token.next_token
return token.value == 'def'
def _IsFunctionCallWithArguments(token):
while token:
if token.value == '(':
token = token.next_token
return token and token.value != ')'
elif token.name not in {'NAME', 'DOT', 'EQUAL'}:
break
token = token.next_token
return False
def _IsArgumentToFunction(token):
bracket = unwrapped_line.IsSurroundedByBrackets(token)
if not bracket or bracket.value != '(':
return False
previous = bracket.previous_token
return previous and previous.is_name
def _GetLengthOfSubtype(token, subtype, exclude=None):
current = token
while (current.next_token and subtype in current.subtypes and
(exclude is None or exclude not in current.subtypes)):
current = current.next_token
return current.total_length - token.total_length + 1
def _GetOpeningBracket(current):
"""Get the opening bracket containing the current token."""
if current.matching_bracket and not current.is_pseudo_paren:
return current.matching_bracket
while current:
if current.ClosesScope():
current = current.matching_bracket
elif current.is_pseudo_paren:
current = current.previous_token
elif current.OpensScope():
return current
current = current.previous_token
return None
def _LastTokenInLine(current):
while not current.is_comment and current.next_token:
current = current.next_token
return current
def _IsFunctionDefinition(current):
prev = current.previous_token
return (current.value == '(' and prev and
format_token.Subtype.FUNC_DEF in prev.subtypes)
def _IsLastScopeInLine(current):
while current:
current = current.next_token
if current and current.OpensScope():
return False
return True
def _IsSingleElementTuple(token):
"""Check if it's a single-element tuple."""
close = token.matching_bracket
token = token.next_token
num_commas = 0
while token != close:
if token.value == ',':
num_commas += 1
if token.OpensScope():
token = token.matching_bracket
else:
token = token.next_token
return num_commas == 1
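# Illustrative examples (assumed behavior, derived from the code above): the
# check simply counts top-level commas, so "(1,)" -> True, and so does
# "(1, 2)"; "(1, 2, 3)" -> False. Commas inside nested brackets, as in
# "(f(a, b),)", are skipped via matching_bracket, so that is True as well.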
def _ScopeHasNoCommas(token):
"""Check if the scope has no commas."""
close = token.matching_bracket
token = token.next_token
while token != close:
if token.value == ',':
return False
if token.OpensScope():
token = token.matching_bracket
else:
token = token.next_token
return True
class _ParenState(object):
"""Maintains the state of the bracket enclosures.
  A stack of _ParenState objects is kept so that we know how to indent relative
to the brackets.
Attributes:
indent: The column position to which a specified parenthesis level needs to
be indented.
last_space: The column position of the last space on each level.
split_before_closing_bracket: Whether a newline needs to be inserted before
the closing bracket. We only want to insert a newline before the closing
bracket if there also was a newline after the beginning left bracket.
num_line_splits: Number of line splits this _ParenState contains already.
Each subsequent line split gets an increasing penalty.
"""
# TODO(morbo): This doesn't track "bin packing."
def __init__(self, indent, last_space):
self.indent = indent
self.last_space = last_space
self.closing_scope_indent = 0
self.split_before_closing_bracket = False
self.num_line_splits = 0
def Clone(self):
state = _ParenState(self.indent, self.last_space)
state.closing_scope_indent = self.closing_scope_indent
state.split_before_closing_bracket = self.split_before_closing_bracket
state.num_line_splits = self.num_line_splits
return state
def __repr__(self):
return '[indent::%d, last_space::%d, closing_scope_indent::%d]' % (
self.indent, self.last_space, self.closing_scope_indent)
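  # Design note (added commentary): equality is delegated to the hash of the
  # full state tuple below, so a Clone() compares equal to its original until
  # either copy diverges.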
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return not self == other
def __hash__(self, *args, **kwargs):
return hash((self.indent, self.last_space, self.closing_scope_indent,
self.split_before_closing_bracket, self.num_line_splits))
| apache-2.0 | 1,545,245,249,763,236,600 | 37.06726 | 80 | 0.644364 | false |
demisto/content | Packs/ThinkstCanary/Integrations/ThinkstCanary/ThinkstCanary_test.py | 1 | 3171 | import demistomock as demisto
MOCK_PARAMS = {
'access-key': 'fake_access_key',
'secret-key': 'fake_access_key',
'server': 'http://123-fake-api.com/',
'unsecure': True,
'proxy': True
}
def test_fetch_incidents(mocker, requests_mock):
"""
Given: An existing last run time.
When: Running a fetch incidents command normally (not a first run).
Then: The last run time object should increment by 1 second.
          2020-07-01-04:58:18 -> 2020-07-01-04:58:19
"""
mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
mocker.patch.object(demisto, 'getLastRun', return_value={'time': '2020-07-01-04:58:18'})
mocker.patch.object(demisto, 'setLastRun')
requests_mock.get('http://123-fake-api.com/api/v1/incidents/unacknowledged?newer_than=2020-07-01-04%3A58%3A18',
json={'incidents': [{'description': {'created': 1593579498}}]})
from ThinkstCanary import fetch_incidents_command
fetch_incidents_command()
assert demisto.setLastRun.call_args[0][0]['time'] == '2020-07-01-04:58:19'
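# Note (added commentary, not from the original suite): bumping the stored
# watermark by one second is what the assertion above checks; presumably this
# keeps the next poll's newer_than filter from returning the incident created
# at exactly the previous timestamp again.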
def test_check_whitelist_command_not_whitelisted(mocker):
"""
Given: An IP to check
When: Running check_whitelist_command.
Then: The IP should not be ignored (not in the whitelist).
"""
import ThinkstCanary
ip_to_check = "1.2.3.4"
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
mocker.patch.object(demisto, 'args', return_value={'ip': ip_to_check})
mocker.patch.object(ThinkstCanary, 'check_whitelist', return_value={'is_ip_ignored': False,
'is_whitelist_enabled': True})
ThinkstCanary.check_whitelist_command()
assert demisto.results.call_args_list[0][0][0].get('HumanReadable') == 'The IP address 1.2.3.4:Any is not ' \
'Whitelisted'
def test_check_whitelist_commands_whitelisted(mocker):
"""
Given: An already whitelisted IP to check
When: Inserting IP to whitelist (whitelist_ip_command) and checking if it is whitelisted (check_whitelist_command).
Then: The IP should be ignored (in the whitelist), and an appropriate message to the user should be prompted.
"""
import ThinkstCanary
ip_to_whitelist = "1.2.3.4"
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
mocker.patch.object(demisto, 'args', return_value={'ip': ip_to_whitelist})
mocker.patch.object(ThinkstCanary, 'whitelist_ip', return_value={'message': 'Whitelist added',
'result': 'success'})
mocker.patch.object(ThinkstCanary, 'check_whitelist', return_value={'is_ip_ignored': True,
'is_whitelist_enabled': True})
ThinkstCanary.whitelist_ip_command()
ThinkstCanary.check_whitelist_command()
assert demisto.results.call_args_list[1][0][0].get('HumanReadable') == 'The IP address 1.2.3.4:Any is Whitelisted'
| mit | -3,411,973,804,248,822,300 | 47.784615 | 120 | 0.62567 | false |
rajarsheem/libsdae-autoencoder-tensorflow | deepautoencoder/stacked_autoencoder.py | 1 | 6154 | import numpy as np
import deepautoencoder.utils as utils
import tensorflow as tf
allowed_activations = ['sigmoid', 'tanh', 'softmax', 'relu', 'linear']
allowed_noises = [None, 'gaussian', 'mask']
allowed_losses = ['rmse', 'cross-entropy']
class StackedAutoEncoder:
"""A deep autoencoder with denoising capability"""
def assertions(self):
global allowed_activations, allowed_noises, allowed_losses
assert self.loss in allowed_losses, 'Incorrect loss given'
        assert isinstance(
            self.dims, list), 'dims must be a list even if there is one layer.'
assert len(self.epoch) == len(
self.dims), "No. of epochs must equal to no. of hidden layers"
assert len(self.activations) == len(
self.dims), "No. of activations must equal to no. of hidden layers"
        assert all(
            x > 0
            for x in self.epoch), "No. of epochs must be at least 1"
assert set(self.activations + allowed_activations) == set(
allowed_activations), "Incorrect activation given."
assert utils.noise_validator(
self.noise, allowed_noises), "Incorrect noise given"
def __init__(self, dims, activations, epoch=1000, noise=None, loss='rmse',
lr=0.001, batch_size=100, print_step=50):
self.print_step = print_step
self.batch_size = batch_size
self.lr = lr
self.loss = loss
self.activations = activations
self.noise = noise
self.epoch = epoch
self.dims = dims
self.assertions()
self.depth = len(dims)
self.weights, self.biases = [], []
def add_noise(self, x):
if self.noise == 'gaussian':
n = np.random.normal(0, 0.1, (len(x), len(x[0])))
return x + n
if 'mask' in self.noise:
frac = float(self.noise.split('-')[1])
temp = np.copy(x)
for i in temp:
n = np.random.choice(len(i), round(
frac * len(i)), replace=False)
i[n] = 0
return temp
if self.noise == 'sp':
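            # 'sp' (salt-and-pepper) noise is not in allowed_noises and is
            # left unimplemented here; this branch is effectively dead code.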
pass
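    # Illustrative behavior (assumed, not from the original module): with
    # noise='mask-0.4', roughly 40% of each row's features are zeroed, e.g.
    #   add_noise([[1., 2., 3., 4., 5.]]) might yield [[1., 0., 3., 0., 5.]]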
def fit(self, x):
for i in range(self.depth):
print('Layer {0}'.format(i + 1))
if self.noise is None:
x = self.run(data_x=x, activation=self.activations[i],
data_x_=x,
hidden_dim=self.dims[i], epoch=self.epoch[
i], loss=self.loss,
batch_size=self.batch_size, lr=self.lr,
print_step=self.print_step)
else:
temp = np.copy(x)
x = self.run(data_x=self.add_noise(temp),
activation=self.activations[i], data_x_=x,
hidden_dim=self.dims[i],
epoch=self.epoch[
i], loss=self.loss,
batch_size=self.batch_size,
lr=self.lr, print_step=self.print_step)
def transform(self, data):
tf.reset_default_graph()
sess = tf.Session()
x = tf.constant(data, dtype=tf.float32)
for w, b, a in zip(self.weights, self.biases, self.activations):
weight = tf.constant(w, dtype=tf.float32)
bias = tf.constant(b, dtype=tf.float32)
layer = tf.matmul(x, weight) + bias
x = self.activate(layer, a)
return x.eval(session=sess)
def fit_transform(self, x):
self.fit(x)
return self.transform(x)
def run(self, data_x, data_x_, hidden_dim, activation, loss, lr,
print_step, epoch, batch_size=100):
tf.reset_default_graph()
input_dim = len(data_x[0])
sess = tf.Session()
x = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='x')
x_ = tf.placeholder(dtype=tf.float32, shape=[
None, input_dim], name='x_')
encode = {'weights': tf.Variable(tf.truncated_normal(
[input_dim, hidden_dim], dtype=tf.float32)),
'biases': tf.Variable(tf.truncated_normal([hidden_dim],
dtype=tf.float32))}
decode = {'biases': tf.Variable(tf.truncated_normal([input_dim],
dtype=tf.float32)),
'weights': tf.transpose(encode['weights'])}
encoded = self.activate(
tf.matmul(x, encode['weights']) + encode['biases'], activation)
decoded = tf.matmul(encoded, decode['weights']) + decode['biases']
# reconstruction loss
if loss == 'rmse':
loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(x_, decoded))))
elif loss == 'cross-entropy':
loss = -tf.reduce_mean(x_ * tf.log(decoded))
train_op = tf.train.AdamOptimizer(lr).minimize(loss)
sess.run(tf.global_variables_initializer())
for i in range(epoch):
b_x, b_x_ = utils.get_batch(
data_x, data_x_, batch_size)
sess.run(train_op, feed_dict={x: b_x, x_: b_x_})
if (i + 1) % print_step == 0:
l = sess.run(loss, feed_dict={x: data_x, x_: data_x_})
print('epoch {0}: global loss = {1}'.format(i, l))
# self.loss_val = l
# debug
# print('Decoded', sess.run(decoded, feed_dict={x: self.data_x_})[0])
self.weights.append(sess.run(encode['weights']))
self.biases.append(sess.run(encode['biases']))
return sess.run(encoded, feed_dict={x: data_x_})
def activate(self, linear, name):
if name == 'sigmoid':
return tf.nn.sigmoid(linear, name='encoded')
elif name == 'softmax':
return tf.nn.softmax(linear, name='encoded')
elif name == 'linear':
return linear
elif name == 'tanh':
return tf.nn.tanh(linear, name='encoded')
elif name == 'relu':
return tf.nn.relu(linear, name='encoded')
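# Minimal usage sketch (illustrative assumption, not part of the original
# module; requires TensorFlow 1.x): train a two-layer denoising autoencoder
# on random data and inspect the learned representation.
if __name__ == '__main__':
    data = np.random.rand(200, 40)
    model = StackedAutoEncoder(dims=[20, 20],
                               activations=['relu', 'relu'],
                               epoch=[500, 500],
                               noise='gaussian',
                               loss='rmse',
                               lr=0.007,
                               batch_size=50,
                               print_step=100)
    features = model.fit_transform(data)
    print(features.shape)  # expected: (200, 20)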
| mit | -4,848,992,055,917,937,000 | 41.441379 | 79 | 0.524862 | false |
foglamp/FogLAMP | python/foglamp/services/core/api/plugins/discovery.py | 1 | 4032 | # -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import logging
import json
from aiohttp import web
from foglamp.common.plugin_discovery import PluginDiscovery
from foglamp.services.core.api.plugins import common
from foglamp.common import logger
from foglamp.services.core.api.plugins.exceptions import *
__author__ = "Amarendra K Sinha, Ashish Jabble"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_help = """
-------------------------------------------------------------------------------
| GET | /foglamp/plugins/installed |
| GET | /foglamp/plugins/available |
-------------------------------------------------------------------------------
"""
_logger = logger.setup(__name__, level=logging.INFO)
async def get_plugins_installed(request):
""" get list of installed plugins
:Example:
curl -X GET http://localhost:8081/foglamp/plugins/installed
curl -X GET http://localhost:8081/foglamp/plugins/installed?config=true
curl -X GET http://localhost:8081/foglamp/plugins/installed?type=north|south|filter|notificationDelivery|notificationRule
curl -X 'GET http://localhost:8081/foglamp/plugins/installed?type=north&config=true'
"""
plugin_type = None
is_config = False
if 'type' in request.query and request.query['type'] != '':
plugin_type = request.query['type'].lower() if request.query['type'] not in ['notificationDelivery', 'notificationRule'] else request.query['type']
if plugin_type is not None and plugin_type not in ['north', 'south', 'filter', 'notificationDelivery', 'notificationRule']:
raise web.HTTPBadRequest(reason="Invalid plugin type. Must be 'north' or 'south' or 'filter' or 'notificationDelivery' or 'notificationRule'.")
if 'config' in request.query:
config = request.query['config']
if config not in ['true', 'false', True, False]:
raise web.HTTPBadRequest(reason='Only "true", "false", true, false'
' are allowed for value of config.')
is_config = True if ((type(config) is str and config.lower() in ['true']) or (
(type(config) is bool and config is True))) else False
plugins_list = PluginDiscovery.get_plugins_installed(plugin_type, is_config)
return web.json_response({"plugins": plugins_list})
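# Illustrative response sketch (field names assumed, not taken from the
# handler's contract):
# {"plugins": [{"name": "sinusoid", "type": "south", "version": "1.7.0"}]}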
async def get_plugins_available(request: web.Request) -> web.Response:
""" get list of a available plugins via package management i.e apt or yum
:Example:
curl -X GET http://localhost:8081/foglamp/plugins/available
curl -X GET http://localhost:8081/foglamp/plugins/available?type=north | south | filter | notify | rule
"""
try:
package_type = ""
if 'type' in request.query and request.query['type'] != '':
package_type = request.query['type'].lower()
if package_type and package_type not in ['north', 'south', 'filter', 'notify', 'rule']:
raise ValueError("Invalid package type. Must be 'north' or 'south' or 'filter' or 'notify' or 'rule'.")
plugins, log_path = await common.fetch_available_packages(package_type)
# foglamp-gui, foglamp-quickstart and foglamp-service-* packages are excluded when no type is given
if not package_type:
plugins = [p for p in plugins if not str(p).startswith('foglamp-service-') and p not in ('foglamp-quickstart', 'foglamp-gui')]
except ValueError as e:
raise web.HTTPBadRequest(reason=e)
except PackageError as e:
msg = "Fetch available plugins package request failed"
raise web.HTTPBadRequest(body=json.dumps({"message": msg, "link": str(e)}), reason=msg)
except Exception as ex:
raise web.HTTPInternalServerError(reason=ex)
return web.json_response({"plugins": plugins, "link": log_path})
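# Illustrative response sketch (values assumed): the JSON above carries the
# discovered package names plus the log file written by the fetch, e.g.
# {"plugins": ["foglamp-south-sinusoid"], "link": "logs/200217-11-41-13.log"}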
| apache-2.0 | 2,274,135,026,468,009,700 | 44.818182 | 155 | 0.624008 | false |