# ============================================================
# File: src/server/integtests/test_capabilities.py
# Repo: zhsso/ubunto-one
# ============================================================
# -*- coding: utf-8 -*-
# Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server
"""Test the capabilities decorator."""
from twisted.trial.unittest import TestCase
from twisted.trial.reporter import TestResult
from twisted.internet import defer
from ubuntuone.storage.server.testing.caps_helpers import required_caps
from ubuntuone.storage.server.testing.aq_helpers import (
TestWithDatabase,
failure_expected,
)
from ubuntuone.storage.server import server as server_module
from ubuntuone import syncdaemon
class RequiredCapsDecoratorTests(TestCase):
"""Tests for required_caps decorator"""
_original_required_caps = syncdaemon.REQUIRED_CAPS
@defer.inlineCallbacks
def tearDown(self):
"""tearDown"""
syncdaemon.REQUIRED_CAPS = self._original_required_caps
yield super(RequiredCapsDecoratorTests, self).tearDown()
def test_mismatch(self):
"""test tha a test is correctly skipped"""
result = TestResult()
syncdaemon.REQUIRED_CAPS = set(['supercalifragilistico'])
class FakeTest(TestCase):
"""Testcase to test the decorator"""
@required_caps([], validate=False)
def test_method(innerself):
"""test method that allways fails"""
innerself.fail()
FakeTest('test_method').run(result)
self.assertEquals(1, len(result.skips))
def test_match(self):
"""Check that a test is executed when the caps match."""
result = TestResult()
syncdaemon.REQUIRED_CAPS = server_module.MIN_CAP
class FakeTest(TestCase):
"""Testcase to test the decorator"""
@required_caps(server_module.MIN_CAP)
def test_method(innerself):
"""Test method that always pass."""
innerself.assertTrue(True)
FakeTest('test_method').run(result)
self.assertEquals(0, len(result.skips))
self.assertEquals(1, result.successes)
def test_not_validate(self):
"""test that a test is executed when the supported_caps_set don't match
the server SUPPORTED_CAPS and validate=False.
"""
result = TestResult()
syncdaemon.REQUIRED_CAPS = set(['supercalifragilistico'])
class FakeTest(TestCase):
"""Testcase to test the decorator"""
@required_caps(['supercalifragilistico'], validate=False)
def test_method(innerself):
"""test method that always pass"""
innerself.assertTrue(True)
FakeTest('test_method').run(result)
self.assertEquals(0, len(result.skips))
self.assertEquals(1, result.successes)
def test_validate(self):
"""test tha a test fails when the supported_caps_set don't match
the server SUPPORTED_CAPS and validate=True.
"""
result = TestResult()
class FakeTest(TestCase):
"""Testcase to test the decorator"""
@required_caps([], ['supercalifragilistico', 'foo'], ['foo'])
def test_method(innerself):
"""test method that always pass"""
innerself.assertTrue(True)
the_test = FakeTest('test_method')
the_test.run(result)
self.assertEquals(0, len(result.skips))
self.assertEquals(1, len(result.failures))
self.assertEquals(the_test, result.failures[0][0])
class TestClientCapabilities(TestWithDatabase):
"""Test the client side of query/set capabilities"""
client = None
# just to restore original values
_original_supported_caps = server_module.SUPPORTED_CAPS
_original_required_caps = syncdaemon.REQUIRED_CAPS
def tearDown(self):
"""cleanup the mess"""
server_module.SUPPORTED_CAPS = self._original_supported_caps
syncdaemon.REQUIRED_CAPS = self._original_required_caps
if self.aq.connector is not None:
self.aq.disconnect()
return super(TestClientCapabilities, self).tearDown()
def assertInQ(self, deferred, containee, msg=None):
"""
deferredly assert that the containee is in the event queue.
containee can be callable, in which case it's called before
asserting.
"""
def check_queue(_):
"the check itself"
ce = containee() if callable(containee) else containee
self.assertIn(ce, self.listener.q, msg)
deferred.addCallback(check_queue)
def connect(self):
"""Connect the client"""
d = self.wait_for('SYS_CONNECTION_MADE')
self.eq.push('SYS_INIT_DONE')
self.eq.push('SYS_LOCAL_RESCAN_DONE')
self.eq.push('SYS_USER_CONNECT',
access_token=self.access_tokens['jack'])
self.eq.push('SYS_NET_CONNECTED')
return d
def test_query_set_capabilities(self):
"""After connecting the server uses the caps specified by client."""
needed_event = self.wait_for('SYS_SET_CAPABILITIES_OK')
d = self.connect()
d.addCallback(lambda _: needed_event)
return d
@failure_expected("The server doesn't have the requested capabilities")
def test_query_bad_capabilities(self):
"""Test how the client hanlde trying to set capabilities that the
server don't have.
"""
syncdaemon.REQUIRED_CAPS = frozenset(['foo'])
needed_event = self.wait_for('SYS_SET_CAPABILITIES_ERROR')
d = self.connect()
d.addCallback(lambda _: needed_event)
self.assertInQ(d, ('SYS_SET_CAPABILITIES_ERROR',
{'error': "The server doesn't have the requested "
"capabilities"}))
return d
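# ------------------------------------------------------------
# Annotation (not part of the original file): these are Twisted trial
# test cases, so they would typically be run with the `trial` runner,
# e.g. `trial test_capabilities` from the containing directory (the
# exact dotted module path depends on the repository layout).
# ------------------------------------------------------------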
# ============================================================
# File: source/database/db_base_metadata_thesportsdb.py
# Repo: MediaKraken/MediaKraken_Deployment
# ============================================================
"""
Copyright (C) 2015 Quinn D Granfor <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import uuid
def db_metathesportsdb_select_guid(self, guid):
"""
# select
"""
self.db_cursor.execute('select mm_metadata_sports_json'
' from mm_metadata_sports'
' where mm_metadata_sports_guid = %s', (guid,))
try:
return self.db_cursor.fetchone()['mm_metadata_sports_json']
except:
return None
def db_metathesportsdb_insert(self, series_id_json, event_name, show_detail,
image_json):
"""
# insert
"""
new_guid = uuid.uuid4()
self.db_cursor.execute('insert into mm_metadata_sports (mm_metadata_sports_guid,'
' mm_metadata_media_sports_id,'
' mm_metadata_sports_name,'
' mm_metadata_sports_json,'
' mm_metadata_sports_image_json)'
' values (%s,%s,%s,%s,%s)',
(new_guid, series_id_json, event_name, show_detail, image_json))
self.db_commit()
return new_guid
def db_metathesports_update(self, series_id_json, event_name, show_detail,
sportsdb_id):
"""
# update
"""
self.db_cursor.execute('update mm_metadata_sports'
' set mm_metadata_media_sports_id = %s,'
' mm_metadata_sports_name = %s,'
' mm_metadata_sports_json = %s'
' where mm_metadata_media_sports_id->\'thesportsdb\' ? %s',
(series_id_json, event_name, show_detail, sportsdb_id))
self.db_commit()
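# ------------------------------------------------------------
# Annotation (not part of the original file): these helpers are
# unbound module-level functions that expect a `self` providing
# `db_cursor` and `db_commit`, i.e. they are meant to be attached to a
# larger database class elsewhere in the project. A minimal sketch of
# that pattern, using a hypothetical `Database` class and a
# psycopg2-style dict cursor (an assumption consistent with the
# `fetchone()['mm_metadata_sports_json']` access above):
#
#     import psycopg2
#     import psycopg2.extras
#
#     class Database(object):
#         def __init__(self, dsn):
#             self.db_connection = psycopg2.connect(dsn)
#             self.db_cursor = self.db_connection.cursor(
#                 cursor_factory=psycopg2.extras.RealDictCursor)
#
#         def db_commit(self):
#             self.db_connection.commit()
#
#         # bind the helpers defined above as methods
#         db_metathesportsdb_select_guid = db_metathesportsdb_select_guid
#         db_metathesportsdb_insert = db_metathesportsdb_insert
#         db_metathesports_update = db_metathesports_update
#
# Note the `->` and `?` operators in the update's WHERE clause: these
# are PostgreSQL JSON(B) operators, so `mm_metadata_media_sports_id`
# is assumed to be a jsonb column keyed by provider ('thesportsdb').
# ------------------------------------------------------------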
# ============================================================
# File: pyolib/phasevoc.py
# Repo: cleemesser/pyo
# ============================================================
"""
Phase vocoder.
The phase vocoder is a digital signal processing technique of potentially
great musical significance. It can be used to perform very high fidelity
time scaling, pitch transposition, and myriad other modifications of sounds.
"""
"""
Copyright 2009-2015 Olivier Belanger
This file is part of pyo, a python module to help digital signal
processing script creation.
pyo is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
pyo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with pyo. If not, see <http://www.gnu.org/licenses/>.
"""
from _core import *
from _maps import *
from _widgets import createSpectrumWindow
from pattern import Pattern
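# ------------------------------------------------------------
# Annotation (not part of the original module): every PVxxx effect
# below sits between a PVAnal and a PVSynth. A minimal
# analysis -> transform -> resynthesis chain, using only objects
# documented in this file, looks like:
#
#     s = Server().boot()
#     src = SfPlayer(SNDS_PATH + "/transparent.aif", loop=True, mul=0.5)
#     pva = PVAnal(src, size=1024, overlaps=4)   # time -> spectral domain
#     pvt = PVTranspose(pva, transpo=1.5)        # any PVxxx transform
#     pvs = PVSynth(pvt).out()                   # spectral -> time domain
#     s.start()
# ------------------------------------------------------------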
class PVAnal(PyoPVObject):
"""
Phase Vocoder analysis object.
PVAnal takes an input sound and performs the phase vocoder
analysis on it. This results in two streams, one for the bin's
magnitudes and the other for the bin's true frequencies. These
two streams are used by the PVxxx object family to transform
the input signal using spectral domain algorithms. The last
object in the phase vocoder chain must be a PVSynth to perform
the spectral to time domain conversion.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoObject
Input signal to process.
size : int {pow-of-two > 4}, optional
FFT size. Must be a power of two greater than 4.
Defaults to 1024.
The FFT size is the number of samples used in each
analysis frame.
overlaps : int, optional
The number of overlapped analysis blocks. Must be a
power of two. Defaults to 4.
More overlaps can greatly improve synthesis sound
quality, but are also more CPU expensive.
wintype : int, optional
Shape of the envelope used to filter each input frame.
Possible shapes are:
0. rectangular (no windowing)
1. Hamming
2. Hanning (default)
3. Bartlett (triangular)
4. Blackman 3-term
5. Blackman-Harris 4-term
6. Blackman-Harris 7-term
7. Tukey (alpha = 0.66)
8. Sine (half-sine window)
>>> s = Server().boot()
>>> s.start()
>>> a = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=0.7)
>>> pva = PVAnal(a, size=1024, overlaps=4, wintype=2)
>>> pvs = PVSynth(pva).mix(2).out()
"""
def __init__(self, input, size=1024, overlaps=4, wintype=2):
pyoArgsAssert(self, "oiii", input, size, overlaps, wintype)
PyoPVObject.__init__(self)
self._input = input
self._size = size
self._overlaps = overlaps
self._wintype = wintype
self._in_fader = InputFader(input)
in_fader, size, overlaps, wintype, lmax = convertArgsToLists(self._in_fader, size, overlaps, wintype)
self._base_objs = [PVAnal_base(wrap(in_fader,i), wrap(size,i), wrap(overlaps,i), wrap(wintype,i)) for i in range(lmax)]
def setInput(self, x, fadetime=0.05):
"""
Replace the `input` attribute.
:Args:
x : PyoObject
New signal to process.
fadetime : float, optional
Crossfade time between old and new input. Default to 0.05.
"""
pyoArgsAssert(self, "oN", x, fadetime)
self._input = x
self._in_fader.setInput(x, fadetime)
def setSize(self, x):
"""
Replace the `size` attribute.
:Args:
x : int
new `size` attribute.
"""
pyoArgsAssert(self, "i", x)
self._size = x
x, lmax = convertArgsToLists(x)
[obj.setSize(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setOverlaps(self, x):
"""
Replace the `overlaps` attribute.
:Args:
x : int
new `overlaps` attribute.
"""
pyoArgsAssert(self, "i", x)
self._overlaps = x
x, lmax = convertArgsToLists(x)
[obj.setOverlaps(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setWinType(self, x):
"""
Replace the `wintype` attribute.
:Args:
x : int
new `wintype` attribute.
"""
self._wintype = x
x, lmax = convertArgsToLists(x)
[obj.setWinType(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
@property
def input(self):
"""PyoObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def size(self):
"""int. FFT size."""
return self._size
@size.setter
def size(self, x): self.setSize(x)
@property
def overlaps(self):
"""int. FFT overlap factor."""
return self._overlaps
@overlaps.setter
def overlaps(self, x): self.setOverlaps(x)
@property
def wintype(self):
"""int. Windowing method."""
return self._wintype
@wintype.setter
def wintype(self, x): self.setWinType(x)
class PVSynth(PyoObject):
"""
Phase Vocoder synthesis object.
PVSynth takes a PyoPVObject as its input and performs
the spectral to time domain conversion on it. This step
converts the phase vocoder magnitude and true frequency
streams back to a real signal.
:Parent: :py:class:`PyoObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
wintype : int, optional
Shape of the envelope used to filter each input frame.
Possible shapes are:
0. rectangular (no windowing)
1. Hamming
2. Hanning (default)
3. Bartlett (triangular)
4. Blackman 3-term
5. Blackman-Harris 4-term
6. Blackman-Harris 7-term
7. Tukey (alpha = 0.66)
8. Sine (half-sine window)
>>> s = Server().boot()
>>> s.start()
>>> a = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=0.7)
>>> pva = PVAnal(a, size=1024, overlaps=4, wintype=2)
>>> pvs = PVSynth(pva).mix(2).out()
"""
def __init__(self, input, wintype=2, mul=1, add=0):
pyoArgsAssert(self, "piOO", input, wintype, mul, add)
PyoObject.__init__(self, mul, add)
self._input = input
self._wintype = wintype
input, wintype, mul, add, lmax = convertArgsToLists(self._input, wintype, mul, add)
self._base_objs = [PVSynth_base(wrap(input,i), wrap(wintype,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setWinType(self, x):
"""
Replace the `wintype` attribute.
:Args:
x : int
new `wintype` attribute.
"""
pyoArgsAssert(self, "i", x)
self._wintype = x
x, lmax = convertArgsToLists(x)
[obj.setWinType(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMapMul(self._mul)]
PyoObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def wintype(self):
"""int. Windowing method."""
return self._wintype
@wintype.setter
def wintype(self, x): self.setWinType(x)
class PVAddSynth(PyoObject):
"""
Phase Vocoder additive synthesis object.
PVAddSynth takes a PyoPVObject as its input and resynthesizes
the real signal using the magnitude and true frequency
streams to control the amplitude and frequency envelopes of an
oscillator bank.
:Parent: :py:class:`PyoObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
pitch : float or PyoObject, optional
Transposition factor. Defaults to 1.
num : int, optional
Number of oscillators used to synthesize the
output sound. Defaults to 100.
first : int, optional
The first bin to synthesize, starting from 0.
Defaults to 0.
inc : int, optional
Starting from bin `first`, resynthesize bins
`inc` apart. Defaults to 1.
>>> s = Server().boot()
>>> s.start()
>>> a = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=0.7)
>>> pva = PVAnal(a, size=1024, overlaps=4, wintype=2)
>>> pvs = PVAddSynth(pva, pitch=1.25, num=100, first=0, inc=2).out()
"""
def __init__(self, input, pitch=1, num=100, first=0, inc=1, mul=1, add=0):
pyoArgsAssert(self, "pOiiiOO", input, pitch, num, first, inc, mul, add)
PyoObject.__init__(self, mul, add)
self._input = input
self._pitch = pitch
self._num = num
self._first = first
self._inc = inc
input, pitch, num, first, inc, mul, add, lmax = convertArgsToLists(self._input, pitch, num, first, inc, mul, add)
self._base_objs = [PVAddSynth_base(wrap(input,i), wrap(pitch,i), wrap(num,i), wrap(first,i), wrap(inc,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setPitch(self, x):
"""
Replace the `pitch` attribute.
:Args:
x : float or PyoObject
new `pitch` attribute.
"""
pyoArgsAssert(self, "O", x)
self._pitch = x
x, lmax = convertArgsToLists(x)
[obj.setPitch(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setNum(self, x):
"""
Replace the `num` attribute.
:Args:
x : int
new `num` attribute.
"""
pyoArgsAssert(self, "i", x)
self._num = x
x, lmax = convertArgsToLists(x)
[obj.setNum(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setFirst(self, x):
"""
Replace the `first` attribute.
:Args:
x : int
new `first` attribute.
"""
pyoArgsAssert(self, "i", x)
self._first = x
x, lmax = convertArgsToLists(x)
[obj.setFirst(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setInc(self, x):
"""
Replace the `inc` attribute.
:Args:
x : int
new `inc` attribute.
"""
pyoArgsAssert(self, "i", x)
self._inc = x
x, lmax = convertArgsToLists(x)
[obj.setInc(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0.25, 4, "lin", "pitch", self._pitch),
SLMapMul(self._mul)]
PyoObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def pitch(self):
"""float or PyoObject. Transposition factor."""
return self._pitch
@pitch.setter
def pitch(self, x): self.setPitch(x)
@property
def num(self):
"""int. Number of oscillators."""
return self._num
@num.setter
def num(self, x): self.setNum(x)
@property
def first(self):
"""int. First bin to synthesize."""
return self._first
@first.setter
def first(self, x): self.setFirst(x)
@property
def inc(self):
"""int. Synthesized bin increment."""
return self._inc
@inc.setter
def inc(self, x): self.setInc(x)
class PVTranspose(PyoPVObject):
"""
Transpose the frequency components of a pv stream.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
transpo : float or PyoObject, optional
Transposition factor. Defaults to 1.
>>> s = Server().boot()
>>> s.start()
>>> sf = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=.7)
>>> pva = PVAnal(sf, size=1024)
>>> pvt = PVTranspose(pva, transpo=1.5)
>>> pvs = PVSynth(pvt).out()
>>> dry = Delay(sf, delay=1024./s.getSamplingRate(), mul=.7).out(1)
"""
def __init__(self, input, transpo=1):
pyoArgsAssert(self, "pO", input, transpo)
PyoPVObject.__init__(self)
self._input = input
self._transpo = transpo
input, transpo, lmax = convertArgsToLists(self._input, transpo)
self._base_objs = [PVTranspose_base(wrap(input,i), wrap(transpo,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setTranspo(self, x):
"""
Replace the `transpo` attribute.
:Args:
x : int
new `transpo` attribute.
"""
pyoArgsAssert(self, "O", x)
self._transpo = x
x, lmax = convertArgsToLists(x)
[obj.setTranspo(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0.25, 4, "lin", "transpo", self._transpo)]
PyoPVObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def transpo(self):
"""float or PyoObject. Transposition factor."""
return self._transpo
@transpo.setter
def transpo(self, x): self.setTranspo(x)
class PVVerb(PyoPVObject):
"""
Spectral domain reverberation.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
revtime : float or PyoObject, optional
Reverberation factor, between 0 and 1.
Defaults to 0.75.
damp : float or PyoObject, optional
High frequency damping factor, between 0 and 1.
1 means no damping and 0 is the most damping.
Defaults to 0.75.
>>> s = Server().boot()
>>> s.start()
>>> sf = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=.5)
>>> pva = PVAnal(sf, size=2048)
>>> pvg = PVGate(pva, thresh=-36, damp=0)
>>> pvv = PVVerb(pvg, revtime=0.95, damp=0.95)
>>> pvs = PVSynth(pvv).mix(2).out()
>>> dry = Delay(sf, delay=2048./s.getSamplingRate(), mul=.4).mix(2).out()
"""
def __init__(self, input, revtime=0.75, damp=0.75):
pyoArgsAssert(self, "pOO", input, revtime, damp)
PyoPVObject.__init__(self)
self._input = input
self._revtime = revtime
self._damp = damp
input, revtime, damp, lmax = convertArgsToLists(self._input, revtime, damp)
self._base_objs = [PVVerb_base(wrap(input,i), wrap(revtime,i), wrap(damp,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setRevtime(self, x):
"""
Replace the `revtime` attribute.
:Args:
x : int
new `revtime` attribute.
"""
pyoArgsAssert(self, "O", x)
self._revtime = x
x, lmax = convertArgsToLists(x)
[obj.setRevtime(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setDamp(self, x):
"""
Replace the `damp` attribute.
:Args:
x : int
new `damp` attribute.
"""
pyoArgsAssert(self, "O", x)
self._damp = x
x, lmax = convertArgsToLists(x)
[obj.setDamp(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0, 1, "lin", "revtime", self._revtime),
SLMap(0, 1, "lin", "damp", self._damp)]
PyoPVObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def revtime(self):
"""float or PyoObject. Reverberation factor."""
return self._revtime
@revtime.setter
def revtime(self, x): self.setRevtime(x)
@property
def damp(self):
"""float or PyoObject. High frequency damping factor."""
return self._damp
@damp.setter
def damp(self, x): self.setDamp(x)
class PVGate(PyoPVObject):
"""
Spectral gate.
:Parent: :py:class:`PyoObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
thresh : float or PyoObject, optional
Threshold factor in dB. Bins below that threshold
will be scaled by `damp` factor. Defaults to -20.
damp : float or PyoObject, optional
Damping factor for low amplitude bins. Defaults to 0.
>>> s = Server().boot()
>>> s.start()
>>> sf = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=.5)
>>> pva = PVAnal(sf, size=2048)
>>> pvg = PVGate(pva, thresh=-50, damp=0)
>>> pvs = PVSynth(pvg).mix(2).out()
"""
def __init__(self, input, thresh=-20, damp=0.):
pyoArgsAssert(self, "pOO", input, thresh, damp)
PyoPVObject.__init__(self)
self._input = input
self._thresh = thresh
self._damp = damp
input, thresh, damp, lmax = convertArgsToLists(self._input, thresh, damp)
self._base_objs = [PVGate_base(wrap(input,i), wrap(thresh,i), wrap(damp,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setThresh(self, x):
"""
Replace the `thresh` attribute.
:Args:
x : int
new `thresh` attribute.
"""
pyoArgsAssert(self, "O", x)
self._thresh = x
x, lmax = convertArgsToLists(x)
[obj.setThresh(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setDamp(self, x):
"""
Replace the `damp` attribute.
:Args:
x : int
new `damp` attribute.
"""
pyoArgsAssert(self, "O", x)
self._damp = x
x, lmax = convertArgsToLists(x)
[obj.setDamp(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(-120, 18, "lin", "thresh", self._thresh),
SLMap(0, 2, "lin", "damp", self._damp)]
PyoPVObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def thresh(self):
"""float or PyoObject. Threshold factor."""
return self._thresh
@thresh.setter
def thresh(self, x): self.setThresh(x)
@property
def damp(self):
"""float or PyoObject. Damping factor for low amplitude bins."""
return self._damp
@damp.setter
def damp(self, x): self.setDamp(x)
class PVCross(PyoPVObject):
"""
Performs cross-synthesis between two phase vocoder streaming objects.
The amplitudes from `input` and `input2` (scaled by `fade` argument)
are applied to the frequencies of `input`.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process. Frequencies from
this pv stream are used to compute the output signal.
input2 : PyoPVObject
Phase vocoder streaming object which gives the second set of
magnitudes. Frequencies from this pv stream are not used.
fade : float or PyoObject, optional
Scaling factor for the output amplitudes, between 0 and 1.
0 means amplitudes from `input` and 1 means amplitudes from `input2`.
Defaults to 1.
.. note::
The two input pv streams must have the same size and overlaps. It is
the responsibility of the user to be sure they are consistent. To change
the size (or the overlaps) of the phase vocoder process, one must
write a function to change both at the same time (see the example below).
Another possibility is to use channel expansion to analyse both sounds
with the same PVAnal object.
>>> s = Server().boot()
>>> s.start()
>>> sf = SineLoop(freq=[80,81], feedback=0.07, mul=.5)
>>> sf2 = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=.5)
>>> pva = PVAnal(sf)
>>> pva2 = PVAnal(sf2)
>>> pvc = PVCross(pva, pva2, fade=1)
>>> pvs = PVSynth(pvc).out()
>>> def size(x):
... pva.size = x
... pva2.size = x
>>> def olaps(x):
... pva.overlaps = x
... pva2.overlaps = x
"""
def __init__(self, input, input2, fade=1):
pyoArgsAssert(self, "ppO", input, input2, fade)
PyoPVObject.__init__(self)
self._input = input
self._input2 = input2
self._fade = fade
input, input2, fade, lmax = convertArgsToLists(self._input, self._input2, fade)
self._base_objs = [PVCross_base(wrap(input,i), wrap(input2,i), wrap(fade,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setInput2(self, x):
"""
Replace the `input2` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input2 = x
x, lmax = convertArgsToLists(x)
[obj.setInput2(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setFade(self, x):
"""
Replace the `fade` attribute.
:Args:
x : float or PyoObject
new `fade` attribute.
"""
pyoArgsAssert(self, "O", x)
self._fade = x
x, lmax = convertArgsToLists(x)
[obj.setFade(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0, 1, "lin", "fade", self._fade)]
PyoPVObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def input2(self):
"""PyoPVObject. Second set of amplitudes."""
return self._input2
@input2.setter
def input2(self, x): self.setInput2(x)
@property
def fade(self):
"""float or PyoObject. Scaling factor."""
return self._fade
@fade.setter
def fade(self, x): self.setFade(x)
class PVMult(PyoPVObject):
"""
Multiply magnitudes from two phase vocoder streaming objects.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process. Frequencies from
this pv stream are used to compute the output signal.
input2 : PyoPVObject
Phase vocoder streaming object which gives the second set of
magnitudes. Frequencies from this pv stream are not used.
.. note::
The two input pv streams must have the same size and overlaps. It is
the responsibility of the user to be sure they are consistent. To change
the size (or the overlaps) of the phase vocoder process, one must
write a function to change both at the same time (see the example below).
Another possibility is to use channel expansion to analyse both sounds
with the same PVAnal object.
>>> s = Server().boot()
>>> s.start()
>>> sf = FM(carrier=[100,150], ratio=[.999,.5005], index=20, mul=.4)
>>> sf2 = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=.5)
>>> pva = PVAnal(sf)
>>> pva2 = PVAnal(sf2)
>>> pvc = PVMult(pva, pva2)
>>> pvs = PVSynth(pvc).out()
>>> def size(x):
... pva.size = x
... pva2.size = x
>>> def olaps(x):
... pva.overlaps = x
... pva2.overlaps = x
"""
def __init__(self, input, input2):
pyoArgsAssert(self, "pp", input, input2)
PyoPVObject.__init__(self)
self._input = input
self._input2 = input2
input, input2, lmax = convertArgsToLists(self._input, self._input2)
self._base_objs = [PVMult_base(wrap(input,i), wrap(input2,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setInput2(self, x):
"""
Replace the `input2` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input2 = x
x, lmax = convertArgsToLists(x)
[obj.setInput2(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def input2(self):
"""PyoPVObject. Second set of magnitudes."""
return self._input2
@input2.setter
def input2(self, x): self.setInput2(x)
class PVMorph(PyoPVObject):
"""
Performs spectral morphing between two phase vocoder streaming objects.
According to `fade` argument, the amplitudes from `input` and `input2`
are interpolated linearly while the frequencies are interpolated
exponentially.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object which gives the first set of
magnitudes and frequencies.
input2 : PyoPVObject
Phase vocoder streaming object which gives the second set of
magnitudes and frequencies.
fade : float or PyoObject, optional
Scaling factor for the output amplitudes and frequencies,
between 0 and 1. 0 is `input` and 1 is `input2`. Defaults to 0.5.
.. note::
The two input pv streams must have the same size and overlaps. It is
the responsibility of the user to be sure they are consistent. To change
the size (or the overlaps) of the phase vocoder process, one must
write a function to change both at the same time (see the example below).
Another possibility is to use channel expansion to analyse both sounds
with the same PVAnal object.
>>> s = Server().boot()
>>> s.start()
>>> sf = SineLoop(freq=[100,101], feedback=0.12, mul=.5)
>>> sf2 = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=.5)
>>> pva = PVAnal(sf)
>>> pva2 = PVAnal(sf2)
>>> pvc = PVMorph(pva, pva2, fade=0.5)
>>> pvs = PVSynth(pvc).out()
>>> def size(x):
... pva.size = x
... pva2.size = x
>>> def olaps(x):
... pva.overlaps = x
... pva2.overlaps = x
"""
def __init__(self, input, input2, fade=0.5):
pyoArgsAssert(self, "ppO", input, input2, fade)
PyoPVObject.__init__(self)
self._input = input
self._input2 = input2
self._fade = fade
input, input2, fade, lmax = convertArgsToLists(self._input, self._input2, fade)
self._base_objs = [PVMorph_base(wrap(input,i), wrap(input2,i), wrap(fade,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setInput2(self, x):
"""
Replace the `input2` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input2 = x
x, lmax = convertArgsToLists(x)
[obj.setInput2(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setFade(self, x):
"""
Replace the `fade` attribute.
:Args:
x : float or PyoObject
new `fade` attribute.
"""
pyoArgsAssert(self, "O", x)
self._fade = x
x, lmax = convertArgsToLists(x)
[obj.setFade(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0, 1, "lin", "fade", self._fade)]
PyoPVObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. First input signal."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def input2(self):
"""PyoPVObject. Second input signal."""
return self._input2
@input2.setter
def input2(self, x): self.setInput2(x)
@property
def fade(self):
"""float or PyoObject. Scaling factor."""
return self._fade
@fade.setter
def fade(self, x): self.setFade(x)
class PVFilter(PyoPVObject):
"""
Spectral filter.
PVFilter filters frequency components of a pv stream
according to the shape drawn in the table given in
argument.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
table : PyoTableObject
Table containing the filter shape. If the
table length is smaller than fftsize/2,
remaining bins will be set to 0.
gain : float or PyoObject, optional
Gain of the filter applied to the input spectrum.
Defaults to 1.
mode : int, optional
Table scanning mode. Defaults to 0.
If 0, bin indexes outside table size are set to 0.
If 1, bin indexes are scaled over table length.
>>> s = Server().boot()
>>> s.start()
>>> t = ExpTable([(0,1),(61,1),(71,0),(131,1),(171,0),(511,0)], size=512)
>>> src = Noise(.4)
>>> pva = PVAnal(src, size=1024)
>>> pvf = PVFilter(pva, t)
>>> pvs = PVSynth(pvf).out()
"""
def __init__(self, input, table, gain=1, mode=0):
pyoArgsAssert(self, "ptOi", input, table, gain, mode)
PyoPVObject.__init__(self)
self._input = input
self._table = table
self._gain = gain
self._mode = mode
input, table, gain, mode, lmax = convertArgsToLists(self._input, table, gain, mode)
self._base_objs = [PVFilter_base(wrap(input,i), wrap(table,i), wrap(gain,i), wrap(mode,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setTable(self, x):
"""
Replace the `table` attribute.
:Args:
x : PyoTableObject
new `table` attribute.
"""
pyoArgsAssert(self, "t", x)
self._table = x
x, lmax = convertArgsToLists(x)
[obj.setTable(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setGain(self, x):
"""
Replace the `gain` attribute.
:Args:
x : float or PyoObject
new `gain` attribute.
"""
pyoArgsAssert(self, "O", x)
self._gain = x
x, lmax = convertArgsToLists(x)
[obj.setGain(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setMode(self, x):
"""
Replace the `mode` attribute.
:Args:
x : int
new `mode` attribute.
"""
pyoArgsAssert(self, "i", x)
self._mode = x
x, lmax = convertArgsToLists(x)
[obj.setMode(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0, 1, "lin", "gain", self._gain)]
PyoPVObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def table(self):
"""PyoTableObject. Table containing the filter shape."""
return self._table
@table.setter
def table(self, x): self.setTable(x)
@property
def gain(self):
"""float or PyoObject. Gain of the filter."""
return self._gain
@gain.setter
def gain(self, x): self.setGain(x)
@property
def mode(self):
"""int. Table scanning mode."""
return self._mode
@mode.setter
def mode(self, x): self.setMode(x)
class PVDelay(PyoPVObject):
"""
Spectral delays.
PVDelay applies different delay times and feedbacks for
each bin of a phase vocoder analysis. Delay times and
feedbacks are specified with PyoTableObjects.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
deltable : PyoTableObject
Table containing delay times, as integer multipliers
of the FFT hopsize (fftsize / overlaps).
If the table length is smaller than fftsize/2,
remaining bins will be set to 0.
feedtable : PyoTableObject
Table containing feedback values, between -1 and 1.
If the table length is smaller than fftsize/2,
remaining bins will be set to 0.
maxdelay : float, optional
Maximum delay time in seconds. Available at initialization
time only. Defaults to 1.0.
mode : int, optional
Tables scanning mode. Defaults to 0.
If 0, bin indexes outside table size are set to 0.
If 1, bin indexes are scaled over table length.
>>> s = Server().boot()
>>> s.start()
>>> SIZE = 1024
>>> SIZE2 = SIZE / 2
>>> OLAPS = 4
>>> MAXDEL = 2.0 # two seconds delay memories
>>> FRAMES = int(MAXDEL * s.getSamplingRate() / (SIZE / OLAPS))
>>> # Edit tables with the graph() method. yrange=(0, FRAMES) for delays table
>>> dt = DataTable(size=SIZE2, init=[i / float(SIZE2) * FRAMES for i in range(SIZE2)])
>>> ft = DataTable(size=SIZE2, init=[0.5]*SIZE2)
>>> src = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=0.5)
>>> pva = PVAnal(src, size=SIZE, overlaps=OLAPS)
>>> pvd = PVDelay(pva, dt, ft, maxdelay=MAXDEL)
>>> pvs = PVSynth(pvd).out()
"""
def __init__(self, input, deltable, feedtable, maxdelay=1.0, mode=0):
pyoArgsAssert(self, "pttni", input, deltable, feedtable, maxdelay, mode)
PyoPVObject.__init__(self)
self._input = input
self._deltable = deltable
self._feedtable = feedtable
self._maxdelay = maxdelay
self._mode = mode
input, deltable, feedtable, maxdelay, mode, lmax = convertArgsToLists(self._input, deltable, feedtable, maxdelay, mode)
self._base_objs = [PVDelay_base(wrap(input,i), wrap(deltable,i), wrap(feedtable,i), wrap(maxdelay,i), wrap(mode,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setDeltable(self, x):
"""
Replace the `deltable` attribute.
:Args:
x : PyoTableObject
new `deltable` attribute.
"""
pyoArgsAssert(self, "t", x)
self._deltable = x
x, lmax = convertArgsToLists(x)
[obj.setDeltable(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setFeedtable(self, x):
"""
Replace the `feedtable` attribute.
:Args:
x : PyoTableObject
new `feedtable` attribute.
"""
pyoArgsAssert(self, "t", x)
self._feedtable = x
x, lmax = convertArgsToLists(x)
[obj.setFeedtable(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setMode(self, x):
"""
Replace the `mode` attribute.
:Args:
x : int
new `mode` attribute.
"""
pyoArgsAssert(self, "i", x)
self._mode = x
x, lmax = convertArgsToLists(x)
[obj.setMode(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def deltable(self):
"""PyoTableObject. Table containing the delay times."""
return self._deltable
@deltable.setter
def deltable(self, x): self.setDeltable(x)
@property
def feedtable(self):
"""PyoTableObject. Table containing feedback values."""
return self._feedtable
@feedtable.setter
def feedtable(self, x): self.setFeedtable(x)
@property
def mode(self):
"""int. Table scanning mode."""
return self._mode
@mode.setter
def mode(self, x): self.setMode(x)
class PVBuffer(PyoPVObject):
"""
Phase vocoder buffer and playback with transposition.
PVBuffer keeps `length` seconds of pv analysis in memory
and gives control on playback position and transposition.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
index : PyoObject
Playback position, as audio stream, normalized
between 0 and 1.
pitch : float or PyoObject, optional
Transposition factor. Defaults to 1.
length : float, optional
Memory length in seconds. Available at initialization
time only. Defaults to 1.0.
.. note::
The play() method can be called to start a new recording of
the current pv input.
>>> s = Server().boot()
>>> s.start()
>>> f = SNDS_PATH+'/transparent.aif'
>>> f_len = sndinfo(f)[1]
>>> src = SfPlayer(f, mul=0.5)
>>> index = Phasor(freq=1.0/f_len*0.25, phase=0.9)
>>> pva = PVAnal(src, size=1024, overlaps=8)
>>> pvb = PVBuffer(pva, index, pitch=1.25, length=f_len)
>>> pvs = PVSynth(pvb).out()
"""
def __init__(self, input, index, pitch=1.0, length=1.0):
pyoArgsAssert(self, "poOn", input, index, pitch, length)
PyoPVObject.__init__(self)
self._input = input
self._index = index
self._pitch = pitch
self._length = length
input, index, pitch, length, lmax = convertArgsToLists(self._input, index, pitch, length)
self._base_objs = [PVBuffer_base(wrap(input,i), wrap(index,i), wrap(pitch,i), wrap(length,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setIndex(self, x):
"""
Replace the `index` attribute.
:Args:
x : PyoObject
new `index` attribute.
"""
pyoArgsAssert(self, "o", x)
self._index = x
x, lmax = convertArgsToLists(x)
[obj.setIndex(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setPitch(self, x):
"""
Replace the `pitch` attribute.
:Args:
x : float or PyoObject
new `pitch` attribute.
"""
pyoArgsAssert(self, "O", x)
self._pitch = x
x, lmax = convertArgsToLists(x)
[obj.setPitch(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0.25, 4, "lin", "pitch", self._pitch)]
PyoPVObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def index(self):
"""PyoObject. Reader's normalized position."""
return self._index
@index.setter
def index(self, x): self.setIndex(x)
@property
def pitch(self):
"""float or PyoObject. Transposition factor."""
return self._pitch
@pitch.setter
def pitch(self, x): self.setPitch(x)
class PVShift(PyoPVObject):
"""
Spectral domain frequency shifter.
PVShift linearly moves the analysis bins by the amount, in Hertz,
specified by the `shift` argument.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
shift : float or PyoObject, optional
Frequency shift factor. Defaults to 0.
>>> s = Server().boot()
>>> s.start()
>>> sf = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=.7)
>>> pva = PVAnal(sf, size=1024)
>>> pvt = PVShift(pva, shift=500)
>>> pvs = PVSynth(pvt).out()
"""
def __init__(self, input, shift=0):
pyoArgsAssert(self, "pO", input, shift)
PyoPVObject.__init__(self)
self._input = input
self._shift = shift
input, shift, lmax = convertArgsToLists(self._input, shift)
self._base_objs = [PVShift_base(wrap(input,i), wrap(shift,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setShift(self, x):
"""
Replace the `shift` attribute.
:Args:
x : float or PyoObject
new `shift` attribute.
"""
pyoArgsAssert(self, "O", x)
self._shift = x
x, lmax = convertArgsToLists(x)
[obj.setShift(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(-5000, 5000, "lin", "shift", self._shift)]
PyoPVObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def shift(self):
"""float or PyoObject. Frequency shift factor."""
return self._shift
@shift.setter
def shift(self, x): self.setShift(x)
class PVAmpMod(PyoPVObject):
"""
Performs frequency-independent amplitude modulations.
PVAmpMod modulates the magnitude of each bin of a pv
stream with an independent oscillator. `basefreq` and
`spread` are used to derive the frequency of each
modulating oscillator.
Internally, the following operations are applied to
derive oscillator frequencies (`i` is the bin number):
spread = spread * 0.001 + 1.0
f_i = basefreq * pow(spread, i)
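For example, with basefreq=4 and spread=0.5 (the values used in
the example below), the internal spread becomes 1.0005, so bin 0
is modulated at 4.0 Hz while bin 100 is modulated at
4 * 1.0005**100, roughly 4.21 Hz.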
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
basefreq : float or PyoObject, optional
Base modulation frequency, in Hertz.
Defaults to 1.
spread : float or PyoObject, optional
Spreading factor for oscillator frequencies, between
-1 and 1. 0 means every oscillator has the same frequency.
Defaults to 0.
>>> s = Server().boot()
>>> s.start()
>>> src = PinkNoise(.3)
>>> pva = PVAnal(src, size=1024, overlaps=4)
>>> pvm = PVAmpMod(pva, basefreq=4, spread=0.5)
>>> pvs = PVSynth(pvm).out()
"""
def __init__(self, input, basefreq=1, spread=0):
pyoArgsAssert(self, "pOO", input, basefreq, spread)
PyoPVObject.__init__(self)
self._input = input
self._basefreq = basefreq
self._spread = spread
input, basefreq, spread, lmax = convertArgsToLists(self._input, basefreq, spread)
self._base_objs = [PVAmpMod_base(wrap(input,i), wrap(basefreq,i), wrap(spread,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setBasefreq(self, x):
"""
Replace the `basefreq` attribute.
:Args:
x : float or PyoObject
new `basefreq` attribute.
"""
pyoArgsAssert(self, "O", x)
self._basefreq = x
x, lmax = convertArgsToLists(x)
[obj.setBasefreq(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setSpread(self, x):
"""
Replace the `spread` attribute.
:Args:
x : float or PyoObject
new `spread` attribute.
"""
pyoArgsAssert(self, "O", x)
self._spread = x
x, lmax = convertArgsToLists(x)
[obj.setSpread(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def reset(self):
"""
Resets modulation pointers to 0.
"""
[obj.reset() for obj in self._base_objs]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0.1, 20, "log", "basefreq", self._basefreq),
SLMap(-1, 1, "lin", "spread", self._spread)]
PyoPVObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def basefreq(self):
"""float or PyoObject. Modulator's base frequency."""
return self._basefreq
@basefreq.setter
def basefreq(self, x): self.setBasefreq(x)
@property
def spread(self):
"""float or PyoObject. Modulator's frequency spreading factor."""
return self._spread
@spread.setter
def spread(self, x): self.setSpread(x)
class PVFreqMod(PyoPVObject):
"""
Performs frequency-independent frequency modulations.
PVFreqMod modulates the frequency of each bin of a pv
stream with an independent oscillator. `basefreq` and
`spread` are used to derive the frequency of each
modulating oscillator.
Internally, the following operations are applied to
derive oscillator frequencies (`i` is the bin number):
spread = spread * 0.001 + 1.0
f_i = basefreq * pow(spread, i)
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
basefreq : float or PyoObject, optional
Base modulation frequency, in Hertz.
Defaults to 1.
spread : float or PyoObject, optional
Spreading factor for oscillator frequencies, between
-1 and 1. 0 means every oscillator has the same frequency.
Defaults to 0.
depth : float or PyoObject, optional
Amplitude of the modulating oscillators, between 0 and 1.
Defaults to 0.1.
>>> s = Server().boot()
>>> s.start()
>>> src = SfPlayer(SNDS_PATH+"/accord.aif", loop=True, mul=0.5)
>>> pva = PVAnal(src, size=1024, overlaps=4)
>>> pvm = PVFreqMod(pva, basefreq=8, spread=0.75, depth=0.05)
>>> pvs = PVSynth(pvm).out()
"""
def __init__(self, input, basefreq=1, spread=0, depth=0.1):
pyoArgsAssert(self, "pOOO", input, basefreq, spread, depth)
PyoPVObject.__init__(self)
self._input = input
self._basefreq = basefreq
self._spread = spread
self._depth = depth
input, basefreq, spread, depth, lmax = convertArgsToLists(self._input, basefreq, spread, depth)
self._base_objs = [PVFreqMod_base(wrap(input,i), wrap(basefreq,i), wrap(spread,i), wrap(depth,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setBasefreq(self, x):
"""
Replace the `basefreq` attribute.
:Args:
x : float or PyoObject
new `basefreq` attribute.
"""
pyoArgsAssert(self, "O", x)
self._basefreq = x
x, lmax = convertArgsToLists(x)
[obj.setBasefreq(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setSpread(self, x):
"""
Replace the `spread` attribute.
:Args:
x : float or PyoObject
new `spread` attribute.
"""
pyoArgsAssert(self, "O", x)
self._spread = x
x, lmax = convertArgsToLists(x)
[obj.setSpread(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setDepth(self, x):
"""
Replace the `depth` attribute.
:Args:
x : float or PyoObject
new `depth` attribute.
"""
pyoArgsAssert(self, "O", x)
self._depth = x
x, lmax = convertArgsToLists(x)
[obj.setDepth(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def reset(self):
"""
Resets modulation pointers to 0.
"""
[obj.reset() for obj in self._base_objs]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0.1, 20, "log", "basefreq", self._basefreq),
SLMap(-1, 1, "lin", "spread", self._spread),
SLMap(0, 1, "lin", "depth", self._depth)]
PyoPVObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def basefreq(self):
"""float or PyoObject. Modulator's base frequency."""
return self._basefreq
@basefreq.setter
def basefreq(self, x): self.setBasefreq(x)
@property
def spread(self):
"""float or PyoObject. Modulator's frequencies spreading factor."""
return self._spread
@spread.setter
def spread(self, x): self.setSpread(x)
@property
def depth(self):
"""float or PyoObject. Amplitude of the modulators."""
return self._depth
@depth.setter
def depth(self, x): self.setDepth(x)
class PVBufLoops(PyoPVObject):
"""
Phase vocoder buffer with bin-independent speed playback.
PVBufLoops keeps `length` seconds of pv analysis in memory
and gives control on playback position independently for
every frequency bin.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
low : float or PyoObject, optional
Lowest bin speed factor. Defaults to 1.0.
high : float or PyoObject, optional
Highest bin speed factor. Defaults to 1.0.
mode : int, optional
Speed distribution algorithm. Available algorithms are:
0. linear, line between `low` and `high` (default)
1. exponential, exponential line between `low` and `high`
2. logarithmic, logarithmic line between `low` and `high`
3. random, uniform random between `low` and `high`
4. rand expon min, exponential random from `low` to `high`
5. rand expon max, exponential random from `high` to `low`
6. rand bi-expon, bipolar exponential random between `low` and `high`
length : float, optional
Memory length in seconds. Available at initialization
time only. Defaults to 1.0.
.. note::
The play() method can be called to start a new recording of
the current pv input.
>>> s = Server().boot()
>>> s.start()
>>> f = SNDS_PATH+'/transparent.aif'
>>> f_len = sndinfo(f)[1]
>>> src = SfPlayer(f, mul=0.5)
>>> pva = PVAnal(src, size=1024, overlaps=8)
>>> pvb = PVBufLoops(pva, low=0.9, high=1.1, mode=3, length=f_len)
>>> pvs = PVSynth(pvb).out()
"""
def __init__(self, input, low=1.0, high=1.0, mode=0, length=1.0):
pyoArgsAssert(self, "pOOin", input, low, high, mode, length)
PyoPVObject.__init__(self)
self._input = input
self._low = low
self._high = high
self._mode = mode
self._length = length
input, low, high, mode, length, lmax = convertArgsToLists(self._input, low, high, mode, length)
self._base_objs = [PVBufLoops_base(wrap(input,i), wrap(low,i), wrap(high,i), wrap(mode,i), wrap(length,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setLow(self, x):
"""
Replace the `low` attribute.
:Args:
x : float or PyoObject
new `low` attribute.
"""
pyoArgsAssert(self, "O", x)
self._low = x
x, lmax = convertArgsToLists(x)
[obj.setLow(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setHigh(self, x):
"""
Replace the `high` attribute.
:Args:
x : float or PyoObject
new `high` attribute.
"""
pyoArgsAssert(self, "O", x)
self._high = x
x, lmax = convertArgsToLists(x)
[obj.setHigh(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setMode(self, x):
"""
Replace the `mode` attribute.
:Args:
x : int
new `mode` attribute.
"""
pyoArgsAssert(self, "i", x)
self._mode = x
x, lmax = convertArgsToLists(x)
[obj.setMode(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def reset(self):
"""
Reset pointer positions to 0.
"""
[obj.reset() for obj in self._base_objs]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(-4, 4, "lin", "low", self._low),
SLMap(-4, 4, "lin", "high", self._high)]
PyoPVObject.ctrl(self, map_list, title, wxnoserver)
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def low(self):
"""float or PyoObject. Lowest bin speed factor."""
return self._low
@low.setter
def low(self, x): self.setLow(x)
@property
def high(self):
"""float or PyoObject. Highest bin speed factor."""
return self._high
@high.setter
def high(self, x): self.setHigh(x)
@property
def mode(self):
"""int. Speed distribution algorithm."""
return self._mode
@mode.setter
def mode(self, x): self.setMode(x)
class PVBufTabLoops(PyoPVObject):
"""
Phase vocoder buffer with bin-independent speed playback.
PVBufTabLoops keeps `length` seconds of pv analysis in memory
and gives control on playback position, using a PyoTableObject,
independently for every frequency bin.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object to process.
speed : PyoTableObject
Table which specifies the speed of the bin playback readers.
length : float, optional
Memory length in seconds. Available at initialization
time only. Defaults to 1.0.
.. note::
The play() method can be called to start a new recording of
the current pv input.
>>> s = Server().boot()
>>> s.start()
>>> f = SNDS_PATH+'/transparent.aif'
>>> f_len = sndinfo(f)[1]
>>> src = SfPlayer(f, mul=0.5)
>>> spd = ExpTable([(0,1), (512,0.5)], exp=6, size=512)
>>> pva = PVAnal(src, size=1024, overlaps=8)
>>> pvb = PVBufTabLoops(pva, spd, length=f_len)
>>> pvs = PVSynth(pvb).out()
"""
def __init__(self, input, speed, length=1.0):
pyoArgsAssert(self, "ptn", input, speed, length)
PyoPVObject.__init__(self)
self._input = input
self._speed = speed
self._length = length
input, speed, length, lmax = convertArgsToLists(self._input, speed, length)
self._base_objs = [PVBufTabLoops_base(wrap(input,i), wrap(speed,i), wrap(length,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setSpeed(self, x):
"""
Replace the `speed` attribute.
:Args:
x : PyoTableObject
new `speed` attribute.
"""
pyoArgsAssert(self, "t", x)
self._speed = x
x, lmax = convertArgsToLists(x)
[obj.setSpeed(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def reset(self):
"""
Reset pointer positions to 0.
"""
[obj.reset() for obj in self._base_objs]
@property
def input(self):
"""PyoPVObject. Input signal to process."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def speed(self):
"""PyoTableObject. Table which specify the speed of bin playback readers."""
return self._speed
@speed.setter
def speed(self, x): self.setSpeed(x)
class PVMix(PyoPVObject):
"""
Mix the most prominent components from two phase vocoder streaming objects.
:Parent: :py:class:`PyoPVObject`
:Args:
input : PyoPVObject
Phase vocoder streaming object 1.
input2 : PyoPVObject
Phase vocoder streaming object 2.
.. note::
The two input pv stream must have the same size and overlaps. It is
the responsibility of the user to be sure they are consistent. To change
the size (or the overlaps) of the phase vocoder process, one must
write a function to change both at the same time (see the example below).
Another possibility is to use channel expansion to analyse both sounds
with the same PVAnal object.
>>> s = Server().boot()
>>> s.start()
>>> sf = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=.5)
>>> sf2 = SfPlayer(SNDS_PATH+"/accord.aif", loop=True, mul=.5)
>>> pva = PVAnal(sf)
>>> pva2 = PVAnal(sf2)
>>> pvm = PVMix(pva, pva2)
>>> pvs = PVSynth(pvm).out()
>>> def size(x):
... pva.size = x
... pva2.size = x
>>> def olaps(x):
... pva.overlaps = x
... pva2.overlaps = x
"""
def __init__(self, input, input2):
pyoArgsAssert(self, "pp", input, input2)
PyoPVObject.__init__(self)
self._input = input
self._input2 = input2
input, input2, lmax = convertArgsToLists(self._input, self._input2)
self._base_objs = [PVMix_base(wrap(input,i), wrap(input2,i)) for i in range(lmax)]
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setInput2(self, x):
"""
Replace the `input2` attribute.
:Args:
x : PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input2 = x
x, lmax = convertArgsToLists(x)
[obj.setInput2(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
@property
def input(self):
"""PyoPVObject. Phase vocoder streaming object 1."""
return self._input
@input.setter
def input(self, x): self.setInput(x)
@property
def input2(self):
"""PyoPVObject. Phase vocoder streaming object 2."""
return self._input2
@input2.setter
def input2(self, x): self.setInput2(x) | gpl-3.0 | -1,806,312,679,241,642,000 | 28.757255 | 161 | 0.571855 | false | 3.549365 | false | false | false |
allison-group/indigo-bondorder | archive/src/indigox/periodictable.py | 1 | 12545 | from indigox.exception import IndigoSearchError
class _Element(object):
def __init__(self, name, symbol, group, period, number, mass, atomic_radii,
covalent_radii, vdw_radii, chi, hyper=None):
self.name = name
self.symbol = symbol
self.group = group
self.period = period
self.number = number
self.mass = mass
self.atomic_radii = atomic_radii
self.covalent_radii = covalent_radii
self.vdw_radii = vdw_radii
self.chi = chi
if group < 13:
self.valence = group
elif group == 18 and symbol == 'He':
self.valence = 2
else:
self.valence = group - 10
if group == 1 or symbol == 'He':
self.octet = 2
elif group == 2:
self.octet = 4
elif group == 13:
self.octet = 6
else:
self.octet = 8
if hyper is None:
self.hyper = self.octet
else:
self.hyper = hyper
def __str__(self):
return self.symbol
def __eq__(self, c):
if self.symbol == c or self.number == c or self.name == c:
return True
return object.__eq__(self, c)
class _PeriodicTable(object):
def __init__(self, elements):
self.elements_number = dict()
self.elements_name = dict()
self.elements_symbol = dict()
for e in elements:
self.elements_number[e.number] = e
self.elements_name[e.name] = e
self.elements_symbol[e.symbol] = e
def __getattr__(self, name):
return self[name]
def __getitem__(self, name):
try:
int(name)
except ValueError:
intable = False
else:
intable = True
if not intable and name.title() in self.elements_name:
return self.elements_name[name.title()]
elif not intable and name.title() in self.elements_symbol:
return self.elements_symbol[name.title()]
elif intable and int(name) in self.elements_number:
return self.elements_number[int(name)]
else:
raise IndigoSearchError('Unknown element type: {}'
.format(name.title()))
_elements = [
# Name Symbol Group Period AtomicNumber Mass Radius Cvradius VdWradius chi
_Element("Nullium", "X", 18, 9, 0, 0.0000, 0.00, 0.0, 0.0, 0.0 ),
_Element("Actinium", "Ac", 3, 7, 89, 227.0278, 1.88, 0.0, 0.0, 1.3 ),
_Element("Aluminum", "Al", 13, 3, 13, 26.981539, 1.43, 1.25, 2.05, 1.61),
_Element("Americium", "Am", 0, 7, 95, 243.0614, 1.73, 0.0, 0.0, 1.3 ),
_Element("Antimony", "Sb", 15, 5, 51, 121.76, 1.82, 1.41, 2.2, 2.05),
_Element("Argon", "Ar", 18, 3, 18, 39.948, 1.74, 0.0, 1.91, 0.0 ),
_Element("Arsenic", "As", 15, 4, 33, 74.92159, 1.25, 1.21, 2.0, 2.18),
_Element("Astatine", "At", 17, 6, 85, 209.9871, 0.0, 0.0, 0.0, 1.96),
_Element("Barium", "Ba", 2, 6, 56, 137.327, 2.17, 1.98, 0.0, 0.89),
_Element("Berkelium", "Bk", 0, 7, 97, 247.0703, 1.70, 0.0, 0.0, 1.3 ),
_Element("Beryllium", "Be", 2, 2, 4, 9.012182, 1.13, 0.89, 0.0, 1.57),
_Element("Bismuth", "Bi", 15, 6, 83, 208.98037, 1.55, 1.52, 2.4, 2.0 ),
_Element("Bohrium", "Bh", 7, 7, 107, 262.12, 0.0, 0.0, 0.0, 0.0 ),
_Element("Boron", "B" , 13, 2, 5, 10.811, 0.83, 0.88, 2.08, 2.04),
_Element("Bromine", "Br", 17, 4, 35, 79.904, 0.0, 1.14, 1.95, 2.96, 12),
_Element("Cadmium", "Cd", 12, 5, 48, 112.411, 1.49, 1.41, 0.0, 1.69),
_Element("Caesium", "Cs", 1, 6, 55, 132.90543, 2.654, 2.35, 2.62, 0.79),
_Element("Calcium", "Ca", 2, 4, 20, 40.078, 1.97, 1.74, 0.0, 1.0 ),
_Element("Californium", "Cf", 0, 7, 98, 251.0796, 1.69, 0.0, 0.0, 1.3 ),
_Element("Carbon", "C", 14, 2, 6, 12.011, 0.77, 0.77, 1.85, 2.55),
_Element("Cerium", "Ce", 0, 6, 58, 140.115, 1.825, 1.65, 0.0, 1.12),
_Element("Chlorine", "Cl", 17, 3, 17, 35.4527, 0.0, 0.99, 1.81, 3.16),
_Element("Chromium", "Cr", 6, 4, 24, 51.9961, 1.25, 0.0, 0.0, 1.66),
_Element("Cobalt", "Co", 9, 4, 27, 58.9332, 1.25, 1.16, 0.0, 1.88),
_Element("Copper", "Cu", 11, 4, 29, 63.546, 1.28, 1.17, 0.0, 1.9 ),
_Element("Curium", "Cm", 0, 7, 96, 247.0703, 1.74, 0.0, 0.0, 1.3 ),
_Element("Dubnium", "Db", 4, 7, 104, 261.11, 0.0, 0.0, 0.0, 0.0 ),
_Element("Dysprosium", "Dy", 0, 6, 66, 162.5, 1.77, 1.59, 0.0, 1.23),
_Element("Einsteinium", "Es", 0, 7, 99, 252.083, 2.03, 0.0, 0.0, 1.3 ),
_Element("Erbium", "Er", 0, 6, 68, 167.26, 1.76, 1.57, 0.0, 1.25),
_Element("Europium", "Eu", 0, 6, 63, 151.965, 2.04, 1.85, 0.0, 1.2 ),
_Element("Fermium", "Fm", 0, 7, 100, 257.0951, 0.0, 0.0, 0.0, 1.3 ),
_Element("Fluorine", "F" , 17, 2, 9, 18.9984032, 0.709, 0.58, 1.35, 3.98),
_Element("Francium", "Fr", 1, 7, 87, 223.0197, 2.7, 0.0, 0.0, 0.7 ),
_Element("Gadolinium", "Gd", 0, 6, 64, 157.25, 1.8, 1.61, 0.0, 0.94),
_Element("Gallium", "Ga", 13, 4, 31, 69.723, 1.22, 1.25, 0.0, 1.81),
_Element("Germanium", "Ge", 14, 4, 32, 72.61, 1.23, 1.22, 0.0, 2.01),
_Element("Gold", "Au", 11, 6, 79, 196.96654, 1.44, 1.34, 0.0, 2.0 ),
_Element("Hafnium", "Hf", 4, 6, 72, 178.49, 1.56, 1.44, 0.0, 1.5 ),
_Element("Hahnium", "Hn", 8, 7, 108, 0.0, 0.0, 0.0, 0.0, 0.0 ),
_Element("Helium", "He", 18, 1, 2, 4.002602, 1.28, 0.0, 1.22, 0.0 ),
_Element("Holmium", "Ho", 0, 6, 67, 164.93032, 1.77, 1.58, 0.0, 1.24),
_Element("Hydrogen", "H" , 1, 1, 1, 1.00797, 0.78, 0.3, 1.2, 2.2 ),
_Element("Indium", "In", 13, 5, 49, 114.818, 1.63, 1.5, 0.0, 1.78),
_Element("Iodine", "I" , 17, 5, 53, 126.90447, 0.0, 1.33, 2.15, 2.66),
_Element("Iridium", "Ir", 9, 6, 77, 192.217, 1.36, 1.26, 0.0, 2.28),
_Element("Iron", "Fe", 8, 4, 26, 55.845, 1.24, 1.16, 0.0, 1.83),
_Element("Joliotium", "Jl", 5, 7, 105, 262.114, 0.0, 0.0, 0.0, 0.0 ),
_Element("Krypton", "Kr", 18, 4, 36, 83.80, 0.0, 1.89, 1.98, 0.0 ),
_Element("Lanthanum", "La", 3, 6, 57, 138.9055, 1.88, 1.69, 0.0, 1.1 ),
_Element("Lawrencium", "Lr", 3, 7, 103, 262.11, 0.0, 0.0, 0.0, 0.0 ),
_Element("Lead", "Pb", 14, 6, 82, 207.2, 1.75, 1.54, 0.0, 2.02),
_Element("Lithium", "Li", 1, 2, 3, 6.941, 1.52, 1.23, 0.0, 0.98),
_Element("Lutetium", "Lu", 3, 6, 71, 174.967, 1.72, 1.56, 0.0, 1.3 ),
_Element("Magnesium", "Mg", 2, 3, 12, 24.30506, 1.6, 1.36, 0.0, 1.31),
_Element("Manganese", "Mn", 7, 4, 25, 54.93805, 1.24, 1.77, 0.0, 1.55),
_Element("Meitnerium", "Mt", 9, 7, 109, 0.0, 0.0, 0.0, 0.0, 0.0 ),
_Element("Mendelevium", "Md", 0, 7, 101, 258.1, 0.0, 0.0, 0.0, 1.3 ),
_Element("Mercury", "Hg", 12, 6, 80, 200.59, 1.60, 1.44, 0.0, 1.8 ),
_Element("Molybdenum", "Mo", 6, 5, 42, 95.94, 1.36, 1.29, 0.0, 2.16),
_Element("Neodymium", "Nd", 0, 6, 60, 144.24, 1.82, 1.64, 0.0, 1.14),
_Element("Neon", "Ne", 18, 2, 10, 20.1797, 0.0, 0.0, 1.6, 0.0 ),
_Element("Neptunium", "Np", 0, 7, 93, 237.0482, 1.5, 0.0, 0.0, 1.28),
_Element("Nickel", "Ni", 10, 4, 28, 58.6934, 1.25, 1.15, 0.0, 1.91),
_Element("Niobium", "Nb", 5, 5, 41, 92.90638, 1.43, 1.34, 0.0, 1.6 ),
_Element("Nitrogen", "N" , 15, 2, 7, 14.00674, 0.71, 0.7, 1.54, 3.04),
_Element("Nobelium", "No", 0, 7, 102, 259.1009, 0.0, 0.0, 0.0, 0.0 ),
_Element("Osmium", "Os", 8, 6, 76, 190.23, 1.35, 1.26, 0.0, 2.2 ),
_Element("Oxygen", "O" , 16, 2, 8, 15.9994, 0.6, 0.66, 1.4, 3.44),
_Element("Palladium", "Pd", 10, 5, 46, 106.42, 1.38, 1.28, 0.0, 2.2 ),
_Element("Phosphorus", "P" , 15, 3, 15, 30.973762, 1.15, 1.10, 1.9, 2.19, 10),
_Element("Platinum", "Pt", 10, 6, 78, 195.08, 1.38, 1.29, 0.0, 2.54),
_Element("Plutonium", "Pu", 7, 0, 94, 244.0642, 0.0, 0.0, 0.0, 1.3 ),
_Element("Polonium", "Po", 16, 6, 84, 208.9824, 1.67, 1.53, 0.0, 2.2 ),
_Element("Potassium", "K" , 1, 4, 19, 39.0983, 2.27, 2.03, 2.31, 0.82),
_Element("Praseodymium", "Pr", 0, 6, 59, 140.90765, 1.83, 1.65, 0.0, 1.13),
_Element("Promethium", "Pm", 0, 6, 61, 144.9127, 1.81, 0.0, 0.0, 0.94),
_Element("Protactinium", "Pa", 0, 7, 91, 231.03588, 1.61, 0.0, 0.0, 1.38),
_Element("Radium", "Ra", 2, 7, 88, 226.0254, 2.23, 0.0, 0.0, 0.89),
_Element("Radon", "Rn", 18, 6, 86, 222.0176, 0.0, 0.0, 0.0, 0.7 ),
_Element("Rhenium", "Re", 7, 6, 75, 186.207, 1.37, 1.28, 0.0, 2.2 ),
_Element("Rhodium", "Rh", 9, 5, 45, 102.9055, 1.34, 1.25, 0.0, 2.28),
_Element("Rubidium", "Rb", 1, 5, 37, 85.4678, 1.475, 0.0, 2.44, 0.82),
_Element("Ruthenium", "Ru", 8, 5, 44, 101.07, 1.34, 1.24, 0.0, 2.2 ),
_Element("Rutherfordium", "Rf", 6, 7, 106, 263.118, 0.0, 0.0, 0.0, 0.0 ),
_Element("Samarium", "Sm", 0, 6, 62, 150.36, 1.8, 1.66, 0.0, 1.17),
_Element("Scandium", "Sc", 3, 4, 21, 44.95591, 1.61, 1.44, 0.0, 1.36),
_Element("Selenium", "Se", 16, 4, 34, 78.96, 2.15, 1.17, 2.0, 2.55),
_Element("Silicon", "Si", 14, 3, 14, 28.0855, 1.17, 1.17, 2.0, 1.9 ),
_Element("Silver", "Ag", 11, 5, 47, 107.8682, 1.44, 1.34, 0.0, 1.93),
_Element("Sodium", "Na", 1, 3, 11, 22.989768, 1.54, 0.0, 2.31, 0.93),
_Element("Strontium", "Sr", 2, 5, 38, 87.62, 2.15, 1.92, 0.0, 0.95),
_Element("Sulfur", "S" , 16, 3, 16, 32.066, 1.04, 1.04, 1.85, 2.58, 12),
_Element("Tantalum", "Ta", 5, 6, 73, 180.9479, 1.43, 1.34, 0.0, 2.36),
_Element("Technetium", "Tc", 7, 5, 43, 98.9072, 1.36, 0.0, 0.0, 1.9 ),
_Element("Tellurium", "Te", 16, 5, 52, 127.6, 1.43, 1.37, 2.2, 2.1 ),
_Element("Terbium", "Tb", 0, 6, 65, 158.92534, 1.78, 1.59, 0.0, 1.22),
_Element("Thallium", "Tl", 13, 6, 81, 204.3833, 1.7, 1.55, 0.0, 2.33),
_Element("Thorium", "Th", 0, 7, 90, 232.0381, 1.80, 0.0, 0.0, 0.0 ),
_Element("Thulium", "Tm", 0, 6, 69, 168.93421, 1.75, 1.56, 0.0, 0.96),
_Element("Tin", "Sn", 14, 5, 50, 118.71, 1.41, 1.4, 2.0, 1.96),
_Element("Titanium", "Ti", 4, 4, 22, 47.867, 1.45, 1.32, 0.0, 1.54),
_Element("Tungsten", "W" , 6, 6, 74, 183.84, 1.37, 1.3, 0.0, 1.9 ),
_Element("Uranium", "U" , 0, 7, 92, 238.0289, 1.54, 0.0, 0.0, 1.26),
_Element("Vanadium", "V" , 5, 4, 23, 50.9415, 1.32, 0.0, 0.0, 1.63),
_Element("Xenon", "Xe", 18, 5, 54, 131.29, 2.18, 2.09, 2.16, 2.6 ),
_Element("Ytterbium", "Yb", 0, 6, 70, 173.04, 1.94, 1.7, 0.0, 1.27),
_Element("Yttrium", "Y" , 3, 5, 39, 88.90585, 1.81, 1.62, 0.0, 1.22),
_Element("Zinc", "Zn", 12, 4, 30, 65.39, 1.33, 1.25, 0.0, 1.65),
_Element("Zirconium", "Zr", 4, 5, 40, 91.224, 1.6, 1.45, 0.0, 1.3 )
]
PeriodicTable = _PeriodicTable(_elements)
| mit | -6,510,224,344,621,918,000 | 64.680628 | 94 | 0.430052 | false | 2.270177 | false | false | false |
naorlivne/dshp | dshp.py | 1 | 2761 | #!/usr/bin/python2.7
import socket, sys, json, os, subprocess, datetime, time
from thread import *
# function that sets config variables with the following priority chain
# envvar > config file > default value
def read_config(conf_var):
try:
conf_value = os.environ[conf_var.upper()]
if conf_var == "handlers":
conf_value_list = conf_value.split(",")
return conf_value_list
except:
try:
conf_value = conf_file[conf_var]
except:
try:
conf_value = default_conf[conf_var]
except:
print "critical - missing config value for " + conf_var
sys.exit(2)
return conf_value
# function to handle connections
def client_thread(conn):
# Sending message to connected client
conn.send(reply)
# came out of loop
conn.close()
# function to run handlers
def run_handlers(handlers):
now = datetime.datetime.now()
current_run_unix_time = time.time()
if current_run_unix_time > float(last_run_unix_time + timeout):
for handler in handlers:
json_reply = str(json.dumps({
"hostname": hostname,
"ip": offender_ip,
"time": now.isoformat()
}))
subprocess.call([handler_exec + " " + handler + " '" + json_reply + "'"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd="handlers")
return current_run_unix_time
else:
return last_run_unix_time
# set config variables
default_conf = {"port": 8888, "interface": "", "reply": "", "timeout": 300, "handler_exec": "/usr/bin/python2.7"}
try:
conf_file = json.load(open("conf.json"))
print "loaded conf.json file"
except:
print "Warning - unable to load correctly phrased json config file"
try:
port = int(read_config('port'))
interface = str(read_config('interface'))
timeout = int(read_config('timeout'))
reply = str(read_config('reply'))
handlers = read_config('handlers')
handler_exec = read_config('handler_exec')
hostname = socket.gethostname()
last_run_unix_time = 0
except:
print "critical - problem setting config variable"
sys.exit(2)
# bind socket and start listening
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Socket created'
try:
s.bind((interface, port))
except socket.error as msg:
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit(2)
print 'Socket bind complete'
s.listen(5)
print 'Socket now listening'
# keep waiting for connections
while 1:
conn, addr = s.accept()
offender_ip = addr[0]
print "attempted connection from " + offender_ip
last_run_unix_time = run_handlers(handlers)
start_new_thread(client_thread, (conn,))
s.close() | apache-2.0 | -7,716,142,935,125,726,000 | 29.351648 | 161 | 0.638175 | false | 3.618611 | true | false | false |
glyph/horsephrase | horsephrase/_regen_words.py | 1 | 2398 | """
Run with 'python -m horsephrase._regen_words > horsephrase/words.txt'
- Stop allowing words less than 3 characters; if we have the possibility
of words that short, it's trivially possible to attack the password as
letters rather than as selections from this list.
- Add words (both sourced from https://norvig.com/ngrams/) from a list of
correctly-spelled words (the YAWL) in the order of word
frequency (count_1w.txt) until we reach a desirable count
- The oldest recorded living human -- courtesy of this list
https://en.wikipedia.org/wiki/List_of_oldest_living_people - is
presently 116 years and 74 days old. Determine the desirable count
from the number of guesses that will exceed that time with a 5-word
passphrase assuming a trillion guesses per second.
- Remove words that are offensive, triggering or otherwise in poor taste.
Horsephrases should be communicable to people over phone lines without
being embarassing, offensive or unprofessional.
If generating a horsephrase generates something offensive, add the sha256 of
the offending word to _removed_words, run the command at the start of this
module, and open a PR with both changes.
"""
if __name__ != "__main__":
raise ImportError("module is not importable")
import hashlib
import itertools
import requests
# There's a whole bit about the oldest
# living human or something.
NUM_WORDS = 23600
# Removed words are specified by their hash,
# as we do not want to offend people who read the source.
_removed_words = set([
'31506a8448a761a448a08aa69d9116ea8a6cb1c6b3f4244b3043051f69c9cc3c',
'e9b6438440bf1991a49cfc2032b47e4bde26b7d7a6bf7594ec6f308ca1f5797c',
])
def get_words(session):
yawl = session.get("https://norvig.com/ngrams/word.list")
correct = set(yawl.text.split())
counts = session.get("https://norvig.com/ngrams/count_1w.txt")
for line in counts.text.splitlines():
word, count = line.split()
if word not in correct:
continue
yield word
def valid_words(words):
for word in words:
if len(word) <= 3:
continue
digest = hashlib.sha256(word.encode('ascii')).hexdigest()
if digest in _removed_words:
continue
yield word
for word in sorted(itertools.islice(valid_words(get_words(requests.Session())),
NUM_WORDS)):
print(word)
| mit | -6,620,722,462,035,993,000 | 35.892308 | 79 | 0.714762 | false | 3.470333 | false | false | false |
Chiel92/hearts-AI | player.py | 1 | 4062 | """This module containts the abstract class Player and some implementations."""
from random import shuffle
from card import Suit, Rank, Card, Deck
from rules import is_card_valid
class Player:
"""
Abstract class defining the interface of a Computer Player.
"""
def pass_cards(self, hand):
"""Must return a list of three cards from the given hand."""
return NotImplemented
def play_card(self, hand, trick, trick_nr, are_hearts_broken):
"""
Must return a card from the given hand.
trick is a list of cards played so far.
trick can thus have 0, 1, 2, or 3 elements.
are_hearts_broken is a boolean indicating whether the hearts are broken yet.
trick_nr is an integer indicating the current trick number, starting with 0.
"""
return NotImplemented
def see_played_trick(self, trick, trick_nr):
"""
Allows the player to have a look at all four cards in the trick being played.
"""
pass
class StupidPlayer(Player):
"""
Most simple player you can think of.
It just plays random valid cards.
"""
def pass_cards(self, hand):
return hand[:3]
def play_card(self, hand, trick, trick_nr, are_hearts_broken):
# Play first card that is valid
for card in hand:
if is_card_valid(hand, trick, card, trick_nr, are_hearts_broken):
return card
raise AssertionError(
'Apparently there is no valid card that can be played. This should not happen.'
)
class SimplePlayer(Player):
"""
This player has a notion of a card being undesirable.
It will try to get rid of the most undesirable cards while trying not to win a trick.
"""
def __init__(self, verbose=False):
self.verbose = verbose
if verbose:
deck = Deck()
deck.cards.sort(key=self.undesirability)
self.say('Card undesirability: ')
for card in deck.cards:
self.say('{}: {}', card, self.undesirability(card))
def say(self, message, *formatargs):
if self.verbose:
print(message.format(*formatargs))
def undesirability(self, card):
return (
card.rank.value
+ (10 if card.suit == Suit.spades and card.rank >= Rank.queen else 0)
)
def pass_cards(self, hand):
hand.sort(key=self.undesirability, reverse=True)
return hand[:3]
def play_card(self, hand, trick, trick_nr, are_hearts_broken):
# Lead with a low card
if not trick:
hand.sort(key=lambda card:
100 if not are_hearts_broken and card.suit == Suit.hearts else
card.rank.value)
return hand[0]
hand.sort(key=self.undesirability, reverse=True)
self.say('Hand: {}', hand)
self.say('Trick so far: {}', trick)
# Safe cards are cards which will not result in winning the trick
leading_suit = trick[0].suit
max_rank_in_leading_suit = max([card.rank for card in trick
if card.suit == leading_suit])
valid_cards = [card for card in hand
if is_card_valid(hand, trick, card, trick_nr, are_hearts_broken)]
safe_cards = [card for card in valid_cards
if card.suit != leading_suit or card.rank <= max_rank_in_leading_suit]
self.say('Valid cards: {}', valid_cards)
self.say('Safe cards: {}', safe_cards)
try:
return safe_cards[0]
except IndexError:
queen_of_spades = Card(Suit.spades, Rank.queen)
# Don't try to take a trick by laying the queen of spades
if valid_cards[0] == queen_of_spades and len(valid_cards) > 1:
return valid_cards[1]
else:
return valid_cards[0]
| gpl-3.0 | 8,617,598,506,807,111,000 | 32.134454 | 92 | 0.569916 | false | 3.955209 | false | false | false |
jigneshphipl/omaha | enterprise/build_group_policy_template.py | 63 | 2024 | #!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""A Hammer-specific wrapper for generate_group_policy_template."""
from omaha.enterprise import generate_group_policy_template
def BuildGroupPolicyTemplate(env, target, apps, apps_file_path=None):
"""Builds a Group Policy ADM template file, handling dependencies.
Causes WriteGroupPolicyTemplate() to be called at build time instead of as
part of the processing stage.
Args:
env: The environment.
target: ADM output file.
apps: A list of tuples containing information about each app. See
generate_group_policy_template for details.
apps_file_path: Optional path to the file that defines apps. Used to enforce
dependencies.
"""
def _WriteAdmFile(target, source, env):
"""Called during the build phase to generate and write the ADM file."""
source = source # Avoid PyLint warning.
generate_group_policy_template.WriteGroupPolicyTemplate(
env.File(target[0]).abspath,
env['public_apps'])
return 0
adm_output = env.Command(
target=target,
source=[],
action=_WriteAdmFile,
public_apps=apps
)
# Force ADM file to rebuild whenever the script or apps data change.
dependencies = ['$MAIN_DIR/enterprise/generate_group_policy_template.py']
if apps_file_path:
dependencies.append(apps_file_path)
env.Depends(adm_output, dependencies)
| apache-2.0 | 2,551,215,414,839,132,700 | 34.508772 | 80 | 0.699111 | false | 4.088889 | false | false | false |
johtso/django-smart-selects | test_app/migrations/0001_initial.py | 4 | 3111 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-16 17:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import smart_selects.db_fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Continent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('continent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='test_app.Continent')),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('city', models.CharField(max_length=50)),
('street', models.CharField(max_length=100)),
('continent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='test_app.Continent')),
('country', smart_selects.db_fields.ChainedForeignKey(auto_choose=True, chained_field=b'continent', chained_model_field=b'continent', on_delete=django.db.models.deletion.CASCADE, to='test_app.Country')),
],
),
migrations.CreateModel(
name='Publication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Writer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('publications', models.ManyToManyField(blank=True, to='test_app.Publication')),
],
),
migrations.AddField(
model_name='book',
name='publication',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='test_app.Publication'),
),
migrations.AddField(
model_name='book',
name='writer',
field=smart_selects.db_fields.ChainedManyToManyField(chained_field=b'publication', chained_model_field=b'publications', to='test_app.Writer'),
),
]
| bsd-3-clause | 7,108,204,114,110,410,000 | 40.48 | 219 | 0.571842 | false | 4.23842 | true | false | false |
dderevjanik/agescx | agescx/models/resource.py | 1 | 1740 | class Resource:
"""Information about player resource.
Include Ore, which is unused, but it can be enabled.
Attributes:
food (int): food
wood (int): wood
gold (int): gold
stone (int): stone
ore (int): ore
"""
def __init__(self, food=0, wood=0, gold=0, stone=0, ore=0):
"""Create Resource container for a player
Args:
food (int): starting food
wood (int): starting wood
gold (int): starting gold
stone (int): starting stone
ore (int): starting ore. unused
"""
self.food = food
self.wood = wood
self.gold = gold
self.stone = stone
self.ore = ore
def __repr__(self):
name = "Player resources: \n"
res1 = "\tWood: {}\n\tFood: {}\n".format(self.wood, self.food)
res2 = "\tGold: {}\n\tStone: {}\n".format(self.gold, self.stone)
res3 = "\tOre*: {}".format(self.ore)
return name + res1 + res2 + res3
def toJSON(self):
"""return JSON"""
data = dict()
data['food'] = self.food
data['wood'] = self.wood
data['stone'] = self.stone
data['gold'] = self.gold
return data
def setAll(self, value):
"""Set value to all resources
Args:
value (int): a value set to all resources
"""
self.food = value
self.wood = value
self.gold = value
self.stone = value
self.ore = value
def getAll(self):
"""get all resource
Return:
(tuple): resource values
"""
return (self.food, self.wood, self.gold, self.stone, self.ore)
| mit | 540,976,870,569,891,100 | 26.1875 | 72 | 0.505172 | false | 3.741935 | false | false | false |
jeremiah-c-leary/vhdl-style-guide | vsg/vhdlFile/classify/variable_assignment_statement.py | 1 | 1263 |
from vsg.token import variable_assignment_statement as token
from vsg.vhdlFile.classify import conditional_variable_assignment
from vsg.vhdlFile.classify import selected_variable_assignment
from vsg.vhdlFile.classify import simple_variable_assignment
from vsg.vhdlFile import utils
def detect(iToken, lObjects):
'''
variable_assignment_statement ::=
[ label : ] simple_variable_assignment
| [ label : ] conditional_variable_assignment
| [ label : ] selected_variable_assignment
'''
iCurrent = iToken
if selected_variable_assignment.detect(iToken, lObjects):
iCurrent = utils.tokenize_label(iCurrent, lObjects, token.label, token.label_colon)
iCurrent = selected_variable_assignment.classify(iCurrent, lObjects)
elif conditional_variable_assignment.detect(iToken, lObjects):
iCurrent = utils.tokenize_label(iCurrent, lObjects, token.label, token.label_colon)
iCurrent = conditional_variable_assignment.classify(iCurrent, lObjects)
elif simple_variable_assignment.detect(iToken, lObjects):
iCurrent = utils.tokenize_label(iCurrent, lObjects, token.label, token.label_colon)
iCurrent = simple_variable_assignment.classify(iCurrent, lObjects)
return iCurrent
| gpl-3.0 | -7,943,438,087,998,351,000 | 39.741935 | 91 | 0.745051 | false | 3.886154 | false | false | false |
rahulrrixe/libcloudCLI | libcloudcli/compute/node.py | 1 | 3000 |
"""Node action implementations"""
import logging
import six
from cliff import command
from cliff import lister
from cliff import show
class CreateNode(show.ShowOne):
"""Create compute Node command"""
log = logging.getLogger(__name__ + ".create_node")
def get_parser(self, prog_name):
parser = super(CreateNode, self).get_parser(prog_name)
parser.add_argument(
"name",
metavar="<name>",
help="Name of the Node")
parser.add_argument(
"size",
metavar="<size>",
help="The size of the resource allocated")
parser.add_argument(
"image",
metavar="<image>",
help="OS image to boot on")
parser.add_argument(
"auth",
metavar="<auth>",
help="Initial authentication information")
parser.add_argument(
"location",
metavar="<location>",
help="which data center to create node")
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
#compute_client = self.app.client_manager.compute
args = (
parsed_args.name,
parsed_args.size,
parsed_args.image,
parsed_args.auth,
parsed_args.location
)
#Node = compute_client.Node.create(*args)._info.copy()
#return zip(*sorted(six.iteritems(Node)))
return "its works!"
class DeleteNode(command.Command):
"""Delete compute agent command"""
log = logging.getLogger(__name__ + ".DeleteAgent")
def get_parser(self, prog_name):
parser = super(DeleteNode, self).get_parser(prog_name)
parser.add_argument(
"id",
metavar="<id>",
help="ID of agent to delete")
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
print "node deleted"
return
class SetNode(show.ShowOne):
"""Set compute Node command"""
log = logging.getLogger(__name__ + ".SetAgent")
def get_parser(self, prog_name):
parser = super(SetNode, self).get_parser(prog_name)
parser.add_argument(
"id",
metavar="<id>",
help="ID of the agent")
parser.add_argument(
"version",
metavar="<version>",
help="Version of the agent")
parser.add_argument(
"url",
metavar="<url>",
help="URL")
parser.add_argument(
"md5hash",
metavar="<md5hash>",
help="MD5 hash")
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
args = (
parsed_args.id,
parsed_args.version,
parsed_args.url,
parsed_args.md5hash
)
print "node set"
return
| apache-2.0 | -3,680,191,917,278,911,000 | 26.522936 | 62 | 0.538667 | false | 4.249292 | false | false | false |
ewsterrenburg/tamandua | pelicanconf.py | 1 | 2810 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
from fontawesome_markdown import FontAwesomeExtension
import os
AUTHOR = u"Erwin Sterrenburg"
SITENAME = u"PILOSA.EU"
TAGLINE = 'Scribblings of World\'s Most Curious Anteater'
TIMEZONE = 'Europe/Amsterdam'
LOCALE='en_US.utf8'
DEFAULT_LANG = u'en'
DEFAULT_PAGINATION = 5
# By default we enable pretty highlighing in markdown:
MD_EXTENSIONS = [FontAwesomeExtension(), 'codehilite(css_class=highlight,linenums=False)', 'extra', 'toc', 'typogrify']
# Leave this blank for local development, publishconf.py has the "real" value:
SITEURL = 'http://localhost:8000/'
RELATIVE_URLS = True
# Feed generation is usually not desired when developing
FEED_ALL_RSS = 'feed.xml'
CATEGORY_FEED_RSS = None
TRANSLATION_FEED_RSS = None
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
FEED_DOMAIN = SITEURL
MENUITEMS = [('Archive', 'archives.html'), ('About', 'about.html'),]
SOCIAL = (
('envelope-o', 'mailto:[email protected]'),
('github', 'https://github.com/ewsterrenburg/'),
('linkedin-square', 'https://www.linkedin.com/in/ewsterrenburg/'),
('rss', 'http://pilosa.eu/feed.xml'),
)
# Static files
# Uncomment and set to the filename of your favicon:
FAVICON_FILENAME = 'favicon.ico'
# Any extra files should be added here
#STATIC_PATHS = ['images', 'extra/CNAME']
STATIC_PATHS = [
'images',
os.path.join('extras','robots.txt'),
'extras/CNAME',
'extras/favicon.ico',
'extras/apple-touch-icon.png'
]
# Here's a sample EXTRA_PATH_METADATA that adds the favicon, an iOS touch icon and a GPG key:
EXTRA_PATH_METADATA = dict()
for f in os.listdir('content/extras'):
STATIC_PATHS.append('extras' + os.sep + '{0}'.format(f))
EXTRA_PATH_METADATA['extras' + os.sep + '{0}'.format(f)]={'path': f}
#Theme
THEME = os.path.join(os.getcwd(), "themes", "pure-single-master")
COVER_IMG_URL = "/images/bananas.jpeg"
SINGLE_AUTHOR = True
# Sole author and don't use categories ... disable these features
AUTHOR_SAVE_AS = False
AUTHORS_SAVE_AS = False
DISPLAY_CATEGORIES_ON_MENU = False
DEFAULT_DATE_FORMAT = ('%b %d %Y')
TYPOGRIFY = True
# Cleaner page links
PAGE_URL = '{slug}.html'
PAGE_SAVE_AS = '{slug}.html'
PAGE_LANG_URL = '{slug}-{lang}.html'
PAGE_LANG_SAVE_AS = '{slug}-{lang}.html'
# Cleaner Articles
ARTICLE_URL = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/'
ARTICLE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
PLUGIN_PATHS = [os.path.join(os.getcwd(), "..", "pelican-plugins")]
#PLUGINS = ['render_math', 'extended_sitemap', 'better_codeblock_line_numbering']
PLUGINS = ['extended_sitemap', 'better_codeblock_line_numbering']
TAG_CLOUD_STEPS = 4
# Setting for the better_figures_and_images plugin
RESPONSIVE_IMAGES = True
| mit | -957,077,791,029,991,000 | 28.893617 | 119 | 0.69395 | false | 2.939331 | false | false | false |
mworion/mountwizzard | snippets/indi_new/qt_indi_client.py | 1 | 5229 | #!/usr/bin/env python
"""
A PyQt5 (client) interface to an INDI server. This will only work
in the context of a PyQt application.
"""
from xml.etree import ElementTree
from PyQt5 import QtCore, QtNetwork
import snippets.indi_new.indi_xml as indiXML
class QtINDIClientException(Exception):
pass
class QtINDIClient(QtCore.QObject):
received = QtCore.pyqtSignal(object) # Received messages as INDI Python objects.
def __init__(self,
host='192.168.2.164',
port=7624,
verbose=True,
**kwds):
super().__init__(**kwds)
self.device = None
self.message_string = ""
self.host = host
self.port = port
self.connected = False
# Create socket.
self.socket = QtNetwork.QTcpSocket()
self.socket.disconnected.connect(self.handleDisconnect)
self.socket.readyRead.connect(self.handleReadyRead)
self.socket.hostFound.connect(self.handleHostFound)
self.socket.connected.connect(self.handleConnected)
self.socket.stateChanged.connect(self.handleStateChanged)
self.socket.error.connect(self.handleError)
# if not self.socket.waitForConnected():
# print("Cannot connect to indiserver at " + address + ", port " + str(port))
def handleDisconnect(self):
print('handleDisconnect')
self.connected = False
self.socket.disconnectFromHost()
def handleHostFound(self):
print('handleHostFound')
def handleConnected(self):
print('handleConnected')
print("Connect to indiserver at " + self.host + ", port " + str(self.port))
self.connected = True
def handleError(self, socketError):
print("The following error occurred: {0}".format(self.socket.errorString()))
if socketError == QtNetwork.QAbstractSocket.RemoteHostClosedError:
pass
else:
pass
def handleStateChanged(self):
print('State changed: {0}'.format(self.socket.state()))
if self.socket.state() == QtNetwork.QAbstractSocket.ConnectedState:
pass
else:
pass
def handleReadyRead(self):
# Add starting tag if this is new message.
if (len(self.message_string) == 0):
self.message_string = "<data>"
# Get message from socket.
while self.socket.bytesAvailable():
# FIXME: This does not work with Python2.
tmp = str(self.socket.read(1000000), "ascii")
self.message_string += tmp
# Add closing tag.
self.message_string += "</data>"
# Try and parse the message.
try:
messages = ElementTree.fromstring(self.message_string)
self.message_string = ""
for message in messages:
xml_message = indiXML.parseETree(message)
# Filter message is self.device is not None.
if self.device is not None:
if (self.device == xml_message.getAttr("device")):
self.received.emit(xml_message)
# Otherwise just send them all.
else:
self.received.emit(xml_message)
# Message is incomplete, remove </data> and wait..
except ElementTree.ParseError:
self.message_string = self.message_string[:-7]
def setDevice(self, device=None):
self.device = device
def sendMessage(self, indi_command):
if (self.socket.state() == QtNetwork.QAbstractSocket.ConnectedState):
self.socket.write(indi_command.toXML() + b'\n')
else:
print("Socket is not connected.")
if (__name__ == "__main__"):
import sys
import time
from PyQt5 import QtWidgets
class Widget(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.client = QtINDIClient()
self.client.received.connect(self.handleReceived)
def handleReceived(self, message):
print(message)
def send(self, message):
self.client.sendMessage(message)
app = QtWidgets.QApplication(sys.argv)
widget = Widget()
widget.show()
# Get a list of devices.
# widget.send(indiXML.clientGetProperties(indi_attr={"version": "1.0"}))
# Connect to the CCD simulator.
# widget.send(indiXML.newSwitchVector([indiXML.oneSwitch("On", indi_attr={"name": "CONNECT"})], indi_attr={"name": "CONNECTION", "device": "CCD Simulator"}))
while True:
time.sleep(1)
QtWidgets.QApplication.processEvents()
if not widget.client.connected and widget.client.socket.state() == 0:
print('try to connect to', widget.client.host)
widget.client.socket.connectToHost(widget.client.host, widget.client.port)
# Enable BLOB mode.
# widget.send(indiXML.enableBLOB("Also", indi_attr={"device": "CCD Simulator"}))
# Request image.
# widget.send(indiXML.newNumberVector([indiXML.oneNumber(1, indi_attr={"name": "CCD_EXPOSURE_VALUE"})], indi_attr={"name": "CCD_EXPOSURE", "device": "CCD Simulator"}))
sys.exit(app.exec_()) | apache-2.0 | -6,640,325,150,747,828,000 | 31.08589 | 175 | 0.608912 | false | 4.133597 | false | false | false |
devendermishrajio/nova_test_latest | nova/tests/unit/virt/libvirt/volume/test_volume.py | 4 | 30943 | # Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_config import cfg
from nova import exception
from nova import test
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova.virt.libvirt import host
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import volume
CONF = cfg.CONF
SECRET_UUID = '2a0a0d6c-babf-454d-b93e-9ac9957b95e0'
class FakeSecret(object):
    """Minimal stand-in for a libvirt virSecret object."""
def __init__(self):
self.uuid = SECRET_UUID
def getUUIDString(self):
return self.uuid
def UUIDString(self):
return self.uuid
def setValue(self, value):
self.value = value
return 0
    def getValue(self, flags=0):
        # The real libvirt binding takes a flags argument; it is ignored here.
        return self.value
def undefine(self):
self.value = None
return 0
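# A minimal sketch of how the tests below use FakeSecret (hypothetical
# usage; it mirrors the libvirt virSecret interface that
# host.Host.create_secret/find_secret normally return):
#
#     secret = FakeSecret()
#     secret.setValue('s3kr1t')            # libvirt-style: returns 0
#     assert secret.getUUIDString() == SECRET_UUID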
class LibvirtVolumeBaseTestCase(test.NoDBTestCase):
"""Contains common setup and helper methods for libvirt volume tests."""
def setUp(self):
super(LibvirtVolumeBaseTestCase, self).setUp()
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)
self.useFixture(fakelibvirt.FakeLibvirtFixture())
class FakeLibvirtDriver(object):
def __init__(self):
self._host = host.Host("qemu:///system")
def _get_all_block_devices(self):
return []
self.fake_conn = FakeLibvirtDriver()
self.connr = {
'ip': '127.0.0.1',
'initiator': 'fake_initiator',
'host': 'fake_host'
}
self.disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
self.name = 'volume-00000001'
self.location = '10.0.2.15:3260'
self.iqn = 'iqn.2010-10.org.openstack:%s' % self.name
self.vol = {'id': 1, 'name': self.name}
self.uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
self.user = 'foo'
def _assertFileTypeEquals(self, tree, file_path):
self.assertEqual(tree.get('type'), 'file')
self.assertEqual(tree.find('./source').get('file'), file_path)
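    # For reference, _assertFileTypeEquals matches a disk element shaped
    # roughly like this (values illustrative, not from a real guest):
    #
    #   <disk type="file" device="disk">
    #     <source file="/mnt/<hash>/volume-00000001"/>
    #     <target bus="virtio" dev="vde"/>
    #   </disk>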
class LibvirtVolumeTestCase(LibvirtVolumeBaseTestCase):
    """Tests for the libvirt volume drivers (iSCSI, sheepdog, RBD, NFS, SMBFS)."""
def _assertNetworkAndProtocolEquals(self, tree):
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
rbd_name = '%s/%s' % ('rbd', self.name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
def _assertISCSINetworkAndProtocolEquals(self, tree):
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'iscsi')
iscsi_name = '%s/%s' % (self.iqn, self.vol['id'])
self.assertEqual(tree.find('./source').get('name'), iscsi_name)
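    # Both network-disk helpers above correspond to libvirt XML of roughly
    # this shape (illustrative):
    #
    #   <disk type="network" device="disk">
    #     <source protocol="rbd" name="rbd/volume-00000001"/>
    #   </disk>
    #
    # For iSCSI the protocol is "iscsi" and the name is "<target_iqn>/<lun>".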
def _assertDiskInfoEquals(self, tree, disk_info):
self.assertEqual(tree.get('device'), disk_info['type'])
self.assertEqual(tree.find('./target').get('bus'),
disk_info['bus'])
self.assertEqual(tree.find('./target').get('dev'),
disk_info['dev'])
    def _test_libvirt_volume_driver_disk_info(self):
        """Render self.disk_info through get_config and check the XML."""
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
},
'serial': 'fake_serial',
}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertDiskInfoEquals(tree, self.disk_info)
def test_libvirt_volume_disk_info_type(self):
self.disk_info['type'] = 'cdrom'
self._test_libvirt_volume_driver_disk_info()
def test_libvirt_volume_disk_info_dev(self):
self.disk_info['dev'] = 'hdc'
self._test_libvirt_volume_driver_disk_info()
def test_libvirt_volume_disk_info_bus(self):
self.disk_info['bus'] = 'scsi'
self._test_libvirt_volume_driver_disk_info()
def test_libvirt_volume_driver_serial(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
},
'serial': 'fake_serial',
}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertEqual('block', tree.get('type'))
self.assertEqual('fake_serial', tree.find('./serial').text)
self.assertIsNone(tree.find('./blockio'))
def test_libvirt_volume_driver_blockio(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
'logical_block_size': '4096',
'physical_block_size': '4096',
},
'serial': 'fake_serial',
}
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
blockio = tree.find('./blockio')
self.assertEqual('4096', blockio.get('logical_block_size'))
self.assertEqual('4096', blockio.get('physical_block_size'))
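    # The assertions above correspond to a blockio element such as:
    #
    #   <blockio logical_block_size="4096" physical_block_size="4096"/>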
def test_libvirt_volume_driver_iotune(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
"device_path": "/foo",
'qos_specs': 'bar',
},
}
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
iotune = tree.find('./iotune')
# ensure invalid qos_specs is ignored
self.assertIsNone(iotune)
specs = {
'total_bytes_sec': '102400',
'read_bytes_sec': '51200',
'write_bytes_sec': '0',
'total_iops_sec': '0',
'read_iops_sec': '200',
'write_iops_sec': '200',
}
del connection_info['data']['qos_specs']
connection_info['data'].update(dict(qos_specs=specs))
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual('102400', tree.find('./iotune/total_bytes_sec').text)
self.assertEqual('51200', tree.find('./iotune/read_bytes_sec').text)
self.assertEqual('0', tree.find('./iotune/write_bytes_sec').text)
self.assertEqual('0', tree.find('./iotune/total_iops_sec').text)
self.assertEqual('200', tree.find('./iotune/read_iops_sec').text)
self.assertEqual('200', tree.find('./iotune/write_iops_sec').text)
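    # The qos_specs keys map one-to-one onto <iotune> children in the
    # generated XML, e.g. (illustrative):
    #
    #   <iotune>
    #     <total_bytes_sec>102400</total_bytes_sec>
    #     <read_iops_sec>200</read_iops_sec>
    #   </iotune>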
def test_libvirt_volume_driver_readonly(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
"device_path": "/foo",
'access_mode': 'bar',
},
}
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
self.assertRaises(exception.InvalidVolumeAccessMode,
libvirt_driver.get_config,
connection_info, self.disk_info)
connection_info['data']['access_mode'] = 'rw'
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
readonly = tree.find('./readonly')
self.assertIsNone(readonly)
connection_info['data']['access_mode'] = 'ro'
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
readonly = tree.find('./readonly')
self.assertIsNotNone(readonly)
def iscsi_connection(self, volume, location, iqn, auth=False,
                         transport=None):
        """Return a Cinder-style iSCSI connection_info dict for the tests."""
dev_name = 'ip-%s-iscsi-%s-lun-1' % (location, iqn)
if transport is not None:
dev_name = 'pci-0000:00:00.0-' + dev_name
dev_path = '/dev/disk/by-path/%s' % (dev_name)
ret = {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': volume['id'],
'target_portal': location,
'target_iqn': iqn,
'target_lun': 1,
'device_path': dev_path,
'qos_specs': {
'total_bytes_sec': '102400',
'read_iops_sec': '200',
}
}
}
if auth:
ret['data']['auth_method'] = 'CHAP'
ret['data']['auth_username'] = 'foo'
ret['data']['auth_password'] = 'bar'
return ret
    def iscsi_connection_discovery_chap_enable(self, volume, location, iqn):
        """Return iSCSI connection_info carrying discovery CHAP credentials."""
dev_name = 'ip-%s-iscsi-%s-lun-1' % (location, iqn)
dev_path = '/dev/disk/by-path/%s' % (dev_name)
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': volume['id'],
'target_portal': location,
'target_iqn': iqn,
'target_lun': 1,
'device_path': dev_path,
'discovery_auth_method': 'CHAP',
'discovery_auth_username': "testuser",
'discovery_auth_password': '123456',
'qos_specs': {
'total_bytes_sec': '102400',
'read_iops_sec': '200',
}
}
}
    def generate_device(self, transport=None, lun=1, short=False):
        """Build the /dev/disk/by-path name an iSCSI attach would produce."""
dev_format = "ip-%s-iscsi-%s-lun-%s" % (self.location, self.iqn, lun)
if transport:
dev_format = "pci-0000:00:00.0-" + dev_format
if short:
return dev_format
fake_dev_path = "/dev/disk/by-path/" + dev_format
return fake_dev_path
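    # With the default fixtures (lun=1, no transport) generate_device
    # yields, for example:
    #
    #   /dev/disk/by-path/ip-10.0.2.15:3260-iscsi-iqn.2010-10.org.openstack:volume-00000001-lun-1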
def test_iscsiadm_discover_parsing(self):
# Ensure that parsing iscsiadm discover ignores cruft.
targets = [
["192.168.204.82:3260,1",
("iqn.2010-10.org.openstack:volume-"
"f9b12623-6ce3-4dac-a71f-09ad4249bdd3")],
["192.168.204.82:3261,1",
("iqn.2010-10.org.openstack:volume-"
"f9b12623-6ce3-4dac-a71f-09ad4249bdd4")]]
# This slight wonkiness brought to you by pep8, as the actual
# example output runs about 97 chars wide.
sample_input = """Loading iscsi modules: done
Starting iSCSI initiator service: done
Setting up iSCSI targets: unused
%s %s
%s %s
""" % (targets[0][0], targets[0][1], targets[1][0], targets[1][1])
driver = volume.LibvirtISCSIVolumeDriver("none")
out = driver.connector._get_target_portals_from_iscsiadm_output(
sample_input)
self.assertEqual(out, targets)
    def test_libvirt_iscsi_driver(self):
        # The connection handling itself lives in os-brick; here we only
        # verify the driver wires up the right connector class.
        libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
        self.assertIsInstance(libvirt_driver.connector,
                              connector.ISCSIConnector)
def test_sanitize_log_run_iscsiadm(self):
# Tests that the parameters to the os-brick connector's
# _run_iscsiadm function are sanitized for passwords when logged.
def fake_debug(*args, **kwargs):
self.assertIn('node.session.auth.password', args[0])
self.assertNotIn('scrubme', args[0])
def fake_execute(*args, **kwargs):
return (None, None)
libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
libvirt_driver.connector.set_execute(fake_execute)
connection_info = self.iscsi_connection(self.vol, self.location,
self.iqn)
iscsi_properties = connection_info['data']
with mock.patch.object(connector.LOG, 'debug',
side_effect=fake_debug) as debug_mock:
libvirt_driver.connector._iscsiadm_update(
iscsi_properties, 'node.session.auth.password', 'scrubme')
# we don't care what the log message is, we just want to make sure
# our stub method is called which asserts the password is scrubbed
self.assertTrue(debug_mock.called)
    def iser_connection(self, volume, location, iqn):
        """Return a Cinder-style iSER connection_info dict."""
return {
'driver_volume_type': 'iser',
'data': {
'volume_id': volume['id'],
'target_portal': location,
'target_iqn': iqn,
'target_lun': 1,
}
}
    def sheepdog_connection(self, volume):
        """Return a Cinder-style sheepdog connection_info dict."""
return {
'driver_volume_type': 'sheepdog',
'data': {
'name': volume['name']
}
}
def test_libvirt_sheepdog_driver(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.sheepdog_connection(self.vol)
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
self.assertEqual(tree.find('./source').get('name'), self.name)
libvirt_driver.disconnect_volume(connection_info, "vde")
    def rbd_connection(self, volume):
        """Return a Cinder-style RBD connection_info dict, QoS specs included."""
return {
'driver_volume_type': 'rbd',
'data': {
'name': '%s/%s' % ('rbd', volume['name']),
'auth_enabled': CONF.libvirt.rbd_secret_uuid is not None,
'auth_username': CONF.libvirt.rbd_user,
'secret_type': 'ceph',
'secret_uuid': CONF.libvirt.rbd_secret_uuid,
'qos_specs': {
'total_bytes_sec': '1048576',
'read_iops_sec': '500',
}
}
}
def test_libvirt_rbd_driver(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.rbd_connection(self.vol)
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertNetworkAndProtocolEquals(tree)
self.assertIsNone(tree.find('./source/auth'))
self.assertEqual('1048576', tree.find('./iotune/total_bytes_sec').text)
self.assertEqual('500', tree.find('./iotune/read_iops_sec').text)
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_rbd_driver_hosts(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.rbd_connection(self.vol)
hosts = ['example.com', '1.2.3.4', '::1']
ports = [None, '6790', '6791']
connection_info['data']['hosts'] = hosts
connection_info['data']['ports'] = ports
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertNetworkAndProtocolEquals(tree)
self.assertIsNone(tree.find('./source/auth'))
found_hosts = tree.findall('./source/host')
self.assertEqual([host.get('name') for host in found_hosts], hosts)
self.assertEqual([host.get('port') for host in found_hosts], ports)
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_rbd_driver_auth_enabled(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.rbd_connection(self.vol)
secret_type = 'ceph'
connection_info['data']['auth_enabled'] = True
connection_info['data']['auth_username'] = self.user
connection_info['data']['secret_type'] = secret_type
connection_info['data']['secret_uuid'] = self.uuid
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertNetworkAndProtocolEquals(tree)
self.assertEqual(tree.find('./auth').get('username'), self.user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), self.uuid)
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_rbd_driver_auth_enabled_flags_override(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.rbd_connection(self.vol)
secret_type = 'ceph'
connection_info['data']['auth_enabled'] = True
connection_info['data']['auth_username'] = self.user
connection_info['data']['secret_type'] = secret_type
connection_info['data']['secret_uuid'] = self.uuid
flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
flags_user = 'bar'
self.flags(rbd_user=flags_user,
rbd_secret_uuid=flags_uuid,
group='libvirt')
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertNetworkAndProtocolEquals(tree)
self.assertEqual(tree.find('./auth').get('username'), flags_user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_rbd_driver_auth_disabled(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.rbd_connection(self.vol)
secret_type = 'ceph'
connection_info['data']['auth_enabled'] = False
connection_info['data']['auth_username'] = self.user
connection_info['data']['secret_type'] = secret_type
connection_info['data']['secret_uuid'] = self.uuid
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertNetworkAndProtocolEquals(tree)
self.assertIsNone(tree.find('./auth'))
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_rbd_driver_auth_disabled_flags_override(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.rbd_connection(self.vol)
secret_type = 'ceph'
connection_info['data']['auth_enabled'] = False
connection_info['data']['auth_username'] = self.user
connection_info['data']['secret_type'] = secret_type
connection_info['data']['secret_uuid'] = self.uuid
# NOTE: Supplying the rbd_secret_uuid will enable authentication
# locally in nova-compute even if not enabled in nova-volume/cinder
flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
flags_user = 'bar'
self.flags(rbd_user=flags_user,
rbd_secret_uuid=flags_uuid,
group='libvirt')
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertNetworkAndProtocolEquals(tree)
self.assertEqual(tree.find('./auth').get('username'), flags_user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
libvirt_driver.disconnect_volume(connection_info, "vde")
@mock.patch.object(host.Host, 'find_secret')
@mock.patch.object(host.Host, 'create_secret')
@mock.patch.object(host.Host, 'delete_secret')
def test_libvirt_iscsi_net_driver(self, mock_delete, mock_create,
mock_find):
mock_find.return_value = FakeSecret()
mock_create.return_value = FakeSecret()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.iscsi_connection(self.vol, self.location,
self.iqn, auth=True)
secret_type = 'iscsi'
flags_user = connection_info['data']['auth_username']
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertISCSINetworkAndProtocolEquals(tree)
self.assertEqual(tree.find('./auth').get('username'), flags_user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), SECRET_UUID)
libvirt_driver.disconnect_volume(connection_info, 'vde')
def test_libvirt_nfs_driver(self):
        # NOTE(vish): is_mounted is stubbed to False so the driver runs the
        # mount, which the fake_execute stub makes succeed.
mnt_base = '/mnt'
self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
export_string = '192.168.1.1:/nfs/share1'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
connection_info = {'data': {'export': export_string,
'name': self.name}}
libvirt_driver.connect_volume(connection_info, self.disk_info)
libvirt_driver.disconnect_volume(connection_info, "vde")
device_path = os.path.join(export_mnt_base,
connection_info['data']['name'])
self.assertEqual(device_path, connection_info['data']['device_path'])
expected_commands = [
('mkdir', '-p', export_mnt_base),
('mount', '-t', 'nfs', export_string, export_mnt_base),
('umount', export_mnt_base)]
self.assertEqual(expected_commands, self.executes)
@mock.patch.object(volume.utils, 'execute')
@mock.patch.object(volume.LOG, 'debug')
@mock.patch.object(volume.LOG, 'exception')
def test_libvirt_nfs_driver_umount_error(self, mock_LOG_exception,
mock_LOG_debug, mock_utils_exe):
export_string = '192.168.1.1:/nfs/share1'
connection_info = {'data': {'export': export_string,
'name': self.name}}
libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
mock_utils_exe.side_effect = processutils.ProcessExecutionError(
None, None, None, 'umount', 'umount: device is busy.')
libvirt_driver.disconnect_volume(connection_info, "vde")
self.assertTrue(mock_LOG_debug.called)
mock_utils_exe.side_effect = processutils.ProcessExecutionError(
None, None, None, 'umount', 'umount: target is busy.')
libvirt_driver.disconnect_volume(connection_info, "vde")
self.assertTrue(mock_LOG_debug.called)
mock_utils_exe.side_effect = processutils.ProcessExecutionError(
None, None, None, 'umount', 'umount: Other error.')
libvirt_driver.disconnect_volume(connection_info, "vde")
self.assertTrue(mock_LOG_exception.called)
def test_libvirt_nfs_driver_get_config(self):
libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
mnt_base = '/mnt'
self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
export_string = '192.168.1.1:/nfs/share1'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
file_path = os.path.join(export_mnt_base, self.name)
connection_info = {'data': {'export': export_string,
'name': self.name,
'device_path': file_path}}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
self.assertEqual('raw', tree.find('./driver').get('type'))
def test_libvirt_nfs_driver_already_mounted(self):
        # NOTE(vish): no stub here, so the driver treats the share as already mounted
mnt_base = '/mnt'
self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
export_string = '192.168.1.1:/nfs/share1'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
connection_info = {'data': {'export': export_string,
'name': self.name}}
libvirt_driver.connect_volume(connection_info, self.disk_info)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [
('findmnt', '--target', export_mnt_base, '--source',
export_string),
('umount', export_mnt_base)]
self.assertEqual(self.executes, expected_commands)
def test_libvirt_nfs_driver_with_opts(self):
mnt_base = '/mnt'
self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
export_string = '192.168.1.1:/nfs/share1'
options = '-o intr,nfsvers=3'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
connection_info = {'data': {'export': export_string,
'name': self.name,
'options': options}}
libvirt_driver.connect_volume(connection_info, self.disk_info)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [
('mkdir', '-p', export_mnt_base),
('mount', '-t', 'nfs', '-o', 'intr,nfsvers=3',
export_string, export_mnt_base),
('umount', export_mnt_base),
]
self.assertEqual(expected_commands, self.executes)
@mock.patch.object(libvirt_utils, 'is_mounted')
def test_libvirt_smbfs_driver(self, mock_is_mounted):
mnt_base = '/mnt'
self.flags(smbfs_mount_point_base=mnt_base, group='libvirt')
mock_is_mounted.return_value = False
libvirt_driver = volume.LibvirtSMBFSVolumeDriver(self.fake_conn)
export_string = '//192.168.1.1/volumes'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
connection_info = {'data': {'export': export_string,
'name': self.name,
'options': None}}
libvirt_driver.connect_volume(connection_info, self.disk_info)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [
('mkdir', '-p', export_mnt_base),
('mount', '-t', 'cifs', '-o', 'username=guest',
export_string, export_mnt_base),
('umount', export_mnt_base)]
self.assertEqual(expected_commands, self.executes)
def test_libvirt_smbfs_driver_already_mounted(self):
mnt_base = '/mnt'
self.flags(smbfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = volume.LibvirtSMBFSVolumeDriver(self.fake_conn)
export_string = '//192.168.1.1/volumes'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
connection_info = {'data': {'export': export_string,
'name': self.name}}
libvirt_driver.connect_volume(connection_info, self.disk_info)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [
('findmnt', '--target', export_mnt_base,
'--source', export_string),
('umount', export_mnt_base)]
self.assertEqual(expected_commands, self.executes)
def test_libvirt_smbfs_driver_get_config(self):
mnt_base = '/mnt'
self.flags(smbfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = volume.LibvirtSMBFSVolumeDriver(self.fake_conn)
export_string = '//192.168.1.1/volumes'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
file_path = os.path.join(export_mnt_base, self.name)
connection_info = {'data': {'export': export_string,
'name': self.name,
'device_path': file_path}}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
@mock.patch.object(libvirt_utils, 'is_mounted')
def test_libvirt_smbfs_driver_with_opts(self, mock_is_mounted):
mnt_base = '/mnt'
self.flags(smbfs_mount_point_base=mnt_base, group='libvirt')
mock_is_mounted.return_value = False
libvirt_driver = volume.LibvirtSMBFSVolumeDriver(self.fake_conn)
export_string = '//192.168.1.1/volumes'
options = '-o user=guest,uid=107,gid=105'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
connection_info = {'data': {'export': export_string,
'name': self.name,
'options': options}}
libvirt_driver.connect_volume(connection_info, self.disk_info)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [
('mkdir', '-p', export_mnt_base),
('mount', '-t', 'cifs', '-o', 'user=guest,uid=107,gid=105',
export_string, export_mnt_base),
('umount', export_mnt_base)]
self.assertEqual(expected_commands, self.executes)
| apache-2.0 | 6,796,882,139,458,009,000 | 41.387671 | 79 | 0.579582 | false | 3.799951 | true | false | false |
beeftornado/sentry | src/sentry/mediators/external_requests/util.py | 1 | 2743 | from __future__ import absolute_import
import logging
from jsonschema import Draft7Validator
from requests.exceptions import Timeout, ConnectionError
from sentry.utils.sentryappwebhookrequests import SentryAppWebhookRequestsBuffer
from sentry.http import safe_urlopen
from sentry.models.sentryapp import track_response_code
logger = logging.getLogger(__name__)
SELECT_OPTIONS_SCHEMA = {
"type": "array",
"definitions": {
"select-option": {
"type": "object",
"properties": {"label": {"type": "string"}, "value": {"type": "string"}},
"required": ["label", "value"],
}
},
"properties": {"type": "array", "items": {"$ref": "#definitions/select-option"}},
}
ISSUE_LINKER_SCHEMA = {
"type": "object",
"properties": {
"webUrl": {"type": "string"},
"identifier": {"type": "string"},
"project": {"type": "string"},
},
"required": ["webUrl", "identifier", "project"],
}
SCHEMA_LIST = {"select": SELECT_OPTIONS_SCHEMA, "issue_link": ISSUE_LINKER_SCHEMA}
def validate(instance, schema_type):
    schema = SCHEMA_LIST[schema_type]
    v = Draft7Validator(schema)
    return v.is_valid(instance)
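# Illustrative sketch (not part of the original module): exercising
# validate() against the issue-link schema; the payload below is
# hypothetical.
def _demo_validate():
    good = {
        "webUrl": "https://example.com/issues/1",
        "identifier": "PROJ-1",
        "project": "PROJ",
    }
    assert validate(good, "issue_link")
    # missing required "webUrl" and "project" keys
    assert not validate({"identifier": "PROJ-1"}, "issue_link")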
def send_and_save_sentry_app_request(url, sentry_app, org_id, event, **kwargs):
"""
Send a webhook request, and save the request into the Redis buffer for the app dashboard request log
Returns the response of the request
kwargs ends up being the arguments passed into safe_urlopen
"""
buffer = SentryAppWebhookRequestsBuffer(sentry_app)
slug = sentry_app.slug_for_metrics
try:
resp = safe_urlopen(url=url, **kwargs)
except (Timeout, ConnectionError) as e:
error_type = e.__class__.__name__.lower()
logger.info(
"send_and_save_sentry_app_request.timeout",
extra={
"error_type": error_type,
"organization_id": org_id,
"integration_slug": sentry_app.slug,
},
)
track_response_code(error_type, slug, event)
# Response code of 0 represents timeout
buffer.add_request(response_code=0, org_id=org_id, event=event, url=url)
# Re-raise the exception because some of these tasks might retry on the exception
raise
else:
track_response_code(resp.status_code, slug, event)
buffer.add_request(
response_code=resp.status_code,
org_id=org_id,
event=event,
url=url,
error_id=resp.headers.get("Sentry-Hook-Error"),
project_id=resp.headers.get("Sentry-Hook-Project"),
)
resp.raise_for_status()
return resp
| bsd-3-clause | -2,112,596,934,219,979,300 | 29.142857 | 104 | 0.611739 | false | 3.857947 | false | false | false |
jinzekid/codehub | python/数据库/MutilToMutil/orm_m2m_api.py | 1 | 1111 | # Author: Jason Lu
import orm_createTable
from sqlalchemy.orm import sessionmaker, relationship
# Create a session class bound to the engine; note that sessionmaker returns a class, not an instance
Session_class = sessionmaker(bind=orm_createTable.engine)
Session = Session_class()  # instantiate a session
# b1 = orm_createTable.Book(name="Learn python with Alex", pub_date='2014-05-02')
# b2 = orm_createTable.Book(name="Learn java with Jason", pub_date='2016-08-02')
b3 = orm_createTable.Book(name="跟我学开车", pub_date='2017-10-02')
#
#
a1 = orm_createTable.Author(name="Alex")
# a2 = orm_createTable.Author(name="Jason")
# a3 = orm_createTable.Author(name="BBB")
#
# b1.authors = [a1, a2]
# b3.authors = [a1, a2, a3]
#
# Session.add_all([b1, b2, b3, a1, a2, a3])
b3.authors = [a1]
Session.add_all([b3])
Session.commit()
# data = Session.query(orm_createTable.Author).filter(orm_createTable.Author.name=='Alex').first()
# print(data.book)
# # print(data.book[1].pub_date)
# print()
# book_obj = Session.query(orm_createTable.Book).filter(orm_createTable.Book.id==2).first()
# print(book_obj.authors)
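# Hypothetical follow-up query, commented out like the examples above:
# look up the author again and list the books linked through the M2M table.
# author = Session.query(orm_createTable.Author).filter(
#     orm_createTable.Author.name == 'Alex').first()
# print(author.book)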
| gpl-3.0 | -4,823,171,416,976,102,000 | 28.914286 | 98 | 0.714422 | false | 2.412442 | false | false | false |
rodrigopitanga/django-smssync | setup.py | 1 | 1349 | import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-smssync',
version='0.3',
packages=find_packages(),
include_package_data=True,
license='GNU GPLv3',
description='A simple Django app to integrate with SMSSync, an SMS gateway for Android.',
long_description=README,
url='https://github.com/rodrigopitanga/django-smssync/',
author='Rodrigo Pitanga',
author_email='[email protected]',
install_requires=[
'django-phonenumber-field',
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.9',
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| gpl-3.0 | -7,964,609,491,214,281,000 | 33.589744 | 93 | 0.623425 | false | 3.736842 | false | false | false |
unitedstates/federal_spending | federal_spending/settings.py | 1 | 5919 | # Django settings for retinaburner project.
import os.path
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
ADMINS = (
('Kaitlin Devine', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'd-%xtxi759=renuz$l@@pav@+-_fqm+=j7wcmnk_z@bc&j8pzk'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'federal_spending.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'federal_spending.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'federal_spending.usaspending',
'federal_spending.fbo',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TMP_DIRECTORY = PROJECT_ROOT + '/tmp'
CSV_PATH = PROJECT_ROOT + '/usaspending/downloads/csvs/'
LOGGING_DIRECTORY = PROJECT_ROOT + '/usaspending/logs'
FISCAL_YEARS = [2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014]
UPDATE_YEARS = [2012, 2013, 2014]
from local_settings import * | cc0-1.0 | 200,431,119,849,129,440 | 34.238095 | 127 | 0.691671 | false | 3.653704 | false | false | false |
jerome-guichard/primitiveWS | old/src/textToSpeech/mime.py | 1 | 2702 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import pypot.primitive
import pypot.robot
import random
from pypot.primitive.move import MoveRecorder, Move, MovePlayer
from speak import Speak
class Mime(pypot.primitive.Primitive):
def __init__(self, robot):
pypot.primitive.Primitive.__init__(self, robot)
self._speak = Speak(robot)
self._move = "../src/moveRecorded/mime.move"
self._movem1 = "../src/moveRecorded/singe.move"
self._movem2 = "../src/moveRecorded/poulet.move"
self._movem3 = "../src/moveRecorded/nager.move"
self._movem4 = "../src/moveRecorded/manger.move"
#self._moveo = "../src/moveRecorded/faux.move"
#self._moven = "../src/moveRecorded/vrai4.move"
def run(self):
poppy = self.robot
for m in poppy.motors:
m.compliant = False
num1 = random.randint(1,4)
if num1 == 1 :
text1 = "singe"
mouvement = self._movem1
elif num1 == 2 :
text1 = "poulet"
mouvement = self._movem2
elif num1 == 3 :
text1 = "nager"
mouvement = self._movem3
elif num1 == 4:
text1 = "manger"
mouvement = self._movem4
else:
text1 = "singe"
mouvement = self._movem1
print (text1)
print "lancer vrai4 si le patient répond bien, faux si il se trompe."
text = "Devine ce que je mime."
with open(self._move) as f :
m = Move.load(f)
move_player = MovePlayer(self.robot, m)
move_player.start()
time.sleep(0.5)
self._speak.start(text)
time.sleep(3)
with open(mouvement) as f :
m = Move.load(f)
move_player = MovePlayer(self.robot, m)
move_player.start()
# while True :
# rep = raw_input()
        # # if 'n' is pressed
# if rep == "n" :
# with open(self._moven) as f :
# m = Move.load(f)
# move_player = MovePlayer(self.robot, m)
# move_player.start()
# time.sleep(1.5)
# self._speak.start("Non, ce n'est pas ça. Essaye encore!")
# elif rep == "o" :
# with open(self._moveo) as f :
# m = Move.load(f)
# move_player = MovePlayer(self.robot, m)
# move_player.start()
# time.sleep(1.5)
# self._speak.start("Bravo! Tu as vu comme je suis bon acteur?")
# break | gpl-3.0 | 5,999,994,371,491,001,000 | 24.009259 | 80 | 0.494074 | false | 3.43075 | false | false | false |
lixar/giant | giant/plugins/servers/web_api_2/web_api_2/examples.py | 1 | 2944 | #!/usr/bin/env python
import random
from collections import defaultdict
from giant.giant_base.giant_base import GiantError
def raise_(ex):
raise ex
swagger_to_csharp_enum_example_map = {
'string': defaultdict(lambda: lambda enum: '"' + random.choice(enum) + '";',
{
'guid': lambda enum: 'new Guid(' + random.choice(enum) + ');',
'date': lambda enum: 'DateTime.parse(' + random.choice(enum) + ');',
'date-time': lambda enum: 'DateTime.parse(' + random.choice(enum) + ');',
'byte': lambda enum: raise_(GiantError('Shiver me timbers, I can\'t parse a enum byte type. Implement it yerself!')),
'binary': lambda enum: raise_(GiantError('Shiver me timbers, I can\'t parse a enum binary type. Implement it yerself!')),
'password': lambda enum: random.choice(enum)
}
),
'integer': defaultdict(lambda: lambda enum: str(random.choice(enum)) + ';',
{
'int32': lambda enum: str(random.choice(enum)) + ';',
'int64': lambda enum: str(random.choice(enum)) + ';'
}
),
'number': defaultdict(lambda: lambda enum: str(random.choice(enum)) + ';',
{
'float': lambda enum: str(random.choice(enum)) + ';',
'double': lambda enum: str(random.choice(enum)) + ';'
}
),
    'boolean': defaultdict(lambda: lambda enum: str(random.choice(enum)).lower() + ';')  # C# boolean literals are lowercase
}
def example_integer(schema):
minimum = schema.get('minimum', 1)
maximum = schema.get('maximum', minimum + 100)
multiple = schema.get('multipleOf', 1)
return random.choice(range(minimum, maximum, multiple))
def example_float(schema):
minimum = schema.get('minimum', 0.0)
maximum = schema.get('maximum', 100.0)
multiple = schema.get('multipleOf', 0.01)
    # snap to the nearest allowed multiple; callers stringify the result
    return round(random.uniform(minimum, maximum) / multiple) * multiple
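# Illustrative sketch (not part of the original plugin): drawing sample
# values from hypothetical Swagger schema fragments.
def _demo_numeric_examples():
    int_schema = {"minimum": 10, "maximum": 20, "multipleOf": 2}
    float_schema = {"minimum": 0.0, "maximum": 1.0, "multipleOf": 0.25}
    return example_integer(int_schema), example_float(float_schema)  # e.g. (14, 0.75)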
swagger_to_csharp_example_map = {
'string': defaultdict(lambda: lambda schema: '"ExampleString";',
{
'guid': lambda schema: 'new Guid();',
'date': lambda schema: 'new DateTime();',
'date-time': lambda schema: 'new DateTime();',
'byte': lambda schema: 'new byte[10];',
'binary': lambda schema: 'new byte[10];',
'password': lambda schema: '"thepasswordispassword"'
}
),
'integer': defaultdict(lambda: lambda schema: str(example_integer(schema)) + ';',
{
'int32': lambda schema: str(example_integer(schema)) + ';',
'int64': lambda schema: str(example_integer(schema)) + ';'
}
),
'number': defaultdict(lambda: lambda schema: str(example_float(schema)) + ';',
{
'float': lambda schema: str(example_float(schema)) + ';',
'double': lambda schema: str(example_float(schema)) + ';'
}
),
'boolean': defaultdict(lambda: lambda schema: random.choice(('true;', 'false;')))
} | mit | 853,531,755,141,804,400 | 39.342466 | 133 | 0.581522 | false | 3.717172 | false | false | false |
pavelsof/stl | stl/cli.py | 1 | 6854 | import argparse
from stl.core import Core
from stl import __version__
class Cli:
"""
Singleton that handles the user input, inits the whole machinery, and takes
care of exiting the programme.
"""
def __init__(self):
"""
Constructor. Inits the argparse parser and then all the subparsers
through the _init_* methods.
Each of the latter defines a function that takes a Core instance and
the argparse args as arguments, which function will be called if the
respective command is called.
"""
usage = 'stl [-v] [--dir DIR] subcommand'
desc = (
'stl is a simple time logger that enables you to '
'keep tally of how many hours you have worked on this or that'
)
self.parser = argparse.ArgumentParser(usage=usage, description=desc)
self.parser.add_argument('--version', action='version', version=__version__)
self.parser.add_argument('-v', '--verbose', action='store_true',
help='print debug info')
self.parser.add_argument('--dir', help=(
'set the directory where the data will be saved; '
'defaults to ~/.config/stl or ~/.stl'))
self.subparsers = self.parser.add_subparsers(dest='command',
title='subcommands')
self._init_start()
self._init_stop()
self._init_switch()
self._init_status()
self._init_add()
self._init_edit()
def _init_start(self):
"""
Inits the subparser that handles the start command.
"""
def start(core, args):
task = ' '.join(args.task) if args.task else ''
return core.start(task=task)
usage = 'stl start [task]'
desc = (
'make a log that you are starting to work'
)
subp = self.subparsers.add_parser('start', usage=usage,
description=desc, help=desc)
subp.add_argument('task', nargs=argparse.REMAINDER,
help='the task that you are about to start working on')
subp.set_defaults(func=start)
def _init_stop(self):
"""
Inits the subparser that handles the stop command.
"""
def stop(core, args):
return core.stop()
usage = 'stl stop'
desc = (
'make a log that you just stopped working'
)
subp = self.subparsers.add_parser('stop', usage=usage,
description=desc, help=desc)
subp.set_defaults(func=stop)
def _init_switch(self):
"""
Inits the subparser that handles the switch command.
"""
def switch(core, args):
task = ' '.join(args.task) if args.task else ''
return core.switch(task=task)
usage = 'stl switch [task]'
desc = (
'shortcut for stl stop && stl start; '
'stop the current task and immediately start another one'
)
subp = self.subparsers.add_parser('switch', usage=usage,
description=desc, help=desc[:desc.find(';')])
subp.add_argument('task', nargs=argparse.REMAINDER,
help='the task that you are about to start working on')
subp.set_defaults(func=switch)
def _init_status(self):
"""
Inits the subparser that handles the status/show command.
"""
def status(core, args):
extra = None
for key in ['day', 'week', 'month', 'year', 'span', 'task']:
if getattr(args, key) is not None:
extra = (key, ' '.join(getattr(args, key)))
break
return core.status(extra=extra)
usage = (
'stl (status|show) '
'[-d ... | -w ... | -m ... | -y ... | -s ... | -t ...]'
)
desc = (
'show a status report; '
'when called without further arguments, '
'it will tell you what you are doing now'
)
subp = self.subparsers.add_parser('status', aliases=['show'],
usage=usage, description=desc, help=desc[:desc.find(';')])
group = subp.add_mutually_exclusive_group()
group.add_argument('-d', '--day', nargs=argparse.REMAINDER, help=(
'report for the given day, '
'e.g. 15 oct, 2016-10-15, today, yesterday; '
'empty string defaults to today'))
group.add_argument('-w', '--week', nargs=argparse.REMAINDER, help=(
'report for the given week, '
'possible values are this and last; '
'empty string defaults to this week'))
group.add_argument('-m', '--month', nargs=argparse.REMAINDER, help=(
'report for the given month, '
'e.g. oct, 10, 10 2016, this, last; '
'empty string defaults to this month'))
group.add_argument('-y', '--year', nargs=argparse.REMAINDER, help=(
'report for the given year, '
'e.g. 2016, this, last; '
'empty string defaults to this year'))
group.add_argument('-s', '--span', nargs=argparse.REMAINDER, help=(
'report for the time span between two dates (inclusive), '
'e.g. 15 25 oct, 15 sep 2016 25 oct 2016, 15 sep 25 oct; '
'if you specify only one date, the second will be set to today; '
'some restrictions: '
'the second date (if such) cannot be less specific than the first '
'and months cannot be numbers'))
group.add_argument('-t', '--task', nargs=argparse.REMAINDER,
help='report for the given task')
subp.set_defaults(func=status)
def _init_add(self):
"""
Inits the subparser that handles the add command.
"""
def add(core, args):
return core.add(args.start, args.stop, args.task)
usage = 'stl add start stop [task]'
desc = (
'directly add a log entry; '
'you can also do this from python, take a look at '
'stl.core.Core.add()'
)
subp = self.subparsers.add_parser('add', usage=usage,
description=desc, help=desc[:desc.find(';')])
subp.add_argument('start',
help='when work on the task started; use %%Y-%%m-%%dT%%H:%%M')
subp.add_argument('stop',
help='when work on the task stopped; use %%Y-%%m-%%dT%%H:%%M')
subp.add_argument('task', nargs='?', default='',
help='the task being worked on; optional')
subp.set_defaults(func=add)
def _init_edit(self):
"""
Inits the subparser that handles the edit command.
"""
def edit(core, args):
month = ' '.join(getattr(args, 'month', []))
core.edit(month)
usage = 'stl edit [month]'
desc = (
'lets you vim the right file'
)
subp = self.subparsers.add_parser('edit', usage=usage,
description=desc, help=desc)
subp.add_argument('month', nargs=argparse.REMAINDER,
help='the month you want to edit, e.g. oct 2016')
subp.set_defaults(func=edit)
def run(self, raw_args=None):
"""
Parses the given arguments (or, except for in unit testing, sys.argv),
inits the Core instance and transfers to that. Note that if raw_args is
None, then argparse's parser defaults to reading sys.argv.
Returns a human-readable string to be printed to the user.
"""
args = self.parser.parse_args(raw_args)
if args.command is None:
return self.parser.format_help()
core = Core(dir_path=args.dir, verbose=args.verbose)
try:
res = args.func(core, args)
except Exception as err:
return str(err)
return res
def main():
"""
The (only) entry point for the command-line interface as registered in
setup.py. Inits a Cli instance, runs it with sys.argv, and prints the
output to stdout.
"""
cli = Cli()
res = cli.run()
if res: print(res.strip())
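# Illustrative usage (not part of the original module); the task names and
# dates are hypothetical:
#
#     cli = Cli()
#     print(cli.run(['start', 'writing', 'docs']))
#     print(cli.run(['status', '-d', 'today']))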
| mit | 4,102,905,145,173,503,000 | 26.198413 | 78 | 0.657718 | false | 3.088779 | false | false | false |
pjuren/pyokit | src/pyokit/io/david.py | 1 | 4704 | #!/usr/bin/python
"""
Date of Creation: 19th Dec 2014
Description: Functions for loading DAVID gene ontology results
Copyright (C) 2010-2014
Philip J. Uren,
Authors: Philip J. Uren
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# pyokit imports
from pyokit.datastruct.geneOntology import GeneOntologyEnrichmentResult
###############################################################################
# MODULE-LEVEL CONSTANTS #
###############################################################################
NUM_FIELDS_IN_DAVID_RECORD = 13
PVAL_FIELD_NUM = 11
###############################################################################
# ITERATORS #
###############################################################################
def david_results_iterator(fn, verbose=False):
"""
  Iterate over a DAVID result set and yield GeneOntologyEnrichmentResult
  objects representing each of the terms reported. The expected format for a
  DAVID result file is tab-separated format. The following fields should be
  present:
=== =============== ========== ====================================
Num Field Type Example
=== =============== ========== ====================================
0 Category string GOTERM_BP_FAT
1 Term string GO:0046907~intracellular transport
2 Count int 43
3 Percent float 11.345646437994723
4 PValue float 1.3232857694449546E-9
5 Genes string ARSB, KPNA6, GNAS
6 List Total int 310
7 Pop Hits int 657
8 Pop Total int 13528
9 Fold Enrichment float 2.8561103746256196
10 Bonferroni float 2.6293654579179204E-6
11 Benjamini float 2.6293654579179204E-6
12 FDR float 2.2734203852792234E-6
=== =============== ========== ====================================
The first line is a header giving the field names -- this is ignored though,
and we expect them in the order given above.
Most of the fields are ignored at present; we take fields 0,1, and 11 (as the
significance/p-value). When parsing the term field, we try to extract a term
ID by splitting on tilde, but if we can't then this is set to None.
:param fn: the file to parse
:param verbose: if True, output progress to stderr.
"""
first = True
for line in open(fn):
line = line.strip()
if line == "":
continue
if first:
first = False
continue
parts = line.split("\t")
if len(parts) != NUM_FIELDS_IN_DAVID_RECORD:
raise IOError("failed to parse " + fn + " as DAVID result file. "
+ "Expected " + str(NUM_FIELDS_IN_DAVID_RECORD) + " "
+ "tab-separated fields, but found "
+ str(len(parts)) + " instead")
n_parts = parts[1].split("~")
name = n_parts[-1].strip()
identifier = n_parts[0] if len(n_parts) > 1 else None
    category = parts[0].strip()
try:
p_val = float(parts[PVAL_FIELD_NUM])
except ValueError:
raise IOError("Failed to parse " + fn + " as DAVID result file. "
+ "Expected field " + str(PVAL_FIELD_NUM) + " "
+ "to contain a floating point number "
+ "(Benjamini), found this instead: "
+ str(parts[PVAL_FIELD_NUM]))
    yield GeneOntologyEnrichmentResult(name, p_val, identifier, category)
###############################################################################
# BULK LOADING FUNCTIONS #
###############################################################################
def david_results_load_file(fn, verbose=False):
"""
  Load a set of DAVID gene ontology results as a list of
  GeneOntologyEnrichmentResult objects.
  :param fn:      the file to parse
  :param verbose: if True, output progress to stderr.
"""
return [x for x in david_results_iterator(fn, verbose)]
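# Illustrative usage (not part of the original module); the path is
# hypothetical:
#
#     for term in david_results_iterator("david_output.tsv"):
#         print term  # GeneOntologyEnrichmentResult instances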
| lgpl-2.1 | -3,720,982,818,224,237,000 | 39.205128 | 79 | 0.51977 | false | 4.226415 | false | false | false |
semonte/intellij-community | plugins/hg4idea/testData/bin/hgext/convert/p4.py | 95 | 6836 | # Perforce source for convert extension.
#
# Copyright 2009, Frank Kingswood <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from mercurial import util
from mercurial.i18n import _
from common import commit, converter_source, checktool, NoRepo
import marshal
import re
def loaditer(f):
"Yield the dictionary objects generated by p4"
try:
while True:
d = marshal.load(f)
if not d:
break
yield d
except EOFError:
pass
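# Illustrative sketch (not part of the original extension): loaditer consumes
# the marshalled dictionaries emitted by any "p4 -G" command; the depot path
# below is hypothetical.
#
#     stdout = util.popen('p4 -G changes -s submitted //depot/proj/...',
#                         mode='rb')
#     for d in loaditer(stdout):
#         print d.get('change')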
class p4_source(converter_source):
def __init__(self, ui, path, rev=None):
super(p4_source, self).__init__(ui, path, rev=rev)
if "/" in path and not path.startswith('//'):
raise NoRepo(_('%s does not look like a P4 repository') % path)
checktool('p4', abort=False)
self.p4changes = {}
self.heads = {}
self.changeset = {}
self.files = {}
self.tags = {}
self.lastbranch = {}
self.parent = {}
self.encoding = "latin_1"
self.depotname = {} # mapping from local name to depot name
self.re_type = re.compile(
"([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
"(\+\w+)?$")
self.re_keywords = re.compile(
r"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
r":[^$\n]*\$")
self.re_keywords_old = re.compile("\$(Id|Header):[^$\n]*\$")
self._parse(ui, path)
def _parse_view(self, path):
"Read changes affecting the path"
cmd = 'p4 -G changes -s submitted %s' % util.shellquote(path)
stdout = util.popen(cmd, mode='rb')
for d in loaditer(stdout):
c = d.get("change", None)
if c:
self.p4changes[c] = True
def _parse(self, ui, path):
"Prepare list of P4 filenames and revisions to import"
ui.status(_('reading p4 views\n'))
# read client spec or view
if "/" in path:
self._parse_view(path)
if path.startswith("//") and path.endswith("/..."):
views = {path[:-3]:""}
else:
views = {"//": ""}
else:
cmd = 'p4 -G client -o %s' % util.shellquote(path)
clientspec = marshal.load(util.popen(cmd, mode='rb'))
views = {}
for client in clientspec:
if client.startswith("View"):
sview, cview = clientspec[client].split()
self._parse_view(sview)
if sview.endswith("...") and cview.endswith("..."):
sview = sview[:-3]
cview = cview[:-3]
cview = cview[2:]
cview = cview[cview.find("/") + 1:]
views[sview] = cview
# list of changes that affect our source files
self.p4changes = self.p4changes.keys()
self.p4changes.sort(key=int)
# list with depot pathnames, longest first
vieworder = views.keys()
vieworder.sort(key=len, reverse=True)
# handle revision limiting
startrev = self.ui.config('convert', 'p4.startrev', default=0)
self.p4changes = [x for x in self.p4changes
if ((not startrev or int(x) >= int(startrev)) and
(not self.rev or int(x) <= int(self.rev)))]
# now read the full changelists to get the list of file revisions
ui.status(_('collecting p4 changelists\n'))
lastid = None
for change in self.p4changes:
cmd = "p4 -G describe -s %s" % change
stdout = util.popen(cmd, mode='rb')
d = marshal.load(stdout)
desc = self.recode(d["desc"])
shortdesc = desc.split("\n", 1)[0]
t = '%s %s' % (d["change"], repr(shortdesc)[1:-1])
ui.status(util.ellipsis(t, 80) + '\n')
if lastid:
parents = [lastid]
else:
parents = []
date = (int(d["time"]), 0) # timezone not set
c = commit(author=self.recode(d["user"]),
date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
parents=parents, desc=desc, branch='',
extra={"p4": change})
files = []
i = 0
while ("depotFile%d" % i) in d and ("rev%d" % i) in d:
oldname = d["depotFile%d" % i]
filename = None
for v in vieworder:
if oldname.startswith(v):
filename = views[v] + oldname[len(v):]
break
if filename:
files.append((filename, d["rev%d" % i]))
self.depotname[filename] = oldname
i += 1
self.changeset[change] = c
self.files[change] = files
lastid = change
if lastid:
self.heads = [lastid]
def getheads(self):
return self.heads
def getfile(self, name, rev):
cmd = 'p4 -G print %s' \
% util.shellquote("%s#%s" % (self.depotname[name], rev))
stdout = util.popen(cmd, mode='rb')
mode = None
contents = ""
keywords = None
for d in loaditer(stdout):
code = d["code"]
data = d.get("data")
if code == "error":
raise IOError(d["generic"], data)
elif code == "stat":
p4type = self.re_type.match(d["type"])
if p4type:
mode = ""
flags = (p4type.group(1) or "") + (p4type.group(3) or "")
if "x" in flags:
mode = "x"
if p4type.group(2) == "symlink":
mode = "l"
if "ko" in flags:
keywords = self.re_keywords_old
elif "k" in flags:
keywords = self.re_keywords
elif code == "text" or code == "binary":
contents += data
if mode is None:
raise IOError(0, "bad stat")
if keywords:
contents = keywords.sub("$\\1$", contents)
if mode == "l" and contents.endswith("\n"):
contents = contents[:-1]
return contents, mode
def getchanges(self, rev):
return self.files[rev], {}
def getcommit(self, rev):
return self.changeset[rev]
def gettags(self):
return self.tags
def getchangedfiles(self, rev, i):
return sorted([x[0] for x in self.files[rev]])
| apache-2.0 | -7,231,335,828,341,024,000 | 32.674877 | 77 | 0.486688 | false | 3.951445 | false | false | false |
jonahzheng/zperfmon | server/db_cron/daily_aggregate.py | 3 | 6481 | #!/usr/bin/python
#
# Copyright 2013 Zynga Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys,os,re,syslog,traceback,time
from os import path
from glob import glob
import commands
from itertools import groupby
import shutil
import json
server_config_file = "/etc/zperfmon/server.cfg"
daily_raw_dir = "_raw"
# holder class for putting in config parameters
class CFG:
def set_option(self, option, value):
setattr(self, option, value)
pass
def debug_print(*args):
return
#print(args)
#
# Read the server config file which is php code that creates a map.
#
def get_server_config(config_file):
config_content = open(config_file).read()
cfg = CFG()
for m in re.finditer("^[\t ]*\"([^\"]+)\"\s*=>\s*\"([^\"]+)\"",
config_content, re.MULTILINE):
cfg.set_option(m.group(1), m.group(2))
return cfg
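# Illustrative sketch (not part of the original script): get_server_config()
# scrapes "key" => "value" pairs out of the PHP config, so a file containing
# lines such as (names hypothetical)
#
#     "game_name" => "examplegame",
#     "blob_dir" => "blobs",
#
# yields cfg.game_name == "examplegame" and cfg.blob_dir == "blobs".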
page_re = re.compile('.*_raw/[0-9]+/(?P<runid>[0-9]*)\.(?P<filename>.*)\.xhprof')
def pagename(k):
m = page_re.match(k)
if(m):
return m.group('filename')
def collect_profiles(cfg, rawpath):
xhprof_files = glob("%s/*/*.xhprof" % rawpath)
groups = {}
for f in xhprof_files:
k = pagename(f)
groups.setdefault(k,[])
groups[k].append(f)
return groups
#
# Find all manifest.json files one level under 'source' and combine them. Dump
# result as json 'target'/manifest.json. Manifests are loaded with an eval()
# since the pure python json[encode|decode] (for 2.4) is very slow.
#
def aggregate_manifests(source, target):
aggregate = {}
for manifest in glob(path.join(source, "*", "manifest.json")):
try:
m = json.read(open(manifest).read())
#
# Very simplistic, we could use collections and sets and all
# that. Not enough gain here to justify the trouble.
#
for page, count in [[k, v[1]] for k,v in m.items()]:
if not aggregate.has_key(page):
aggregate[page] = [page, 0]
aggregate[page][1] += count
except Exception, e:
info = sys.exc_info()
syslog.syslog(str(info[0]))
syslog.syslog(traceback.format_exc())
agg_file = path.join(target, "manifest.json")
open(agg_file, "w").write(json.write(aggregate))
return agg_file
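# Illustrative note (not part of the original script): each per-slot
# manifest.json is assumed to map page names to [page, count] pairs, e.g.
#
#     {"index.php": ["index.php", 120], "buy.php": ["buy.php", 7]}
#
# aggregate_manifests() sums the counts per page across slots and writes the
# merged map to <target>/manifest.json.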
# Look backwards num half-hour slots from the slot containing timestamp.
# num defaults to 48 because slots are 1/2 hour wide (i.e. one day).
# root_upload_dir=/db/zperfmon/<game_name>/timeslots/
#
def extract_profiles(cfg, root_upload_dir, timestamp, num=48):
end = int(timestamp / 1800)
    start = end - num
slots = range(start, end)
    files = map(lambda x: path.join(root_upload_dir, str(x), "xhprof", cfg.xhprof_tbz_name), slots)
aggregate_dir = path.normpath(path.join(root_upload_dir,'..','xhprof.daily', str(end), cfg.blob_dir))
rawpath = path.normpath(path.join(root_upload_dir,'..','xhprof.daily', str(end), daily_raw_dir))
if(not path.exists(rawpath)):
os.makedirs(rawpath)
if(not path.exists(aggregate_dir)):
os.makedirs(aggregate_dir)
count = 0
for f in files:
os.makedirs("%s/%d" % (rawpath, count))
cmd = "tar --strip-components 1 -xjf %s -C %s/%d" % (f, rawpath, count)
result = commands.getstatusoutput(cmd)
if(result[0]):
print "Command failed: %s" % cmd
print "Ignoring error and continuing"
count += 1
aggregate_manifests(rawpath, aggregate_dir)
return (aggregate_dir, end, collect_profiles(cfg, rawpath))
def aggregate_runs(cfg, name, aggregate_dir, xhprofs):
cmd = "%s %s %s %s %s" % (cfg.profile_aggregation_command, cfg.game_name, name, aggregate_dir, " ".join(xhprofs))
result = commands.getstatusoutput(cmd)
if(result[0]):
print "Command failed: %s" % cmd
def extract_functions(cfg, name, aggregate_dir, xhprofs):
cmd = "%s %s %s %s %s" % (cfg.profile_extraction_command, cfg.game_name, name, aggregate_dir, " ".join(xhprofs))
result = commands.getstatusoutput(cmd)
if(result[0]):
print "Command failed: %s" % cmd
def cleanup_and_bzip(server_cfg, exec_dir):
# create one tbz for inserting
cwd = os.getcwd()
os.chdir(exec_dir)
#
# Remove the raw directory
#
shutil.rmtree(daily_raw_dir)
#
# bzip to insert
#
cmd = "tar jcf %s %s/" % (server_cfg.xhprof_tbz_name, server_cfg.blob_dir)
print cmd
result = commands.getstatusoutput(cmd)
debug_print(cmd)
os.chdir(cwd)
# ignore failures, recovery and sanity is not worth the returns
if result[0]:
return None
def usage():
print "error !"
def main(cfg):
args = sys.argv[1:]
if(len(args) < 2 or len(args) > 3):
usage()
return
game_name = args[0]
# xhprof_dir = args[1]
root_upload_dir = args[1]
if(len(args) == 3):
timestamp = int(args[2])
else:
timestamp = int(time.time())
cfg.set_option("game_name", game_name)
# (aggregate_dir, day, profile_slots) = extract_profiles(cfg, xhprof_dir, timestamp)
(aggregate_dir, end, profile_slots) = extract_profiles(cfg, root_upload_dir, timestamp)
for name in profile_slots.keys():
aggregate_runs(cfg, "%s.%s" % (end,name), aggregate_dir, profile_slots[name])
# TODO: optimize this to generate off the aggregate file
extract_functions(cfg, "%s.%s" % (end, name), aggregate_dir, profile_slots[name])
cleanup_and_bzip(cfg, path.normpath(path.join(aggregate_dir, "..")))
if __name__ == "__main__":
status = 37
try:
server_cfg = get_server_config(server_config_file)
status = main(server_cfg)
except:
info = sys.exc_info()
syslog.syslog(str(info[0]))
syslog.syslog(traceback.format_exc())
status = 38
print traceback.format_exc()
sys.exit(status)
| apache-2.0 | -7,632,996,699,218,153,000 | 29.00463 | 117 | 0.619195 | false | 3.373764 | true | false | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.0-py2.5.egg/sqlalchemy/schema.py | 1 | 51803 | # schema.py
# Copyright (C) 2005, 2006, 2007 Michael Bayer [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The schema module provides the building blocks for database metadata.
This means all the entities within a SQL database that we might want
to look at, modify, or create and delete are described by these
objects, in a database-agnostic way.
A structure of SchemaItems also provides a *visitor* interface which is
the primary method by which other methods operate upon the schema.
The SQL package extends this structure with its own clause-specific
objects as well as the visitor interface, so that the schema package
*plugs in* to the SQL package.
"""
import re, inspect
from sqlalchemy import types, exceptions, util, databases
from sqlalchemy.sql import expression, visitors
import sqlalchemy
URL = None
__all__ = ['SchemaItem', 'Table', 'Column', 'ForeignKey', 'Sequence', 'Index',
'ForeignKeyConstraint', 'PrimaryKeyConstraint', 'CheckConstraint',
'UniqueConstraint', 'DefaultGenerator', 'Constraint', 'MetaData',
'ThreadLocalMetaData', 'SchemaVisitor', 'PassiveDefault',
'ColumnDefault']
class SchemaItem(object):
"""Base class for items that define a database schema."""
__metaclass__ = expression._FigureVisitName
def _init_items(self, *args):
"""Initialize the list of child items for this SchemaItem."""
for item in args:
if item is not None:
item._set_parent(self)
def _get_parent(self):
raise NotImplementedError()
def _set_parent(self, parent):
"""Associate with this SchemaItem's parent object."""
raise NotImplementedError()
def get_children(self, **kwargs):
"""used to allow SchemaVisitor access"""
return []
def __repr__(self):
return "%s()" % self.__class__.__name__
def _get_bind(self, raiseerr=False):
"""Return the engine or None if no engine."""
if raiseerr:
m = self.metadata
e = m and m.bind or None
if e is None:
raise exceptions.InvalidRequestError("This SchemaItem is not connected to any Engine or Connection.")
else:
return e
else:
m = self.metadata
return m and m.bind or None
bind = property(lambda s:s._get_bind())
def _get_table_key(name, schema):
if schema is None:
return name
else:
return schema + "." + name
class _TableSingleton(expression._FigureVisitName):
"""A metaclass used by the ``Table`` object to provide singleton behavior."""
def __call__(self, name, metadata, *args, **kwargs):
schema = kwargs.get('schema', None)
autoload = kwargs.pop('autoload', False)
autoload_with = kwargs.pop('autoload_with', False)
mustexist = kwargs.pop('mustexist', False)
useexisting = kwargs.pop('useexisting', False)
include_columns = kwargs.pop('include_columns', None)
key = _get_table_key(name, schema)
try:
table = metadata.tables[key]
if args:
if not useexisting:
raise exceptions.ArgumentError("Table '%s' is already defined for this MetaData instance." % key)
return table
except KeyError:
if mustexist:
raise exceptions.ArgumentError("Table '%s' not defined" % (key))
table = type.__call__(self, name, metadata, **kwargs)
table._set_parent(metadata)
# load column definitions from the database if 'autoload' is defined
# we do it after the table is in the singleton dictionary to support
# circular foreign keys
if autoload:
try:
if autoload_with:
autoload_with.reflecttable(table, include_columns=include_columns)
else:
metadata._get_bind(raiseerr=True).reflecttable(table, include_columns=include_columns)
except exceptions.NoSuchTableError:
del metadata.tables[key]
raise
# initialize all the column, etc. objects. done after
# reflection to allow user-overrides
table._init_items(*args)
return table
class Table(SchemaItem, expression.TableClause):
"""Represent a relational database table.
This subclasses ``expression.TableClause`` to provide a table that is
associated with an instance of ``MetaData``, which in turn
may be associated with an instance of ``Engine``.
Whereas ``TableClause`` represents a table as its used in an SQL
expression, ``Table`` represents a table as it exists in a
database schema.
If this ``Table`` is ultimately associated with an engine,
the ``Table`` gains the ability to access the database directly
without the need for dealing with an explicit ``Connection`` object;
this is known as "implicit execution".
Implicit operation allows the ``Table`` to access the database to
reflect its own properties (via the autoload=True flag), it allows
the create() and drop() methods to be called without passing
    a connectable, and it also propagates the underlying engine to
constructed SQL objects so that they too can be executed via their
execute() method without the need for a ``Connection``.
"""
__metaclass__ = _TableSingleton
def __init__(self, name, metadata, **kwargs):
"""Construct a Table.
Table objects can be constructed directly. The init method is
actually called via the TableSingleton metaclass. Arguments
are:
name
The name of this table, exactly as it appears, or will
appear, in the database.
This property, along with the *schema*, indicates the
*singleton identity* of this table.
Further tables constructed with the same name/schema
combination will return the same Table instance.
\*args
Should contain a listing of the Column objects for this table.
\**kwargs
Options include:
schema
The *schema name* for this table, which is
required if the table resides in a schema other than the
default selected schema for the engine's database
connection. Defaults to ``None``.
autoload
Defaults to False: the Columns for this table should be
reflected from the database. Usually there will be no
Column objects in the constructor if this property is set.
autoload_with
if autoload==True, this is an optional Engine or Connection
instance to be used for the table reflection. If ``None``,
the underlying MetaData's bound connectable will be used.
include_columns
A list of strings indicating a subset of columns to be
loaded via the ``autoload`` operation; table columns who
aren't present in this list will not be represented on the resulting
``Table`` object. Defaults to ``None`` which indicates all
columns should be reflected.
mustexist
Defaults to False: indicates that this Table must already
have been defined elsewhere in the application, else an
exception is raised.
useexisting
Defaults to False: indicates that if this Table was
already defined elsewhere in the application, disregard
the rest of the constructor arguments.
owner
Defaults to None: optional owning user of this table.
useful for databases such as Oracle to aid in table
reflection.
quote
Defaults to False: indicates that the Table identifier
must be properly escaped and quoted before being sent to
the database. This flag overrides all other quoting
behavior.
quote_schema
Defaults to False: indicates that the Namespace identifier
must be properly escaped and quoted before being sent to
the database. This flag overrides all other quoting
behavior.
"""
super(Table, self).__init__(name)
self.metadata = metadata
self.schema = kwargs.pop('schema', None)
self.indexes = util.Set()
self.constraints = util.Set()
self._columns = expression.ColumnCollection()
self.primary_key = PrimaryKeyConstraint()
self._foreign_keys = util.OrderedSet()
self.quote = kwargs.pop('quote', False)
self.quote_schema = kwargs.pop('quote_schema', False)
if self.schema is not None:
self.fullname = "%s.%s" % (self.schema, self.name)
else:
self.fullname = self.name
self.owner = kwargs.pop('owner', None)
if len([k for k in kwargs if not re.match(r'^(?:%s)_' % '|'.join(databases.__all__), k)]):
raise TypeError("Invalid argument(s) for Table: %s" % repr(kwargs.keys()))
# store extra kwargs, which should only contain db-specific options
self.kwargs = kwargs
key = property(lambda self:_get_table_key(self.name, self.schema))
def _export_columns(self, columns=None):
# override FromClause's collection initialization logic; TableClause and Table
# implement it differently
pass
def _set_primary_key(self, pk):
if getattr(self, '_primary_key', None) in self.constraints:
self.constraints.remove(self._primary_key)
self._primary_key = pk
self.constraints.add(pk)
primary_key = property(lambda s:s._primary_key, _set_primary_key)
def __repr__(self):
return "Table(%s)" % ', '.join(
[repr(self.name)] + [repr(self.metadata)] +
[repr(x) for x in self.columns] +
["%s=%s" % (k, repr(getattr(self, k))) for k in ['schema']])
def __str__(self):
return _get_table_key(self.encodedname, self.schema)
def append_column(self, column):
"""Append a ``Column`` to this ``Table``."""
column._set_parent(self)
def append_constraint(self, constraint):
"""Append a ``Constraint`` to this ``Table``."""
constraint._set_parent(self)
def _get_parent(self):
return self.metadata
def _set_parent(self, metadata):
metadata.tables[_get_table_key(self.name, self.schema)] = self
self.metadata = metadata
def get_children(self, column_collections=True, schema_visitor=False, **kwargs):
if not schema_visitor:
return expression.TableClause.get_children(self, column_collections=column_collections, **kwargs)
else:
if column_collections:
return [c for c in self.columns]
else:
return []
def exists(self, bind=None):
"""Return True if this table exists."""
if bind is None:
bind = self._get_bind(raiseerr=True)
def do(conn):
return conn.dialect.has_table(conn, self.name, schema=self.schema)
return bind.run_callable(do)
def create(self, bind=None, checkfirst=False):
"""Issue a ``CREATE`` statement for this table.
See also ``metadata.create_all()``."""
self.metadata.create_all(bind=bind, checkfirst=checkfirst, tables=[self])
def drop(self, bind=None, checkfirst=False):
"""Issue a ``DROP`` statement for this table.
See also ``metadata.drop_all()``."""
self.metadata.drop_all(bind=bind, checkfirst=checkfirst, tables=[self])
def tometadata(self, metadata, schema=None):
"""Return a copy of this ``Table`` associated with a different ``MetaData``."""
try:
if schema is None:
schema = self.schema
key = _get_table_key(self.name, schema)
return metadata.tables[key]
except KeyError:
args = []
for c in self.columns:
args.append(c.copy())
for c in self.constraints:
args.append(c.copy())
return Table(self.name, metadata, schema=schema, *args)
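# Illustrative usage (not part of the original module); names are
# hypothetical:
#
#     meta = MetaData()
#     users = Table('users', meta,
#                   Column('id', types.Integer, primary_key=True),
#                   Column('name', types.String(40)))
#     users.create(bind=some_engine, checkfirst=True)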
class Column(SchemaItem, expression._ColumnClause):
"""Represent a column in a database table.
This is a subclass of ``expression.ColumnClause`` and represents an
actual existing table in the database, in a similar fashion as
``TableClause``/``Table``.
"""
def __init__(self, name, type_, *args, **kwargs):
"""Construct a new ``Column`` object.
Arguments are:
name
The name of this column. This should be the identical name
as it appears, or will appear, in the database.
type\_
The ``TypeEngine`` for this column. This can be any
subclass of ``types.AbstractType``, including the
database-agnostic types defined in the types module,
database-specific types defined within specific database
modules, or user-defined types. If the column contains a
ForeignKey, the type can also be None, in which case the
type assigned will be that of the referenced column.
\*args
Constraint, ForeignKey, ColumnDefault and Sequence objects
should be added as list values.
\**kwargs
Keyword arguments include:
key
Defaults to None: an optional *alias name* for this column.
The column will then be identified everywhere in an
application, including the column list on its Table, by
this key, and not the given name. Generated SQL, however,
will still reference the column by its actual name.
primary_key
Defaults to False: True if this column is a primary key
column. Multiple columns can have this flag set to
specify composite primary keys. As an alternative, the
primary key of a Table can be specified via an explicit
``PrimaryKeyConstraint`` instance appended to the Table's
list of objects.
nullable
Defaults to True : True if this column should allow
nulls. True is the default unless this column is a primary
key column.
default
Defaults to None: a scalar, Python callable, or ``ClauseElement``
representing the *default value* for this column, which will
be invoked upon insert if this column is not present in
the insert list or is given a value of None. The default
expression will be converted into a ``ColumnDefault`` object
upon initialization.
_is_oid
Defaults to False: used internally to indicate that this
column is used as the quasi-hidden "oid" column
index
Defaults to False: indicates that this column is
indexed. The name of the index is autogenerated. to
specify indexes with explicit names or indexes that
contain multiple columns, use the ``Index`` construct instead.
unique
Defaults to False: indicates that this column contains a
unique constraint, or if `index` is True as well,
indicates that the Index should be created with the unique
flag. To specify multiple columns in the constraint/index
or to specify an explicit name, use the
``UniqueConstraint`` or ``Index`` constructs instead.
autoincrement
Defaults to True: indicates that integer-based primary key
columns should have autoincrementing behavior, if
supported by the underlying database. This will affect
``CREATE TABLE`` statements such that they will use the
databases *auto-incrementing* keyword (such as ``SERIAL``
for Postgres, ``AUTO_INCREMENT`` for Mysql) and will also
affect the behavior of some dialects during ``INSERT``
statement execution such that they will assume primary key
values are created in this manner. If a ``Column`` has an
explicit ``ColumnDefault`` object (such as via the `default`
keyword, or a ``Sequence`` or ``PassiveDefault``), then
the value of `autoincrement` is ignored and is assumed to be
False. `autoincrement` value is only significant for a
column with a type or subtype of Integer.
quote
Defaults to False: indicates that the Column identifier
must be properly escaped and quoted before being sent to
the database. This flag should normally not be required
as dialects can auto-detect conditions where quoting is
required.
"""
super(Column, self).__init__(name, None, type_)
self.args = args
self.key = kwargs.pop('key', name)
self._primary_key = kwargs.pop('primary_key', False)
self.nullable = kwargs.pop('nullable', not self.primary_key)
self._is_oid = kwargs.pop('_is_oid', False)
self.default = kwargs.pop('default', None)
self.index = kwargs.pop('index', None)
self.unique = kwargs.pop('unique', None)
self.quote = kwargs.pop('quote', False)
self.onupdate = kwargs.pop('onupdate', None)
self.autoincrement = kwargs.pop('autoincrement', True)
self.constraints = util.Set()
self.__originating_column = self
self._foreign_keys = util.OrderedSet()
if kwargs:
raise exceptions.ArgumentError("Unknown arguments passed to Column: " + repr(kwargs.keys()))
primary_key = util.SimpleProperty('_primary_key')
foreign_keys = util.SimpleProperty('_foreign_keys')
columns = property(lambda self:[self])
def __str__(self):
if self.table is not None:
if self.table.named_with_column():
return (self.table.encodedname + "." + self.encodedname)
else:
return self.encodedname
else:
return self.encodedname
def _get_bind(self):
return self.table.bind
def references(self, column):
"""return true if this column references the given column via foreign key"""
for fk in self.foreign_keys:
if fk.column is column:
return True
else:
return False
def append_foreign_key(self, fk):
fk._set_parent(self)
def __repr__(self):
kwarg = []
if self.key != self.name:
kwarg.append('key')
if self._primary_key:
kwarg.append('primary_key')
if not self.nullable:
kwarg.append('nullable')
if self.onupdate:
kwarg.append('onupdate')
if self.default:
kwarg.append('default')
return "Column(%s)" % ', '.join(
[repr(self.name)] + [repr(self.type)] +
[repr(x) for x in self.foreign_keys if x is not None] +
[repr(x) for x in self.constraints] +
["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg])
def _get_parent(self):
return self.table
def _set_parent(self, table):
self.metadata = table.metadata
if getattr(self, 'table', None) is not None:
raise exceptions.ArgumentError("this Column already has a table!")
if not self._is_oid:
self._pre_existing_column = table._columns.get(self.key)
table._columns.add(self)
else:
self._pre_existing_column = None
if self.primary_key:
table.primary_key.add(self)
elif self.key in table.primary_key:
raise exceptions.ArgumentError("Trying to redefine primary-key column '%s' as a non-primary-key column on table '%s'" % (self.key, table.fullname))
# if we think this should not raise an error, we'd instead do this:
#table.primary_key.remove(self)
self.table = table
if self.index:
if isinstance(self.index, basestring):
raise exceptions.ArgumentError("The 'index' keyword argument on Column is boolean only. To create indexes with a specific name, create an explicit Index object external to the Table.")
Index('ix_%s' % self._label, self, unique=self.unique)
elif self.unique:
if isinstance(self.unique, basestring):
raise exceptions.ArgumentError("The 'unique' keyword argument on Column is boolean only. To create unique constraints or indexes with a specific name, append an explicit UniqueConstraint to the Table's list of elements, or create an explicit Index object external to the Table.")
table.append_constraint(UniqueConstraint(self.key))
toinit = list(self.args)
if self.default is not None:
toinit.append(ColumnDefault(self.default))
if self.onupdate is not None:
toinit.append(ColumnDefault(self.onupdate, for_update=True))
self._init_items(*toinit)
self.args = None
def copy(self):
"""Create a copy of this ``Column``, unitialized.
This is used in ``Table.tometadata``.
"""
return Column(self.name, self.type, self.default, key = self.key, primary_key = self.primary_key, nullable = self.nullable, _is_oid = self._is_oid, quote=self.quote, index=self.index, *[c.copy() for c in self.constraints])
def _make_proxy(self, selectable, name = None):
"""Create a *proxy* for this column.
This is a copy of this ``Column`` referenced by a different parent
(such as an alias or select statement).
"""
fk = [ForeignKey(f._colspec) for f in self.foreign_keys]
c = Column(name or self.name, self.type, self.default, key = name or self.key, primary_key = self.primary_key, nullable = self.nullable, _is_oid = self._is_oid, quote=self.quote, *fk)
c.table = selectable
c.orig_set = self.orig_set
c.__originating_column = self.__originating_column
c._distance = self._distance + 1
c._pre_existing_column = self._pre_existing_column
if not c._is_oid:
selectable.columns.add(c)
if self.primary_key:
selectable.primary_key.add(c)
[c._init_items(f) for f in fk]
return c
def get_children(self, schema_visitor=False, **kwargs):
if schema_visitor:
return [x for x in (self.default, self.onupdate) if x is not None] + \
list(self.foreign_keys) + list(self.constraints)
else:
return expression._ColumnClause.get_children(self, **kwargs)
class ForeignKey(SchemaItem):
"""Defines a column-level ``ForeignKey`` constraint between two columns.
``ForeignKey`` is specified as an argument to a Column object.
One or more ``ForeignKey`` objects are used within a
``ForeignKeyConstraint`` object which represents the table-level
constraint definition.
"""
def __init__(self, column, constraint=None, use_alter=False, name=None, onupdate=None, ondelete=None):
"""Construct a new ``ForeignKey`` object.
column
Can be a ``schema.Column`` object representing the relationship,
or just its string name given as ``tablename.columnname``.
schema can be specified as ``schema.tablename.columnname``.
constraint
Is the owning ``ForeignKeyConstraint`` object, if any. If not
given, then a ``ForeignKeyConstraint`` will be automatically
created and added to the parent table.
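        A short usage sketch (illustrative only; ``users`` is assumed to
        be an existing table with an ``id`` column)::

          addresses = Table('addresses', metadata,
              Column('id', Integer, primary_key=True),
              Column('user_id', Integer, ForeignKey('users.id')))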
"""
self._colspec = column
self._column = None
self.constraint = constraint
self.use_alter = use_alter
self.name = name
self.onupdate = onupdate
self.ondelete = ondelete
def __repr__(self):
return "ForeignKey(%s)" % repr(self._get_colspec())
def copy(self):
"""Produce a copy of this ForeignKey object."""
return ForeignKey(self._get_colspec())
def _get_colspec(self):
if isinstance(self._colspec, basestring):
return self._colspec
elif self._colspec.table.schema is not None:
return "%s.%s.%s" % (self._colspec.table.schema, self._colspec.table.name, self._colspec.key)
else:
return "%s.%s" % (self._colspec.table.name, self._colspec.key)
def references(self, table):
"""Return True if the given table is referenced by this ``ForeignKey``."""
return table.corresponding_column(self.column, False) is not None
def _init_column(self):
# ForeignKey inits its remote column as late as possible, so tables can
# be defined without dependencies
if self._column is None:
if isinstance(self._colspec, basestring):
# locate the parent table this foreign key is attached to.
# we use the "original" column which our parent column represents
# (it's a list of columns/other ColumnElements if the parent table is a UNION)
for c in self.parent.orig_set:
if isinstance(c, Column):
parenttable = c.table
break
else:
raise exceptions.ArgumentError("Parent column '%s' does not descend from a table-attached Column" % str(self.parent))
m = re.match(r"^(.+?)(?:\.(.+?))?(?:\.(.+?))?$", self._colspec, re.UNICODE)
if m is None:
raise exceptions.ArgumentError("Invalid foreign key column specification: " + self._colspec)
if m.group(3) is None:
(tname, colname) = m.group(1, 2)
schema = None
else:
(schema,tname,colname) = m.group(1,2,3)
if _get_table_key(tname, schema) not in parenttable.metadata:
raise exceptions.InvalidRequestError("Could not find table '%s' with which to generate a foreign key" % tname)
table = Table(tname, parenttable.metadata, mustexist=True, schema=schema)
try:
if colname is None:
# colname is None in the case that ForeignKey argument was specified
# as table name only, in which case we match the column name to the same
# column on the parent.
key = self.parent
self._column = table.c[self.parent.key]
else:
self._column = table.c[colname]
except KeyError, e:
raise exceptions.ArgumentError("Could not create ForeignKey '%s' on table '%s': table '%s' has no column named '%s'" % (self._colspec, parenttable.name, table.name, str(e)))
else:
self._column = self._colspec
# propagate TypeEngine to parent if it didn't have one
if isinstance(self.parent.type, types.NullType):
self.parent.type = self._column.type
return self._column
column = property(lambda s: s._init_column())
def _get_parent(self):
return self.parent
def _set_parent(self, column):
self.parent = column
if self.parent._pre_existing_column is not None:
# remove existing FK which matches us
for fk in self.parent._pre_existing_column.foreign_keys:
if fk._colspec == self._colspec:
self.parent.table.foreign_keys.remove(fk)
self.parent.table.constraints.remove(fk.constraint)
if self.constraint is None and isinstance(self.parent.table, Table):
self.constraint = ForeignKeyConstraint([],[], use_alter=self.use_alter, name=self.name, onupdate=self.onupdate, ondelete=self.ondelete)
self.parent.table.append_constraint(self.constraint)
self.constraint._append_fk(self)
self.parent.foreign_keys.add(self)
self.parent.table.foreign_keys.add(self)
class DefaultGenerator(SchemaItem):
"""Base class for column *default* values."""
def __init__(self, for_update=False, metadata=None):
self.for_update = for_update
self.metadata = util.assert_arg_type(metadata, (MetaData, type(None)), 'metadata')
def _get_parent(self):
return getattr(self, 'column', None)
def _set_parent(self, column):
self.column = column
self.metadata = self.column.table.metadata
if self.for_update:
self.column.onupdate = self
else:
self.column.default = self
def execute(self, bind=None, **kwargs):
if bind is None:
bind = self._get_bind(raiseerr=True)
return bind._execute_default(self, **kwargs)
def __repr__(self):
return "DefaultGenerator()"
class PassiveDefault(DefaultGenerator):
"""A default that takes effect on the database side."""
def __init__(self, arg, **kwargs):
super(PassiveDefault, self).__init__(**kwargs)
self.arg = arg
def __repr__(self):
return "PassiveDefault(%s)" % repr(self.arg)
class ColumnDefault(DefaultGenerator):
"""A plain default value on a column.
This could correspond to a constant, a callable function, or a SQL
clause.
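    For example, via the ``default`` keyword on ``Column`` (a sketch;
    ``DateTime`` is from the types module and ``datetime`` is assumed to
    be imported from the standard library)::

      Column('created', DateTime, default=datetime.datetime.now)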
"""
def __init__(self, arg, **kwargs):
super(ColumnDefault, self).__init__(**kwargs)
if callable(arg):
if not inspect.isfunction(arg):
self.arg = lambda ctx: arg()
else:
argspec = inspect.getargspec(arg)
if len(argspec[0]) == 0:
self.arg = lambda ctx: arg()
else:
defaulted = argspec[3] is not None and len(argspec[3]) or 0
if len(argspec[0]) - defaulted > 1:
raise exceptions.ArgumentError(
"ColumnDefault Python function takes zero or one "
"positional arguments")
else:
self.arg = arg
else:
self.arg = arg
def _visit_name(self):
if self.for_update:
return "column_onupdate"
else:
return "column_default"
__visit_name__ = property(_visit_name)
def __repr__(self):
return "ColumnDefault(%s)" % repr(self.arg)
class Sequence(DefaultGenerator):
"""Represents a named sequence."""
def __init__(self, name, start=None, increment=None, schema=None,
optional=False, quote=False, **kwargs):
super(Sequence, self).__init__(**kwargs)
self.name = name
self.start = start
self.increment = increment
self.optional = optional
self.quote = quote
self.schema = schema
self.kwargs = kwargs
def __repr__(self):
return "Sequence(%s)" % ', '.join(
[repr(self.name)] +
["%s=%s" % (k, repr(getattr(self, k)))
for k in ['start', 'increment', 'optional']])
def _set_parent(self, column):
super(Sequence, self)._set_parent(column)
column.sequence = self
def create(self, bind=None, checkfirst=True):
"""Creates this sequence in the database."""
if bind is None:
bind = self._get_bind(raiseerr=True)
bind.create(self, checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=True):
"""Drops this sequence from the database."""
if bind is None:
bind = self._get_bind(raiseerr=True)
bind.drop(self, checkfirst=checkfirst)
class Constraint(SchemaItem):
"""Represent a table-level ``Constraint`` such as a composite primary key, foreign key, or unique constraint.
Implements a hybrid of dict/set-like behavior with regard to the
list of underlying columns.
"""
def __init__(self, name=None):
self.name = name
self.columns = expression.ColumnCollection()
def __contains__(self, x):
return self.columns.contains_column(x)
def keys(self):
return self.columns.keys()
def __add__(self, other):
return self.columns + other
def __iter__(self):
return iter(self.columns)
def __len__(self):
return len(self.columns)
def copy(self):
raise NotImplementedError()
def _get_parent(self):
return getattr(self, 'table', None)
class CheckConstraint(Constraint):
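    """A table- or column-level CHECK constraint, defined by a SQL
    expression (the ``sqltext`` argument)."""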
def __init__(self, sqltext, name=None):
super(CheckConstraint, self).__init__(name)
self.sqltext = sqltext
def _visit_name(self):
if isinstance(self.parent, Table):
return "check_constraint"
else:
return "column_check_constraint"
__visit_name__ = property(_visit_name)
def _set_parent(self, parent):
self.parent = parent
parent.constraints.add(self)
def copy(self):
return CheckConstraint(self.sqltext, name=self.name)
class ForeignKeyConstraint(Constraint):
"""Table-level foreign key constraint, represents a collection of ``ForeignKey`` objects."""
def __init__(self, columns, refcolumns, name=None, onupdate=None, ondelete=None, use_alter=False):
super(ForeignKeyConstraint, self).__init__(name)
self.__colnames = columns
self.__refcolnames = refcolumns
self.elements = util.OrderedSet()
self.onupdate = onupdate
self.ondelete = ondelete
if self.name is None and use_alter:
raise exceptions.ArgumentError("Alterable ForeignKey/ForeignKeyConstraint requires a name")
self.use_alter = use_alter
def _set_parent(self, table):
self.table = table
table.constraints.add(self)
for (c, r) in zip(self.__colnames, self.__refcolnames):
self.append_element(c,r)
def append_element(self, col, refcol):
fk = ForeignKey(refcol, constraint=self, name=self.name, onupdate=self.onupdate, ondelete=self.ondelete, use_alter=self.use_alter)
fk._set_parent(self.table.c[col])
self._append_fk(fk)
def _append_fk(self, fk):
self.columns.add(self.table.c[fk.parent.key])
self.elements.add(fk)
def copy(self):
return ForeignKeyConstraint([x.parent.name for x in self.elements], [x._get_colspec() for x in self.elements], name=self.name, onupdate=self.onupdate, ondelete=self.ondelete, use_alter=self.use_alter)
class PrimaryKeyConstraint(Constraint):
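    """A table-level PRIMARY KEY constraint.
    Usually created implicitly via ``primary_key=True`` flags on
    ``Column`` objects, but may also be appended explicitly to a
    ``Table``'s list of objects."""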
def __init__(self, *columns, **kwargs):
super(PrimaryKeyConstraint, self).__init__(name=kwargs.pop('name', None))
self.__colnames = list(columns)
def _set_parent(self, table):
self.table = table
table.primary_key = self
for c in self.__colnames:
self.append_column(table.c[c])
def add(self, col):
self.append_column(col)
def remove(self, col):
col.primary_key = False
del self.columns[col.key]
def append_column(self, col):
self.columns.add(col)
col.primary_key = True
def copy(self):
return PrimaryKeyConstraint(name=self.name, *[c.key for c in self])
def __eq__(self, other):
return self.columns == other
class UniqueConstraint(Constraint):
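    """A table-level UNIQUE constraint, covering the columns named in
    the constructor."""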
def __init__(self, *columns, **kwargs):
super(UniqueConstraint, self).__init__(name=kwargs.pop('name', None))
self.__colnames = list(columns)
def _set_parent(self, table):
self.table = table
table.constraints.add(self)
for c in self.__colnames:
self.append_column(table.c[c])
def append_column(self, col):
self.columns.add(col)
def copy(self):
return UniqueConstraint(name=self.name, *self.__colnames)
class Index(SchemaItem):
"""Represent an index of columns from a database table."""
def __init__(self, name, *columns, **kwargs):
"""Construct an index object.
Arguments are:
name
The name of the index
\*columns
Columns to include in the index. All columns must belong to
the same table, and no column may appear more than once.
\**kwargs
Keyword arguments include:
unique
Defaults to False: create a unique index.
postgres_where
Defaults to None: create a partial index when using PostgreSQL
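        For example (a sketch; ``mytable`` is assumed to be an existing
        ``Table``)::

          Index('ix_mytable_name', mytable.c.name, unique=True)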
"""
self.name = name
self.columns = []
self.table = None
self.unique = kwargs.pop('unique', False)
self.kwargs = kwargs
self._init_items(*columns)
def _init_items(self, *args):
for column in args:
self.append_column(column)
def _get_parent(self):
return self.table
def _set_parent(self, table):
self.table = table
self.metadata = table.metadata
table.indexes.add(self)
def append_column(self, column):
# make sure all columns are from the same table
# and no column is repeated
if self.table is None:
self._set_parent(column.table)
elif column.table != self.table:
# all columns must be from the same table
raise exceptions.ArgumentError("All index columns must be from same table. "
"%s is from %s not %s" % (column,
column.table,
self.table))
elif column.name in [ c.name for c in self.columns ]:
raise exceptions.ArgumentError("A column may not appear twice in the "
"same index (%s already has column %s)"
% (self.name, column))
self.columns.append(column)
def create(self, bind=None):
if bind is not None:
bind.create(self)
else:
self._get_bind(raiseerr=True).create(self)
return self
def drop(self, bind=None):
if bind is not None:
bind.drop(self)
else:
self._get_bind(raiseerr=True).drop(self)
def __str__(self):
return repr(self)
def __repr__(self):
return 'Index("%s", %s%s)' % (self.name,
', '.join([repr(c)
for c in self.columns]),
(self.unique and ', unique=True') or '')
class MetaData(SchemaItem):
"""A collection of Tables and their associated schema constructs.
Holds a collection of Tables and an optional binding to an
``Engine`` or ``Connection``. If bound, the
[sqlalchemy.schema#Table] objects in the collection and their
columns may participate in implicit SQL execution.
The ``bind`` property may be assigned to dynamically. A common
pattern is to start unbound and then bind later when an engine is
available::
metadata = MetaData()
# define tables
Table('mytable', metadata, ...)
# connect to an engine later, perhaps after loading a URL from a
# configuration file
metadata.bind = an_engine
MetaData is a thread-safe object after tables have been explicitly
defined or loaded via reflection.
"""
__visit_name__ = 'metadata'
def __init__(self, bind=None, reflect=False):
"""Create a new MetaData object.
bind
An Engine or Connection to bind to. May also be a string or
URL instance, these are passed to create_engine() and this
MetaData will be bound to the resulting engine.
reflect
Optional, automatically load all tables from the bound database.
Defaults to False. ``bind`` is required when this option is
set. For finer control over loaded tables, use the ``reflect``
method of ``MetaData``.
"""
self.tables = {}
self.bind = bind
self.metadata = self
if reflect:
if not bind:
raise exceptions.ArgumentError(
"A bind must be supplied in conjunction with reflect=True")
self.reflect()
def __repr__(self):
return 'MetaData(%r)' % self.bind
def __contains__(self, key):
return key in self.tables
def __getstate__(self):
return {'tables': self.tables}
def __setstate__(self, state):
self.tables = state['tables']
self._bind = None
def is_bound(self):
"""True if this MetaData is bound to an Engine or Connection."""
return self._bind is not None
# @deprecated
def connect(self, bind, **kwargs):
"""Bind this MetaData to an Engine.
Use ``metadata.bind = <engine>`` or ``metadata.bind = <url>``.
bind
A string, ``URL``, ``Engine`` or ``Connection`` instance. If
a string or ``URL``, will be passed to ``create_engine()`` along
with ``\**kwargs`` to produce the engine to connect to.
Otherwise connects directly to the given ``Engine``.
"""
global URL
if URL is None:
from sqlalchemy.engine.url import URL
if isinstance(bind, (basestring, URL)):
self._bind = sqlalchemy.create_engine(bind, **kwargs)
else:
self._bind = bind
connect = util.deprecated(connect)
def _bind_to(self, bind):
"""Bind this MetaData to an Engine, Connection, string or URL."""
global URL
if URL is None:
from sqlalchemy.engine.url import URL
if isinstance(bind, (basestring, URL)):
self._bind = sqlalchemy.create_engine(bind)
else:
self._bind = bind
bind = property(lambda self: self._bind, _bind_to, doc=
"""An Engine or Connection to which this MetaData is bound.
This property may be assigned an ``Engine`` or
``Connection``, or assigned a string or URL to
automatically create a basic ``Engine`` for this bind
with ``create_engine()``.""")
def clear(self):
self.tables.clear()
def remove(self, table):
# TODO: scan all other tables and remove FK _column
del self.tables[table.key]
def table_iterator(self, reverse=True, tables=None):
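        """Iterate over this collection's tables in dependency-sorted order.
        With the default ``reverse=True``, dependent tables are yielded
        before the tables they reference (the order suitable for DROP
        operations).
        """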
from sqlalchemy.sql import util as sql_util
if tables is None:
tables = self.tables.values()
else:
tables = util.Set(tables).intersection(self.tables.values())
sorter = sql_util.TableCollection(list(tables))
return iter(sorter.sort(reverse=reverse))
def _get_parent(self):
return None
def reflect(self, bind=None, schema=None, only=None):
"""Load all available table definitions from the database.
Automatically creates ``Table`` entries in this ``MetaData`` for any
table available in the database but not yet present in the ``MetaData``.
May be called multiple times to pick up tables recently added to the
database; however, no special action is taken if a table in this
``MetaData`` no longer exists in the database.
bind
A ``Connectable`` used to access the database; if None, uses
the existing bind on this ``MetaData``, if any.
schema
Optional, query and reflect tables from an alternate schema.
only
Optional. Load only a subset of available named tables. May
be specified as a sequence of names or a callable.
If a sequence of names is provided, only those tables will be
reflected. An error is raised if a table is requested but not
available. Named tables already present in this ``MetaData`` are
ignored.
If a callable is provided, it will be used as a boolean predicate
to filter the list of potential table names. The callable is
called with a table name and this ``MetaData`` instance as positional
arguments and should return a true value for any table to reflect.
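        For example, reflecting a subset of tables (a sketch; ``engine``
        is assumed to be an existing ``Engine``)::

          meta = MetaData()
          meta.reflect(bind=engine, only=['user', 'address'])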
"""
reflect_opts = {'autoload': True}
if bind is None:
bind = self._get_bind(raiseerr=True)
conn = None
else:
reflect_opts['autoload_with'] = bind
conn = bind.contextual_connect()
if schema is not None:
reflect_opts['schema'] = schema
available = util.OrderedSet(bind.engine.table_names(schema,
connection=conn))
current = util.Set(self.tables.keys())
if only is None:
load = [name for name in available if name not in current]
elif callable(only):
load = [name for name in available
if name not in current and only(name, self)]
else:
missing = [name for name in only if name not in available]
if missing:
s = schema and (" schema '%s'" % schema) or ''
raise exceptions.InvalidRequestError(
'Could not reflect: requested table(s) not available '
'in %s%s: (%s)' % (bind.engine.url, s, ', '.join(missing)))
load = [name for name in only if name not in current]
for name in load:
Table(name, self, **reflect_opts)
def create_all(self, bind=None, tables=None, checkfirst=True):
"""Create all tables stored in this metadata.
This will conditionally create tables depending on if they do
not yet exist in the database.
bind
A ``Connectable`` used to access the database; if None, uses
the existing bind on this ``MetaData``, if any.
tables
Optional list of ``Table`` objects, which is a subset of the
total tables in the ``MetaData`` (others are ignored).
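        For example (a sketch; ``engine`` is assumed to be an existing
        ``Engine``)::

          metadata.create_all(bind=engine)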
"""
if bind is None:
bind = self._get_bind(raiseerr=True)
bind.create(self, checkfirst=checkfirst, tables=tables)
def drop_all(self, bind=None, tables=None, checkfirst=True):
"""Drop all tables stored in this metadata.
This will conditionally drop tables depending on if they
currently exist in the database.
bind
A ``Connectable`` used to access the database; if None, uses
the existing bind on this ``MetaData``, if any.
tables
Optional list of ``Table`` objects, which is a subset of the
total tables in the ``MetaData`` (others are ignored).
"""
if bind is None:
bind = self._get_bind(raiseerr=True)
bind.drop(self, checkfirst=checkfirst, tables=tables)
def _get_bind(self, raiseerr=False):
if not self.is_bound():
if raiseerr:
raise exceptions.InvalidRequestError("This SchemaItem is not connected to any Engine or Connection.")
else:
return None
return self._bind
class ThreadLocalMetaData(MetaData):
"""A MetaData variant that presents a different ``bind`` in every thread.
Makes the ``bind`` property of the MetaData a thread-local value,
allowing this collection of tables to be bound to different ``Engine``
implementations or connections in each thread.
The ThreadLocalMetaData starts off bound to None in each thread.
Binds must be made explicitly by assigning to the ``bind`` property or
using ``connect()``. You can also re-bind dynamically multiple times per
thread, just like a regular ``MetaData``.
Use this type of MetaData when your tables are present in more than
one database and you need to address them simultaneously.
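    For example (a sketch; ``engine_one`` is an ``Engine`` assumed to be
    created elsewhere)::

      meta = ThreadLocalMetaData()
      meta.bind = engine_one  # visible only in the current thread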
"""
__visit_name__ = 'metadata'
def __init__(self):
"""Construct a ThreadLocalMetaData.
Takes no arguments.
"""
self.context = util.ThreadLocal()
self.__engines = {}
super(ThreadLocalMetaData, self).__init__()
# @deprecated
def connect(self, bind, **kwargs):
"""Bind to an Engine in the caller's thread.
Use ``metadata.bind=<engine>`` or ``metadata.bind=<url>``.
bind
A string, ``URL``, ``Engine`` or ``Connection`` instance. If
a string or ``URL``, will be passed to ``create_engine()`` along
with ``\**kwargs`` to produce the engine to connect to.
Otherwise connects directly to the given ``Engine``.
"""
global URL
if URL is None:
from sqlalchemy.engine.url import URL
if isinstance(bind, (basestring, URL)):
try:
engine = self.__engines[bind]
except KeyError:
engine = sqlalchemy.create_engine(bind, **kwargs)
bind = engine
self._bind_to(bind)
connect = util.deprecated(connect)
def _get_bind(self, raiseerr=False):
"""The bound ``Engine`` or ``Connectable`` for this thread."""
if hasattr(self.context, '_engine'):
return self.context._engine
else:
if raiseerr:
raise exceptions.InvalidRequestError(
"This ThreadLocalMetaData is not bound to any Engine or "
"Connection.")
else:
return None
def _bind_to(self, bind):
"""Bind to a Connectable in the caller's thread."""
global URL
if URL is None:
from sqlalchemy.engine.url import URL
if isinstance(bind, (basestring, URL)):
try:
self.context._engine = self.__engines[bind]
except KeyError:
e = sqlalchemy.create_engine(bind)
self.__engines[bind] = e
self.context._engine = e
else:
# TODO: this is squirrelly. We shouldn't have to hold onto engines
# in a case like this
if bind not in self.__engines:
self.__engines[bind] = bind
self.context._engine = bind
bind = property(_get_bind, _bind_to, doc=
"""The bound Engine or Connection for this thread.
This property may be assigned an Engine or Connection,
or assigned a string or URL to automatically create a
basic Engine for this bind with ``create_engine()``.""")
def is_bound(self):
"""True if there is a bind for this thread."""
return (hasattr(self.context, '_engine') and
self.context._engine is not None)
def dispose(self):
"""Dispose any and all ``Engines`` to which this ``ThreadLocalMetaData`` has been connected."""
for e in self.__engines.values():
if hasattr(e, 'dispose'):
e.dispose()
class SchemaVisitor(visitors.ClauseVisitor):
"""Define the visiting for ``SchemaItem`` objects."""
__traverse_options__ = {'schema_visitor':True}
rbuffat/pyidf | pyidf/performance_curves.py
""" Data objects in group "Performance Curves"
"""
from collections import OrderedDict
import logging
from pyidf.helper import DataObject
logger = logging.getLogger("pyidf")
logger.addHandler(logging.NullHandler())
class CurveLinear(DataObject):
""" Corresponds to IDD object `Curve:Linear`
Linear curve with one independent variable.
Input for the linear curve consists of a curve name, the two coefficients, and the
maximum and minimum valid independent variable values. Optional inputs for
curve minimum and maximum may be used to limit the output of the performance curve.
curve = C1 + C2*x
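    A minimal usage sketch, assuming the no-argument constructor provided
    by ``DataObject`` (values are illustrative)::

        curve = CurveLinear()
        curve.name = "FanPowerCurve"
        curve.coefficient1_constant = 0.0
        curve.coefficient2_x = 1.0
        curve.minimum_value_of_x = 0.0
        curve.maximum_value_of_x = 1.0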
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 constant',
{'name': u'Coefficient1 Constant',
'pyname': u'coefficient1_constant',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 x',
{'name': u'Coefficient2 x',
'pyname': u'coefficient2_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for X',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'Pressure',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Capacity',
u'Power'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:Linear',
'pyname': u'CurveLinear',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_constant(self):
"""field `Coefficient1 Constant`
Args:
value (float): value for IDD Field `Coefficient1 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_constant` or None if not set
"""
return self["Coefficient1 Constant"]
@coefficient1_constant.setter
def coefficient1_constant(self, value=None):
"""Corresponds to IDD field `Coefficient1 Constant`"""
self["Coefficient1 Constant"] = value
@property
def coefficient2_x(self):
"""field `Coefficient2 x`
Args:
value (float): value for IDD Field `Coefficient2 x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_x` or None if not set
"""
return self["Coefficient2 x"]
@coefficient2_x.setter
def coefficient2_x(self, value=None):
"""Corresponds to IDD field `Coefficient2 x`"""
self["Coefficient2 x"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for X`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for X`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for X"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for X`"""
self["Input Unit Type for X"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveQuadLinear(DataObject):
""" Corresponds to IDD object `Curve:QuadLinear`
Linear curve with four independent variables.
Input for the quad-linear curve consists of a curve name, the five coefficients, and the
maximum and minimum valid independent variable values. Optional inputs for curve
minimum and maximum may be used to limit the output of the performance curve.
curve = C1 + C2*w + C3*x + C4*y + C5*z
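    A short sketch of the four-variable form, again assuming the
    no-argument constructor (values are illustrative)::

        curve = CurveQuadLinear()
        curve.name = "CoolingCapFT"
        curve.coefficient1_constant = 1.0
        curve.coefficient2_w = 0.01
        curve.coefficient3_x = 0.02
        curve.coefficient4_y = 0.0
        curve.coefficient5_z = 0.0
        # min/max limits for w, x, y and z are set the same way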
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 constant',
{'name': u'Coefficient1 Constant',
'pyname': u'coefficient1_constant',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 w',
{'name': u'Coefficient2 w',
'pyname': u'coefficient2_w',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 x',
{'name': u'Coefficient3 x',
'pyname': u'coefficient3_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient4 y',
{'name': u'Coefficient4 y',
'pyname': u'coefficient4_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient5 z',
{'name': u'Coefficient5 z',
'pyname': u'coefficient5_z',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of w',
{'name': u'Minimum Value of w',
'pyname': u'minimum_value_of_w',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of w',
{'name': u'Maximum Value of w',
'pyname': u'maximum_value_of_w',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of y',
{'name': u'Minimum Value of y',
'pyname': u'minimum_value_of_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of y',
{'name': u'Maximum Value of y',
'pyname': u'maximum_value_of_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of z',
{'name': u'Minimum Value of z',
'pyname': u'minimum_value_of_z',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of z',
{'name': u'Maximum Value of z',
'pyname': u'maximum_value_of_z',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for w',
{'name': u'Input Unit Type for w',
'pyname': u'input_unit_type_for_w',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance',
u'VolumetricFlowPerPower'],
'autocalculatable': False,
'type': 'alpha'}),
(u'input unit type for x',
{'name': u'Input Unit Type for x',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance',
u'VolumetricFlowPerPower'],
'autocalculatable': False,
'type': 'alpha'}),
(u'input unit type for y',
{'name': u'Input Unit Type for y',
'pyname': u'input_unit_type_for_y',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance',
u'VolumetricFlowPerPower'],
'autocalculatable': False,
'type': 'alpha'}),
(u'input unit type for z',
{'name': u'Input Unit Type for z',
'pyname': u'input_unit_type_for_z',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance',
u'VolumetricFlowPerPower'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:QuadLinear',
'pyname': u'CurveQuadLinear',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_constant(self):
"""field `Coefficient1 Constant`
Args:
value (float): value for IDD Field `Coefficient1 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_constant` or None if not set
"""
return self["Coefficient1 Constant"]
@coefficient1_constant.setter
def coefficient1_constant(self, value=None):
"""Corresponds to IDD field `Coefficient1 Constant`"""
self["Coefficient1 Constant"] = value
@property
def coefficient2_w(self):
"""field `Coefficient2 w`
Args:
value (float): value for IDD Field `Coefficient2 w`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_w` or None if not set
"""
return self["Coefficient2 w"]
@coefficient2_w.setter
def coefficient2_w(self, value=None):
"""Corresponds to IDD field `Coefficient2 w`"""
self["Coefficient2 w"] = value
@property
def coefficient3_x(self):
"""field `Coefficient3 x`
Args:
value (float): value for IDD Field `Coefficient3 x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_x` or None if not set
"""
return self["Coefficient3 x"]
@coefficient3_x.setter
def coefficient3_x(self, value=None):
"""Corresponds to IDD field `Coefficient3 x`"""
self["Coefficient3 x"] = value
@property
def coefficient4_y(self):
"""field `Coefficient4 y`
Args:
value (float): value for IDD Field `Coefficient4 y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient4_y` or None if not set
"""
return self["Coefficient4 y"]
@coefficient4_y.setter
def coefficient4_y(self, value=None):
"""Corresponds to IDD field `Coefficient4 y`"""
self["Coefficient4 y"] = value
@property
def coefficient5_z(self):
"""field `Coefficient5 z`
Args:
value (float): value for IDD Field `Coefficient5 z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient5_z` or None if not set
"""
return self["Coefficient5 z"]
@coefficient5_z.setter
def coefficient5_z(self, value=None):
"""Corresponds to IDD field `Coefficient5 z`"""
self["Coefficient5 z"] = value
@property
def minimum_value_of_w(self):
"""field `Minimum Value of w`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of w`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_w` or None if not set
"""
return self["Minimum Value of w"]
@minimum_value_of_w.setter
def minimum_value_of_w(self, value=None):
"""Corresponds to IDD field `Minimum Value of w`"""
self["Minimum Value of w"] = value
@property
def maximum_value_of_w(self):
"""field `Maximum Value of w`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of w`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_w` or None if not set
"""
return self["Maximum Value of w"]
@maximum_value_of_w.setter
def maximum_value_of_w(self, value=None):
"""Corresponds to IDD field `Maximum Value of w`"""
self["Maximum Value of w"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_value_of_y(self):
"""field `Minimum Value of y`
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Minimum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_y` or None if not set
"""
return self["Minimum Value of y"]
@minimum_value_of_y.setter
def minimum_value_of_y(self, value=None):
"""Corresponds to IDD field `Minimum Value of y`"""
self["Minimum Value of y"] = value
@property
def maximum_value_of_y(self):
"""field `Maximum Value of y`
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Maximum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_y` or None if not set
"""
return self["Maximum Value of y"]
@maximum_value_of_y.setter
def maximum_value_of_y(self, value=None):
"""Corresponds to IDD field `Maximum Value of y`"""
self["Maximum Value of y"] = value
@property
def minimum_value_of_z(self):
"""field `Minimum Value of z`
| Units are based on field `A5`
Args:
value (float): value for IDD Field `Minimum Value of z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_z` or None if not set
"""
return self["Minimum Value of z"]
@minimum_value_of_z.setter
def minimum_value_of_z(self, value=None):
"""Corresponds to IDD field `Minimum Value of z`"""
self["Minimum Value of z"] = value
@property
def maximum_value_of_z(self):
"""field `Maximum Value of z`
| Units are based on field `A5`
Args:
value (float): value for IDD Field `Maximum Value of z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_z` or None if not set
"""
return self["Maximum Value of z"]
@maximum_value_of_z.setter
def maximum_value_of_z(self, value=None):
"""Corresponds to IDD field `Maximum Value of z`"""
self["Maximum Value of z"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_w(self):
"""field `Input Unit Type for w`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for w`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_w` or None if not set
"""
return self["Input Unit Type for w"]
@input_unit_type_for_w.setter
def input_unit_type_for_w(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for w`"""
self["Input Unit Type for w"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for x`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for x`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for x"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for x`"""
self["Input Unit Type for x"] = value
@property
def input_unit_type_for_y(self):
"""field `Input Unit Type for y`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for y`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_y` or None if not set
"""
return self["Input Unit Type for y"]
@input_unit_type_for_y.setter
def input_unit_type_for_y(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for y`"""
self["Input Unit Type for y"] = value
@property
def input_unit_type_for_z(self):
"""field `Input Unit Type for z`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for z`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_z` or None if not set
"""
return self["Input Unit Type for z"]
@input_unit_type_for_z.setter
def input_unit_type_for_z(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for z`"""
self["Input Unit Type for z"] = value
class CurveQuadratic(DataObject):
""" Corresponds to IDD object `Curve:Quadratic`
Quadratic curve with one independent variable.
Input for a quadratic curve consists of the curve name, the three coefficients, and
the maximum and minimum valid independent variable values. Optional inputs for curve
minimum and maximum may be used to limit the output of the performance curve.
curve = C1 + C2*x + C3*x**2
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 constant',
{'name': u'Coefficient1 Constant',
'pyname': u'coefficient1_constant',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 x',
{'name': u'Coefficient2 x',
'pyname': u'coefficient2_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 x**2',
{'name': u'Coefficient3 x**2',
'pyname': u'coefficient3_x2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for X',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Capacity',
u'Power'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:Quadratic',
'pyname': u'CurveQuadratic',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_constant(self):
"""field `Coefficient1 Constant`
Args:
value (float): value for IDD Field `Coefficient1 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_constant` or None if not set
"""
return self["Coefficient1 Constant"]
@coefficient1_constant.setter
def coefficient1_constant(self, value=None):
"""Corresponds to IDD field `Coefficient1 Constant`"""
self["Coefficient1 Constant"] = value
@property
def coefficient2_x(self):
"""field `Coefficient2 x`
Args:
value (float): value for IDD Field `Coefficient2 x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_x` or None if not set
"""
return self["Coefficient2 x"]
@coefficient2_x.setter
def coefficient2_x(self, value=None):
"""Corresponds to IDD field `Coefficient2 x`"""
self["Coefficient2 x"] = value
@property
def coefficient3_x2(self):
"""field `Coefficient3 x**2`
Args:
value (float): value for IDD Field `Coefficient3 x**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_x2` or None if not set
"""
return self["Coefficient3 x**2"]
@coefficient3_x2.setter
def coefficient3_x2(self, value=None):
""" Corresponds to IDD field `Coefficient3 x**2`
"""
self["Coefficient3 x**2"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for X`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for X`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for X"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for X`"""
self["Input Unit Type for X"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveCubic(DataObject):
""" Corresponds to IDD object `Curve:Cubic`
Cubic curve with one independent variable.
Input for a cubic curve consists of the curve name, the 4 coefficients, and the
maximum and minimum valid independent variable values. Optional inputs for curve
minimum and maximum may be used to limit the output of the performance curve.
curve = C1 + C2*x + C3*x**2 + C4*x**3
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 constant',
{'name': u'Coefficient1 Constant',
'pyname': u'coefficient1_constant',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 x',
{'name': u'Coefficient2 x',
'pyname': u'coefficient2_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 x**2',
{'name': u'Coefficient3 x**2',
'pyname': u'coefficient3_x2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient4 x**3',
{'name': u'Coefficient4 x**3',
'pyname': u'coefficient4_x3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for X',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Capacity',
u'Power'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:Cubic',
'pyname': u'CurveCubic',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_constant(self):
"""field `Coefficient1 Constant`
Args:
value (float): value for IDD Field `Coefficient1 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_constant` or None if not set
"""
return self["Coefficient1 Constant"]
@coefficient1_constant.setter
def coefficient1_constant(self, value=None):
"""Corresponds to IDD field `Coefficient1 Constant`"""
self["Coefficient1 Constant"] = value
@property
def coefficient2_x(self):
"""field `Coefficient2 x`
Args:
value (float): value for IDD Field `Coefficient2 x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_x` or None if not set
"""
return self["Coefficient2 x"]
@coefficient2_x.setter
def coefficient2_x(self, value=None):
"""Corresponds to IDD field `Coefficient2 x`"""
self["Coefficient2 x"] = value
@property
def coefficient3_x2(self):
"""field `Coefficient3 x**2`
Args:
value (float): value for IDD Field `Coefficient3 x**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_x2` or None if not set
"""
return self["Coefficient3 x**2"]
@coefficient3_x2.setter
def coefficient3_x2(self, value=None):
""" Corresponds to IDD field `Coefficient3 x**2`
"""
self["Coefficient3 x**2"] = value
@property
def coefficient4_x3(self):
"""field `Coefficient4 x**3`
Args:
value (float): value for IDD Field `Coefficient4 x**3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient4_x3` or None if not set
"""
return self["Coefficient4 x**3"]
@coefficient4_x3.setter
def coefficient4_x3(self, value=None):
""" Corresponds to IDD field `Coefficient4 x**3`
"""
self["Coefficient4 x**3"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for X`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for X`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for X"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for X`"""
self["Input Unit Type for X"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveQuartic(DataObject):
""" Corresponds to IDD object `Curve:Quartic`
Quartic (fourth order polynomial) curve with one independent variable.
    Input for a quartic curve consists of the curve name, the
five coefficients, and the maximum and minimum valid independent variable values.
Optional inputs for curve minimum and maximum may be used to limit the
output of the performance curve.
curve = C1 + C2*x + C3*x**2 + C4*x**3 + C5*x**4
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 constant',
{'name': u'Coefficient1 Constant',
'pyname': u'coefficient1_constant',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 x',
{'name': u'Coefficient2 x',
'pyname': u'coefficient2_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 x**2',
{'name': u'Coefficient3 x**2',
'pyname': u'coefficient3_x2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient4 x**3',
{'name': u'Coefficient4 x**3',
'pyname': u'coefficient4_x3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient5 x**4',
{'name': u'Coefficient5 x**4',
'pyname': u'coefficient5_x4',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for X',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Capacity',
u'Power'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:Quartic',
'pyname': u'CurveQuartic',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_constant(self):
"""field `Coefficient1 Constant`
Args:
value (float): value for IDD Field `Coefficient1 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_constant` or None if not set
"""
return self["Coefficient1 Constant"]
@coefficient1_constant.setter
def coefficient1_constant(self, value=None):
"""Corresponds to IDD field `Coefficient1 Constant`"""
self["Coefficient1 Constant"] = value
@property
def coefficient2_x(self):
"""field `Coefficient2 x`
Args:
value (float): value for IDD Field `Coefficient2 x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_x` or None if not set
"""
return self["Coefficient2 x"]
@coefficient2_x.setter
def coefficient2_x(self, value=None):
"""Corresponds to IDD field `Coefficient2 x`"""
self["Coefficient2 x"] = value
@property
def coefficient3_x2(self):
"""field `Coefficient3 x**2`
Args:
value (float): value for IDD Field `Coefficient3 x**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_x2` or None if not set
"""
return self["Coefficient3 x**2"]
@coefficient3_x2.setter
def coefficient3_x2(self, value=None):
""" Corresponds to IDD field `Coefficient3 x**2`
"""
self["Coefficient3 x**2"] = value
@property
def coefficient4_x3(self):
"""field `Coefficient4 x**3`
Args:
value (float): value for IDD Field `Coefficient4 x**3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient4_x3` or None if not set
"""
return self["Coefficient4 x**3"]
@coefficient4_x3.setter
def coefficient4_x3(self, value=None):
""" Corresponds to IDD field `Coefficient4 x**3`
"""
self["Coefficient4 x**3"] = value
@property
def coefficient5_x4(self):
"""field `Coefficient5 x**4`
Args:
value (float): value for IDD Field `Coefficient5 x**4`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient5_x4` or None if not set
"""
return self["Coefficient5 x**4"]
@coefficient5_x4.setter
def coefficient5_x4(self, value=None):
""" Corresponds to IDD field `Coefficient5 x**4`
"""
self["Coefficient5 x**4"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for X`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for X`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for X"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for X`"""
self["Input Unit Type for X"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveExponent(DataObject):
""" Corresponds to IDD object `Curve:Exponent`
Exponent curve with one independent variable.
    Input for an exponent curve consists of the curve name, the three coefficients, and the
maximum and minimum valid independent variable values. Optional inputs for curve
minimum and maximum may be used to limit the output of the performance curve.
curve = C1 + C2*x**C3
The independent variable x is raised to the C3 power, multiplied by C2, and C1 is added to the result.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 constant',
{'name': u'Coefficient1 Constant',
'pyname': u'coefficient1_constant',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 constant',
{'name': u'Coefficient2 Constant',
'pyname': u'coefficient2_constant',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 constant',
{'name': u'Coefficient3 Constant',
'pyname': u'coefficient3_constant',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for X',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Capacity',
u'Power'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 6,
'name': u'Curve:Exponent',
'pyname': u'CurveExponent',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_constant(self):
"""field `Coefficient1 Constant`
Args:
value (float): value for IDD Field `Coefficient1 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_constant` or None if not set
"""
return self["Coefficient1 Constant"]
@coefficient1_constant.setter
def coefficient1_constant(self, value=None):
"""Corresponds to IDD field `Coefficient1 Constant`"""
self["Coefficient1 Constant"] = value
@property
def coefficient2_constant(self):
"""field `Coefficient2 Constant`
Args:
value (float): value for IDD Field `Coefficient2 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_constant` or None if not set
"""
return self["Coefficient2 Constant"]
@coefficient2_constant.setter
def coefficient2_constant(self, value=None):
"""Corresponds to IDD field `Coefficient2 Constant`"""
self["Coefficient2 Constant"] = value
@property
def coefficient3_constant(self):
"""field `Coefficient3 Constant`
Args:
value (float): value for IDD Field `Coefficient3 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_constant` or None if not set
"""
return self["Coefficient3 Constant"]
@coefficient3_constant.setter
def coefficient3_constant(self, value=None):
"""Corresponds to IDD field `Coefficient3 Constant`"""
self["Coefficient3 Constant"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Specify the minimum value of the independent variable x allowed
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Specify the maximum value of the independent variable x allowed
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for X`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for X`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for X"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for X`"""
self["Input Unit Type for X"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveBicubic(DataObject):
""" Corresponds to IDD object `Curve:Bicubic`
Cubic curve with two independent variables. Input consists of the
curve name, the ten coefficients, and the minimum and maximum values for each of
the independent variables. Optional inputs for curve minimum and maximum may
be used to limit the output of the performance curve.
curve = C1 + C2*x + C3*x**2 + C4*y + C5*y**2 + C6*x*y + C7*x**3 + C8*y**3 + C9*x**2*y
+ C10*x*y**2
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 constant',
{'name': u'Coefficient1 Constant',
'pyname': u'coefficient1_constant',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 x',
{'name': u'Coefficient2 x',
'pyname': u'coefficient2_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 x**2',
{'name': u'Coefficient3 x**2',
'pyname': u'coefficient3_x2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient4 y',
{'name': u'Coefficient4 y',
'pyname': u'coefficient4_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient5 y**2',
{'name': u'Coefficient5 y**2',
'pyname': u'coefficient5_y2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient6 x*y',
{'name': u'Coefficient6 x*y',
'pyname': u'coefficient6_xy',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient7 x**3',
{'name': u'Coefficient7 x**3',
'pyname': u'coefficient7_x3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient8 y**3',
{'name': u'Coefficient8 y**3',
'pyname': u'coefficient8_y3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient9 x**2*y',
{'name': u'Coefficient9 x**2*y',
'pyname': u'coefficient9_x2y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient10 x*y**2',
{'name': u'Coefficient10 x*y**2',
'pyname': u'coefficient10_xy2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of y',
{'name': u'Minimum Value of y',
'pyname': u'minimum_value_of_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of y',
{'name': u'Maximum Value of y',
'pyname': u'maximum_value_of_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for X',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'input unit type for y',
{'name': u'Input Unit Type for Y',
'pyname': u'input_unit_type_for_y',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Capacity',
u'Power'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:Bicubic',
'pyname': u'CurveBicubic',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_constant(self):
"""field `Coefficient1 Constant`
Args:
value (float): value for IDD Field `Coefficient1 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_constant` or None if not set
"""
return self["Coefficient1 Constant"]
@coefficient1_constant.setter
def coefficient1_constant(self, value=None):
"""Corresponds to IDD field `Coefficient1 Constant`"""
self["Coefficient1 Constant"] = value
@property
def coefficient2_x(self):
"""field `Coefficient2 x`
Args:
value (float): value for IDD Field `Coefficient2 x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_x` or None if not set
"""
return self["Coefficient2 x"]
@coefficient2_x.setter
def coefficient2_x(self, value=None):
"""Corresponds to IDD field `Coefficient2 x`"""
self["Coefficient2 x"] = value
@property
def coefficient3_x2(self):
"""field `Coefficient3 x**2`
Args:
value (float): value for IDD Field `Coefficient3 x**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_x2` or None if not set
"""
return self["Coefficient3 x**2"]
@coefficient3_x2.setter
def coefficient3_x2(self, value=None):
""" Corresponds to IDD field `Coefficient3 x**2`
"""
self["Coefficient3 x**2"] = value
@property
def coefficient4_y(self):
"""field `Coefficient4 y`
Args:
value (float): value for IDD Field `Coefficient4 y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient4_y` or None if not set
"""
return self["Coefficient4 y"]
@coefficient4_y.setter
def coefficient4_y(self, value=None):
"""Corresponds to IDD field `Coefficient4 y`"""
self["Coefficient4 y"] = value
@property
def coefficient5_y2(self):
"""field `Coefficient5 y**2`
Args:
value (float): value for IDD Field `Coefficient5 y**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient5_y2` or None if not set
"""
return self["Coefficient5 y**2"]
@coefficient5_y2.setter
def coefficient5_y2(self, value=None):
""" Corresponds to IDD field `Coefficient5 y**2`
"""
self["Coefficient5 y**2"] = value
@property
def coefficient6_xy(self):
"""field `Coefficient6 x*y`
Args:
value (float): value for IDD Field `Coefficient6 x*y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient6_xy` or None if not set
"""
return self["Coefficient6 x*y"]
@coefficient6_xy.setter
def coefficient6_xy(self, value=None):
""" Corresponds to IDD field `Coefficient6 x*y`
"""
self["Coefficient6 x*y"] = value
@property
def coefficient7_x3(self):
"""field `Coefficient7 x**3`
Args:
value (float): value for IDD Field `Coefficient7 x**3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient7_x3` or None if not set
"""
return self["Coefficient7 x**3"]
@coefficient7_x3.setter
def coefficient7_x3(self, value=None):
""" Corresponds to IDD field `Coefficient7 x**3`
"""
self["Coefficient7 x**3"] = value
@property
def coefficient8_y3(self):
"""field `Coefficient8 y**3`
Args:
value (float): value for IDD Field `Coefficient8 y**3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient8_y3` or None if not set
"""
return self["Coefficient8 y**3"]
@coefficient8_y3.setter
def coefficient8_y3(self, value=None):
""" Corresponds to IDD field `Coefficient8 y**3`
"""
self["Coefficient8 y**3"] = value
@property
def coefficient9_x2y(self):
"""field `Coefficient9 x**2*y`
Args:
value (float): value for IDD Field `Coefficient9 x**2*y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient9_x2y` or None if not set
"""
return self["Coefficient9 x**2*y"]
@coefficient9_x2y.setter
def coefficient9_x2y(self, value=None):
""" Corresponds to IDD field `Coefficient9 x**2*y`
"""
self["Coefficient9 x**2*y"] = value
@property
def coefficient10_xy2(self):
"""field `Coefficient10 x*y**2`
Args:
value (float): value for IDD Field `Coefficient10 x*y**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient10_xy2` or None if not set
"""
return self["Coefficient10 x*y**2"]
@coefficient10_xy2.setter
def coefficient10_xy2(self, value=None):
""" Corresponds to IDD field `Coefficient10 x*y**2`
"""
self["Coefficient10 x*y**2"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_value_of_y(self):
"""field `Minimum Value of y`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_y` or None if not set
"""
return self["Minimum Value of y"]
@minimum_value_of_y.setter
def minimum_value_of_y(self, value=None):
"""Corresponds to IDD field `Minimum Value of y`"""
self["Minimum Value of y"] = value
@property
def maximum_value_of_y(self):
"""field `Maximum Value of y`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_y` or None if not set
"""
return self["Maximum Value of y"]
@maximum_value_of_y.setter
def maximum_value_of_y(self, value=None):
"""Corresponds to IDD field `Maximum Value of y`"""
self["Maximum Value of y"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for X`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for X`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for X"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for X`"""
self["Input Unit Type for X"] = value
@property
def input_unit_type_for_y(self):
"""field `Input Unit Type for Y`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for Y`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_y` or None if not set
"""
return self["Input Unit Type for Y"]
@input_unit_type_for_y.setter
def input_unit_type_for_y(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for Y`"""
self["Input Unit Type for Y"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveBiquadratic(DataObject):
""" Corresponds to IDD object `Curve:Biquadratic`
Quadratic curve with two independent variables. Input consists of the curve name, the
six coefficients, and min and max values for each of the independent variables.
Optional inputs for curve minimum and maximum may be used to limit the
output of the performance curve.
curve = C1 + C2*x + C3*x**2 + C4*y + C5*y**2 + C6*x*y
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 constant',
{'name': u'Coefficient1 Constant',
'pyname': u'coefficient1_constant',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 x',
{'name': u'Coefficient2 x',
'pyname': u'coefficient2_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 x**2',
{'name': u'Coefficient3 x**2',
'pyname': u'coefficient3_x2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient4 y',
{'name': u'Coefficient4 y',
'pyname': u'coefficient4_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient5 y**2',
{'name': u'Coefficient5 y**2',
'pyname': u'coefficient5_y2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient6 x*y',
{'name': u'Coefficient6 x*y',
'pyname': u'coefficient6_xy',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of y',
{'name': u'Minimum Value of y',
'pyname': u'minimum_value_of_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of y',
{'name': u'Maximum Value of y',
'pyname': u'maximum_value_of_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for X',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'input unit type for y',
{'name': u'Input Unit Type for Y',
'pyname': u'input_unit_type_for_y',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Capacity',
u'Power'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:Biquadratic',
'pyname': u'CurveBiquadratic',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_constant(self):
"""field `Coefficient1 Constant`
Args:
value (float): value for IDD Field `Coefficient1 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_constant` or None if not set
"""
return self["Coefficient1 Constant"]
@coefficient1_constant.setter
def coefficient1_constant(self, value=None):
"""Corresponds to IDD field `Coefficient1 Constant`"""
self["Coefficient1 Constant"] = value
@property
def coefficient2_x(self):
"""field `Coefficient2 x`
Args:
value (float): value for IDD Field `Coefficient2 x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_x` or None if not set
"""
return self["Coefficient2 x"]
@coefficient2_x.setter
def coefficient2_x(self, value=None):
"""Corresponds to IDD field `Coefficient2 x`"""
self["Coefficient2 x"] = value
@property
def coefficient3_x2(self):
"""field `Coefficient3 x**2`
Args:
value (float): value for IDD Field `Coefficient3 x**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_x2` or None if not set
"""
return self["Coefficient3 x**2"]
@coefficient3_x2.setter
def coefficient3_x2(self, value=None):
""" Corresponds to IDD field `Coefficient3 x**2`
"""
self["Coefficient3 x**2"] = value
@property
def coefficient4_y(self):
"""field `Coefficient4 y`
Args:
value (float): value for IDD Field `Coefficient4 y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient4_y` or None if not set
"""
return self["Coefficient4 y"]
@coefficient4_y.setter
def coefficient4_y(self, value=None):
"""Corresponds to IDD field `Coefficient4 y`"""
self["Coefficient4 y"] = value
@property
def coefficient5_y2(self):
"""field `Coefficient5 y**2`
Args:
value (float): value for IDD Field `Coefficient5 y**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient5_y2` or None if not set
"""
return self["Coefficient5 y**2"]
@coefficient5_y2.setter
def coefficient5_y2(self, value=None):
""" Corresponds to IDD field `Coefficient5 y**2`
"""
self["Coefficient5 y**2"] = value
@property
def coefficient6_xy(self):
"""field `Coefficient6 x*y`
Args:
value (float): value for IDD Field `Coefficient6 x*y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient6_xy` or None if not set
"""
return self["Coefficient6 x*y"]
@coefficient6_xy.setter
def coefficient6_xy(self, value=None):
""" Corresponds to IDD field `Coefficient6 x*y`
"""
self["Coefficient6 x*y"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_value_of_y(self):
"""field `Minimum Value of y`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_y` or None if not set
"""
return self["Minimum Value of y"]
@minimum_value_of_y.setter
def minimum_value_of_y(self, value=None):
"""Corresponds to IDD field `Minimum Value of y`"""
self["Minimum Value of y"] = value
@property
def maximum_value_of_y(self):
"""field `Maximum Value of y`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_y` or None if not set
"""
return self["Maximum Value of y"]
@maximum_value_of_y.setter
def maximum_value_of_y(self, value=None):
"""Corresponds to IDD field `Maximum Value of y`"""
self["Maximum Value of y"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for X`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for X`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for X"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for X`"""
self["Input Unit Type for X"] = value
@property
def input_unit_type_for_y(self):
"""field `Input Unit Type for Y`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for Y`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_y` or None if not set
"""
return self["Input Unit Type for Y"]
@input_unit_type_for_y.setter
def input_unit_type_for_y(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for Y`"""
self["Input Unit Type for Y"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveQuadraticLinear(DataObject):
""" Corresponds to IDD object `Curve:QuadraticLinear`
Quadratic-linear curve with two independent variables. Input consists of the curve
name, the six coefficients, and min and max values for each of the independent
variables. Optional inputs for curve minimum and maximum may be used to limit the
output of the performance curve.
curve = (C1 + C2*x + C3*x**2) + (C4 + C5*x + C6*x**2)*y
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 constant',
{'name': u'Coefficient1 Constant',
'pyname': u'coefficient1_constant',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 x',
{'name': u'Coefficient2 x',
'pyname': u'coefficient2_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 x**2',
{'name': u'Coefficient3 x**2',
'pyname': u'coefficient3_x2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient4 y',
{'name': u'Coefficient4 y',
'pyname': u'coefficient4_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient5 x*y',
{'name': u'Coefficient5 x*y',
'pyname': u'coefficient5_xy',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient6 x**2*y',
{'name': u'Coefficient6 x**2*y',
'pyname': u'coefficient6_x2y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of y',
{'name': u'Minimum Value of y',
'pyname': u'minimum_value_of_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of y',
{'name': u'Maximum Value of y',
'pyname': u'maximum_value_of_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for X',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'input unit type for y',
{'name': u'Input Unit Type for Y',
'pyname': u'input_unit_type_for_y',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Capacity',
u'Power'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:QuadraticLinear',
'pyname': u'CurveQuadraticLinear',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_constant(self):
"""field `Coefficient1 Constant`
Args:
value (float): value for IDD Field `Coefficient1 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_constant` or None if not set
"""
return self["Coefficient1 Constant"]
@coefficient1_constant.setter
def coefficient1_constant(self, value=None):
"""Corresponds to IDD field `Coefficient1 Constant`"""
self["Coefficient1 Constant"] = value
@property
def coefficient2_x(self):
"""field `Coefficient2 x`
Args:
value (float): value for IDD Field `Coefficient2 x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_x` or None if not set
"""
return self["Coefficient2 x"]
@coefficient2_x.setter
def coefficient2_x(self, value=None):
"""Corresponds to IDD field `Coefficient2 x`"""
self["Coefficient2 x"] = value
@property
def coefficient3_x2(self):
"""field `Coefficient3 x**2`
Args:
value (float): value for IDD Field `Coefficient3 x**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_x2` or None if not set
"""
return self["Coefficient3 x**2"]
@coefficient3_x2.setter
def coefficient3_x2(self, value=None):
""" Corresponds to IDD field `Coefficient3 x**2`
"""
self["Coefficient3 x**2"] = value
@property
def coefficient4_y(self):
"""field `Coefficient4 y`
Args:
value (float): value for IDD Field `Coefficient4 y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient4_y` or None if not set
"""
return self["Coefficient4 y"]
@coefficient4_y.setter
def coefficient4_y(self, value=None):
"""Corresponds to IDD field `Coefficient4 y`"""
self["Coefficient4 y"] = value
@property
def coefficient5_xy(self):
"""field `Coefficient5 x*y`
Args:
value (float): value for IDD Field `Coefficient5 x*y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient5_xy` or None if not set
"""
return self["Coefficient5 x*y"]
@coefficient5_xy.setter
def coefficient5_xy(self, value=None):
""" Corresponds to IDD field `Coefficient5 x*y`
"""
self["Coefficient5 x*y"] = value
@property
def coefficient6_x2y(self):
"""field `Coefficient6 x**2*y`
Args:
value (float): value for IDD Field `Coefficient6 x**2*y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient6_x2y` or None if not set
"""
return self["Coefficient6 x**2*y"]
@coefficient6_x2y.setter
def coefficient6_x2y(self, value=None):
""" Corresponds to IDD field `Coefficient6 x**2*y`
"""
self["Coefficient6 x**2*y"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_value_of_y(self):
"""field `Minimum Value of y`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_y` or None if not set
"""
return self["Minimum Value of y"]
@minimum_value_of_y.setter
def minimum_value_of_y(self, value=None):
"""Corresponds to IDD field `Minimum Value of y`"""
self["Minimum Value of y"] = value
@property
def maximum_value_of_y(self):
"""field `Maximum Value of y`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_y` or None if not set
"""
return self["Maximum Value of y"]
@maximum_value_of_y.setter
def maximum_value_of_y(self, value=None):
"""Corresponds to IDD field `Maximum Value of y`"""
self["Maximum Value of y"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for X`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for X`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for X"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for X`"""
self["Input Unit Type for X"] = value
@property
def input_unit_type_for_y(self):
"""field `Input Unit Type for Y`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for Y`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_y` or None if not set
"""
return self["Input Unit Type for Y"]
@input_unit_type_for_y.setter
def input_unit_type_for_y(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for Y`"""
self["Input Unit Type for Y"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveCubicLinear(DataObject):
""" Corresponds to IDD object `Curve:CubicLinear`
Cubic-linear curve with two independent variables. Input consists of the curve
name, the six coefficients, and min and max values for each of the independent
variables. Optional inputs for curve minimum and maximum may be used to limit the
output of the performance curve.
curve = (C1 + C2*x + C3*x**2 + C4*x**3) + (C5 + C6*x)*y
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 constant',
{'name': u'Coefficient1 Constant',
'pyname': u'coefficient1_constant',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 x',
{'name': u'Coefficient2 x',
'pyname': u'coefficient2_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 x**2',
{'name': u'Coefficient3 x**2',
'pyname': u'coefficient3_x2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient4 x**3',
{'name': u'Coefficient4 x**3',
'pyname': u'coefficient4_x3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient5 y',
{'name': u'Coefficient5 y',
'pyname': u'coefficient5_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient6 x*y',
{'name': u'Coefficient6 x*y',
'pyname': u'coefficient6_xy',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of y',
{'name': u'Minimum Value of y',
'pyname': u'minimum_value_of_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of y',
{'name': u'Maximum Value of y',
'pyname': u'maximum_value_of_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for X',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'}),
(u'input unit type for y',
{'name': u'Input Unit Type for Y',
'pyname': u'input_unit_type_for_y',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:CubicLinear',
'pyname': u'CurveCubicLinear',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_constant(self):
"""field `Coefficient1 Constant`
Args:
value (float): value for IDD Field `Coefficient1 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_constant` or None if not set
"""
return self["Coefficient1 Constant"]
@coefficient1_constant.setter
def coefficient1_constant(self, value=None):
"""Corresponds to IDD field `Coefficient1 Constant`"""
self["Coefficient1 Constant"] = value
@property
def coefficient2_x(self):
"""field `Coefficient2 x`
Args:
value (float): value for IDD Field `Coefficient2 x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_x` or None if not set
"""
return self["Coefficient2 x"]
@coefficient2_x.setter
def coefficient2_x(self, value=None):
"""Corresponds to IDD field `Coefficient2 x`"""
self["Coefficient2 x"] = value
@property
def coefficient3_x2(self):
"""field `Coefficient3 x**2`
Args:
value (float): value for IDD Field `Coefficient3 x**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_x2` or None if not set
"""
return self["Coefficient3 x**2"]
@coefficient3_x2.setter
def coefficient3_x2(self, value=None):
""" Corresponds to IDD field `Coefficient3 x**2`
"""
self["Coefficient3 x**2"] = value
@property
def coefficient4_x3(self):
"""field `Coefficient4 x**3`
Args:
value (float): value for IDD Field `Coefficient4 x**3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient4_x3` or None if not set
"""
return self["Coefficient4 x**3"]
@coefficient4_x3.setter
def coefficient4_x3(self, value=None):
""" Corresponds to IDD field `Coefficient4 x**3`
"""
self["Coefficient4 x**3"] = value
@property
def coefficient5_y(self):
"""field `Coefficient5 y`
Args:
value (float): value for IDD Field `Coefficient5 y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient5_y` or None if not set
"""
return self["Coefficient5 y"]
@coefficient5_y.setter
def coefficient5_y(self, value=None):
"""Corresponds to IDD field `Coefficient5 y`"""
self["Coefficient5 y"] = value
@property
def coefficient6_xy(self):
"""field `Coefficient6 x*y`
Args:
value (float): value for IDD Field `Coefficient6 x*y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient6_xy` or None if not set
"""
return self["Coefficient6 x*y"]
@coefficient6_xy.setter
def coefficient6_xy(self, value=None):
""" Corresponds to IDD field `Coefficient6 x*y`
"""
self["Coefficient6 x*y"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_value_of_y(self):
"""field `Minimum Value of y`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_y` or None if not set
"""
return self["Minimum Value of y"]
@minimum_value_of_y.setter
def minimum_value_of_y(self, value=None):
"""Corresponds to IDD field `Minimum Value of y`"""
self["Minimum Value of y"] = value
@property
def maximum_value_of_y(self):
"""field `Maximum Value of y`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_y` or None if not set
"""
return self["Maximum Value of y"]
@maximum_value_of_y.setter
def maximum_value_of_y(self, value=None):
"""Corresponds to IDD field `Maximum Value of y`"""
self["Maximum Value of y"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for X`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for X`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for X"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for X`"""
self["Input Unit Type for X"] = value
@property
def input_unit_type_for_y(self):
"""field `Input Unit Type for Y`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for Y`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_y` or None if not set
"""
return self["Input Unit Type for Y"]
@input_unit_type_for_y.setter
def input_unit_type_for_y(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for Y`"""
self["Input Unit Type for Y"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveTriquadratic(DataObject):
""" Corresponds to IDD object `Curve:Triquadratic`
Quadratic curve with three independent variables. Input consists of the curve name,
the twenty-seven coefficients, and min and max values for each of the independent
variables. Optional inputs for curve minimum and maximum may be used to
limit the output of the performance curve.
curve = a0 + a1*x**2 + a2*x + a3*y**2 + a4*y
+ a5*z**2 + a6*z + a7*x**2*y**2 + a8*x*y
+ a9*x*y**2 + a10*x**2*y + a11*x**2*z**2
+ a12*x*z + a13*x*z**2 + a14*x**2*z + a15*y**2*z**2
+ a16*y*z + a17*y*z**2 + a18*y**2*z + a19*x**2*y**2*z**2
+ a20*x**2*y**2*z + a21*x**2*y*z**2 + a22*x*y**2*z**2
+ a23*x**2*y*z + a24*x*y**2*z + a25*x*y*z**2 + a26*x*y*z
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 constant',
{'name': u'Coefficient1 Constant',
'pyname': u'coefficient1_constant',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 x**2',
{'name': u'Coefficient2 x**2',
'pyname': u'coefficient2_x2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 x',
{'name': u'Coefficient3 x',
'pyname': u'coefficient3_x',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient4 y**2',
{'name': u'Coefficient4 y**2',
'pyname': u'coefficient4_y2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient5 y',
{'name': u'Coefficient5 y',
'pyname': u'coefficient5_y',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient6 z**2',
{'name': u'Coefficient6 z**2',
'pyname': u'coefficient6_z2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient7 z',
{'name': u'Coefficient7 z',
'pyname': u'coefficient7_z',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient8 x**2*y**2',
{'name': u'Coefficient8 x**2*y**2',
'pyname': u'coefficient8_x2y2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient9 x*y',
{'name': u'Coefficient9 x*y',
'pyname': u'coefficient9_xy',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient10 x*y**2',
{'name': u'Coefficient10 x*y**2',
'pyname': u'coefficient10_xy2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient11 x**2*y',
{'name': u'Coefficient11 x**2*y',
'pyname': u'coefficient11_x2y',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient12 x**2*z**2',
{'name': u'Coefficient12 x**2*z**2',
'pyname': u'coefficient12_x2z2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient13 x*z',
{'name': u'Coefficient13 x*z',
'pyname': u'coefficient13_xz',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient14 x*z**2',
{'name': u'Coefficient14 x*z**2',
'pyname': u'coefficient14_xz2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient15 x**2*z',
{'name': u'Coefficient15 x**2*z',
'pyname': u'coefficient15_x2z',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient16 y**2*z**2',
{'name': u'Coefficient16 y**2*z**2',
'pyname': u'coefficient16_y2z2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient17 y*z',
{'name': u'Coefficient17 y*z',
'pyname': u'coefficient17_yz',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient18 y*z**2',
{'name': u'Coefficient18 y*z**2',
'pyname': u'coefficient18_yz2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient19 y**2*z',
{'name': u'Coefficient19 y**2*z',
'pyname': u'coefficient19_y2z',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient20 x**2*y**2*z**2',
{'name': u'Coefficient20 x**2*y**2*z**2',
'pyname': u'coefficient20_x2y2z2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient21 x**2*y**2*z',
{'name': u'Coefficient21 x**2*y**2*z',
'pyname': u'coefficient21_x2y2z',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient22 x**2*y*z**2',
{'name': u'Coefficient22 x**2*y*z**2',
'pyname': u'coefficient22_x2yz2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient23 x*y**2*z**2',
{'name': u'Coefficient23 x*y**2*z**2',
'pyname': u'coefficient23_xy2z2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient24 x**2*y*z',
{'name': u'Coefficient24 x**2*y*z',
'pyname': u'coefficient24_x2yz',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient25 x*y**2*z',
{'name': u'Coefficient25 x*y**2*z',
'pyname': u'coefficient25_xy2z',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient26 x*y*z**2',
{'name': u'Coefficient26 x*y*z**2',
'pyname': u'coefficient26_xyz2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient27 x*y*z',
{'name': u'Coefficient27 x*y*z',
'pyname': u'coefficient27_xyz',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of y',
{'name': u'Minimum Value of y',
'pyname': u'minimum_value_of_y',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of y',
{'name': u'Maximum Value of y',
'pyname': u'maximum_value_of_y',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of z',
{'name': u'Minimum Value of z',
'pyname': u'minimum_value_of_z',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of z',
{'name': u'Maximum Value of z',
'pyname': u'maximum_value_of_z',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for X',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'input unit type for y',
{'name': u'Input Unit Type for Y',
'pyname': u'input_unit_type_for_y',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'input unit type for z',
{'name': u'Input Unit Type for Z',
'pyname': u'input_unit_type_for_z',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Capacity',
u'Power'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:Triquadratic',
'pyname': u'CurveTriquadratic',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_constant(self):
"""field `Coefficient1 Constant`
Args:
value (float): value for IDD Field `Coefficient1 Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_constant` or None if not set
"""
return self["Coefficient1 Constant"]
@coefficient1_constant.setter
def coefficient1_constant(self, value=None):
"""Corresponds to IDD field `Coefficient1 Constant`"""
self["Coefficient1 Constant"] = value
@property
def coefficient2_x2(self):
"""field `Coefficient2 x**2`
Args:
value (float): value for IDD Field `Coefficient2 x**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_x2` or None if not set
"""
return self["Coefficient2 x**2"]
@coefficient2_x2.setter
def coefficient2_x2(self, value=None):
""" Corresponds to IDD field `Coefficient2 x**2`
"""
self["Coefficient2 x**2"] = value
@property
def coefficient3_x(self):
"""field `Coefficient3 x`
Args:
value (float): value for IDD Field `Coefficient3 x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_x` or None if not set
"""
return self["Coefficient3 x"]
@coefficient3_x.setter
def coefficient3_x(self, value=None):
"""Corresponds to IDD field `Coefficient3 x`"""
self["Coefficient3 x"] = value
@property
def coefficient4_y2(self):
"""field `Coefficient4 y**2`
Args:
value (float): value for IDD Field `Coefficient4 y**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient4_y2` or None if not set
"""
return self["Coefficient4 y**2"]
@coefficient4_y2.setter
def coefficient4_y2(self, value=None):
""" Corresponds to IDD field `Coefficient4 y**2`
"""
self["Coefficient4 y**2"] = value
@property
def coefficient5_y(self):
"""field `Coefficient5 y`
Args:
value (float): value for IDD Field `Coefficient5 y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient5_y` or None if not set
"""
return self["Coefficient5 y"]
@coefficient5_y.setter
def coefficient5_y(self, value=None):
"""Corresponds to IDD field `Coefficient5 y`"""
self["Coefficient5 y"] = value
@property
def coefficient6_z2(self):
"""field `Coefficient6 z**2`
Args:
value (float): value for IDD Field `Coefficient6 z**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient6_z2` or None if not set
"""
return self["Coefficient6 z**2"]
@coefficient6_z2.setter
def coefficient6_z2(self, value=None):
""" Corresponds to IDD field `Coefficient6 z**2`
"""
self["Coefficient6 z**2"] = value
@property
def coefficient7_z(self):
"""field `Coefficient7 z`
Args:
value (float): value for IDD Field `Coefficient7 z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient7_z` or None if not set
"""
return self["Coefficient7 z"]
@coefficient7_z.setter
def coefficient7_z(self, value=None):
"""Corresponds to IDD field `Coefficient7 z`"""
self["Coefficient7 z"] = value
@property
def coefficient8_x2y2(self):
"""field `Coefficient8 x**2*y**2`
Args:
value (float): value for IDD Field `Coefficient8 x**2*y**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient8_x2y2` or None if not set
"""
return self["Coefficient8 x**2*y**2"]
@coefficient8_x2y2.setter
def coefficient8_x2y2(self, value=None):
""" Corresponds to IDD field `Coefficient8 x**2*y**2`
"""
self["Coefficient8 x**2*y**2"] = value
@property
def coefficient9_xy(self):
"""field `Coefficient9 x*y`
Args:
value (float): value for IDD Field `Coefficient9 x*y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient9_xy` or None if not set
"""
return self["Coefficient9 x*y"]
@coefficient9_xy.setter
def coefficient9_xy(self, value=None):
""" Corresponds to IDD field `Coefficient9 x*y`
"""
self["Coefficient9 x*y"] = value
@property
def coefficient10_xy2(self):
"""field `Coefficient10 x*y**2`
Args:
value (float): value for IDD Field `Coefficient10 x*y**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient10_xy2` or None if not set
"""
return self["Coefficient10 x*y**2"]
@coefficient10_xy2.setter
def coefficient10_xy2(self, value=None):
""" Corresponds to IDD field `Coefficient10 x*y**2`
"""
self["Coefficient10 x*y**2"] = value
@property
def coefficient11_x2y(self):
"""field `Coefficient11 x**2*y`
Args:
value (float): value for IDD Field `Coefficient11 x**2*y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient11_x2y` or None if not set
"""
return self["Coefficient11 x**2*y"]
@coefficient11_x2y.setter
def coefficient11_x2y(self, value=None):
""" Corresponds to IDD field `Coefficient11 x**2*y`
"""
self["Coefficient11 x**2*y"] = value
@property
def coefficient12_x2z2(self):
"""field `Coefficient12 x**2*z**2`
Args:
value (float): value for IDD Field `Coefficient12 x**2*z**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient12_x2z2` or None if not set
"""
return self["Coefficient12 x**2*z**2"]
@coefficient12_x2z2.setter
def coefficient12_x2z2(self, value=None):
""" Corresponds to IDD field `Coefficient12 x**2*z**2`
"""
self["Coefficient12 x**2*z**2"] = value
@property
def coefficient13_xz(self):
"""field `Coefficient13 x*z`
Args:
value (float): value for IDD Field `Coefficient13 x*z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient13_xz` or None if not set
"""
return self["Coefficient13 x*z"]
@coefficient13_xz.setter
def coefficient13_xz(self, value=None):
""" Corresponds to IDD field `Coefficient13 x*z`
"""
self["Coefficient13 x*z"] = value
@property
def coefficient14_xz2(self):
"""field `Coefficient14 x*z**2`
Args:
value (float): value for IDD Field `Coefficient14 x*z**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient14_xz2` or None if not set
"""
return self["Coefficient14 x*z**2"]
@coefficient14_xz2.setter
def coefficient14_xz2(self, value=None):
""" Corresponds to IDD field `Coefficient14 x*z**2`
"""
self["Coefficient14 x*z**2"] = value
@property
def coefficient15_x2z(self):
"""field `Coefficient15 x**2*z`
Args:
value (float): value for IDD Field `Coefficient15 x**2*z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient15_x2z` or None if not set
"""
return self["Coefficient15 x**2*z"]
@coefficient15_x2z.setter
def coefficient15_x2z(self, value=None):
""" Corresponds to IDD field `Coefficient15 x**2*z`
"""
self["Coefficient15 x**2*z"] = value
@property
def coefficient16_y2z2(self):
"""field `Coefficient16 y**2*z**2`
Args:
value (float): value for IDD Field `Coefficient16 y**2*z**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient16_y2z2` or None if not set
"""
return self["Coefficient16 y**2*z**2"]
@coefficient16_y2z2.setter
def coefficient16_y2z2(self, value=None):
""" Corresponds to IDD field `Coefficient16 y**2*z**2`
"""
self["Coefficient16 y**2*z**2"] = value
@property
def coefficient17_yz(self):
"""field `Coefficient17 y*z`
Args:
value (float): value for IDD Field `Coefficient17 y*z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient17_yz` or None if not set
"""
return self["Coefficient17 y*z"]
@coefficient17_yz.setter
def coefficient17_yz(self, value=None):
""" Corresponds to IDD field `Coefficient17 y*z`
"""
self["Coefficient17 y*z"] = value
@property
def coefficient18_yz2(self):
"""field `Coefficient18 y*z**2`
Args:
value (float): value for IDD Field `Coefficient18 y*z**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient18_yz2` or None if not set
"""
return self["Coefficient18 y*z**2"]
@coefficient18_yz2.setter
def coefficient18_yz2(self, value=None):
""" Corresponds to IDD field `Coefficient18 y*z**2`
"""
self["Coefficient18 y*z**2"] = value
@property
def coefficient19_y2z(self):
"""field `Coefficient19 y**2*z`
Args:
value (float): value for IDD Field `Coefficient19 y**2*z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient19_y2z` or None if not set
"""
return self["Coefficient19 y**2*z"]
@coefficient19_y2z.setter
def coefficient19_y2z(self, value=None):
""" Corresponds to IDD field `Coefficient19 y**2*z`
"""
self["Coefficient19 y**2*z"] = value
@property
def coefficient20_x2y2z2(self):
"""field `Coefficient20 x**2*y**2*z**2`
Args:
value (float): value for IDD Field `Coefficient20 x**2*y**2*z**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient20_x2y2z2` or None if not set
"""
return self["Coefficient20 x**2*y**2*z**2"]
@coefficient20_x2y2z2.setter
def coefficient20_x2y2z2(self, value=None):
""" Corresponds to IDD field `Coefficient20 x**2*y**2*z**2`
"""
self["Coefficient20 x**2*y**2*z**2"] = value
@property
def coefficient21_x2y2z(self):
"""field `Coefficient21 x**2*y**2*z`
Args:
value (float): value for IDD Field `Coefficient21 x**2*y**2*z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient21_x2y2z` or None if not set
"""
return self["Coefficient21 x**2*y**2*z"]
@coefficient21_x2y2z.setter
def coefficient21_x2y2z(self, value=None):
""" Corresponds to IDD field `Coefficient21 x**2*y**2*z`
"""
self["Coefficient21 x**2*y**2*z"] = value
@property
def coefficient22_x2yz2(self):
"""field `Coefficient22 x**2*y*z**2`
Args:
value (float): value for IDD Field `Coefficient22 x**2*y*z**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient22_x2yz2` or None if not set
"""
return self["Coefficient22 x**2*y*z**2"]
@coefficient22_x2yz2.setter
def coefficient22_x2yz2(self, value=None):
""" Corresponds to IDD field `Coefficient22 x**2*y*z**2`
"""
self["Coefficient22 x**2*y*z**2"] = value
@property
def coefficient23_xy2z2(self):
"""field `Coefficient23 x*y**2*z**2`
Args:
value (float): value for IDD Field `Coefficient23 x*y**2*z**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient23_xy2z2` or None if not set
"""
return self["Coefficient23 x*y**2*z**2"]
@coefficient23_xy2z2.setter
def coefficient23_xy2z2(self, value=None):
""" Corresponds to IDD field `Coefficient23 x*y**2*z**2`
"""
self["Coefficient23 x*y**2*z**2"] = value
@property
def coefficient24_x2yz(self):
"""field `Coefficient24 x**2*y*z`
Args:
value (float): value for IDD Field `Coefficient24 x**2*y*z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient24_x2yz` or None if not set
"""
return self["Coefficient24 x**2*y*z"]
@coefficient24_x2yz.setter
def coefficient24_x2yz(self, value=None):
""" Corresponds to IDD field `Coefficient24 x**2*y*z`
"""
self["Coefficient24 x**2*y*z"] = value
@property
def coefficient25_xy2z(self):
"""field `Coefficient25 x*y**2*z`
Args:
value (float): value for IDD Field `Coefficient25 x*y**2*z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient25_xy2z` or None if not set
"""
return self["Coefficient25 x*y**2*z"]
@coefficient25_xy2z.setter
def coefficient25_xy2z(self, value=None):
""" Corresponds to IDD field `Coefficient25 x*y**2*z`
"""
self["Coefficient25 x*y**2*z"] = value
@property
def coefficient26_xyz2(self):
"""field `Coefficient26 x*y*z**2`
Args:
value (float): value for IDD Field `Coefficient26 x*y*z**2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient26_xyz2` or None if not set
"""
return self["Coefficient26 x*y*z**2"]
@coefficient26_xyz2.setter
def coefficient26_xyz2(self, value=None):
""" Corresponds to IDD field `Coefficient26 x*y*z**2`
"""
self["Coefficient26 x*y*z**2"] = value
@property
def coefficient27_xyz(self):
"""field `Coefficient27 x*y*z`
Args:
value (float): value for IDD Field `Coefficient27 x*y*z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient27_xyz` or None if not set
"""
return self["Coefficient27 x*y*z"]
@coefficient27_xyz.setter
def coefficient27_xyz(self, value=None):
""" Corresponds to IDD field `Coefficient27 x*y*z`
"""
self["Coefficient27 x*y*z"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_value_of_y(self):
"""field `Minimum Value of y`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_y` or None if not set
"""
return self["Minimum Value of y"]
@minimum_value_of_y.setter
def minimum_value_of_y(self, value=None):
"""Corresponds to IDD field `Minimum Value of y`"""
self["Minimum Value of y"] = value
@property
def maximum_value_of_y(self):
"""field `Maximum Value of y`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_y` or None if not set
"""
return self["Maximum Value of y"]
@maximum_value_of_y.setter
def maximum_value_of_y(self, value=None):
"""Corresponds to IDD field `Maximum Value of y`"""
self["Maximum Value of y"] = value
@property
def minimum_value_of_z(self):
"""field `Minimum Value of z`
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Minimum Value of z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_z` or None if not set
"""
return self["Minimum Value of z"]
@minimum_value_of_z.setter
def minimum_value_of_z(self, value=None):
"""Corresponds to IDD field `Minimum Value of z`"""
self["Minimum Value of z"] = value
@property
def maximum_value_of_z(self):
"""field `Maximum Value of z`
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Maximum Value of z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_z` or None if not set
"""
return self["Maximum Value of z"]
@maximum_value_of_z.setter
def maximum_value_of_z(self, value=None):
"""Corresponds to IDD field `Maximum Value of z`"""
self["Maximum Value of z"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A5`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A5`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for X`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for X`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for X"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for X`"""
self["Input Unit Type for X"] = value
@property
def input_unit_type_for_y(self):
"""field `Input Unit Type for Y`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for Y`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_y` or None if not set
"""
return self["Input Unit Type for Y"]
@input_unit_type_for_y.setter
def input_unit_type_for_y(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for Y`"""
self["Input Unit Type for Y"] = value
@property
def input_unit_type_for_z(self):
"""field `Input Unit Type for Z`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for Z`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_z` or None if not set
"""
return self["Input Unit Type for Z"]
@input_unit_type_for_z.setter
def input_unit_type_for_z(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for Z`"""
self["Input Unit Type for Z"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveFunctionalPressureDrop(DataObject):
""" Corresponds to IDD object `Curve:Functional:PressureDrop`
Sets up curve information for minor loss and/or friction
calculations in plant pressure simulations
Expression: DeltaP = {K + f*(L/D)} * (rho * V^2) / 2
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'diameter',
{'name': u'Diameter',
'pyname': u'diameter',
'minimum>': 0.0,
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm'}),
(u'minor loss coefficient',
{'name': u'Minor Loss Coefficient',
'pyname': u'minor_loss_coefficient',
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'dimensionless'}),
(u'length',
{'name': u'Length',
'pyname': u'length',
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm'}),
(u'roughness',
{'name': u'Roughness',
'pyname': u'roughness',
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm'}),
(u'fixed friction factor',
{'name': u'Fixed Friction Factor',
'pyname': u'fixed_friction_factor',
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 5,
'name': u'Curve:Functional:PressureDrop',
'pyname': u'CurveFunctionalPressureDrop',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def diameter(self):
"""field `Diameter`
| "D" in above expression, used to also calculate local velocity
| Units: m
Args:
value (float): value for IDD Field `Diameter`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `diameter` or None if not set
"""
return self["Diameter"]
@diameter.setter
def diameter(self, value=None):
"""Corresponds to IDD field `Diameter`"""
self["Diameter"] = value
@property
def minor_loss_coefficient(self):
"""field `Minor Loss Coefficient`
| "K" in above expression
| Units: dimensionless
Args:
value (float): value for IDD Field `Minor Loss Coefficient`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minor_loss_coefficient` or None if not set
"""
return self["Minor Loss Coefficient"]
@minor_loss_coefficient.setter
def minor_loss_coefficient(self, value=None):
"""Corresponds to IDD field `Minor Loss Coefficient`"""
self["Minor Loss Coefficient"] = value
@property
def length(self):
"""field `Length`
| "L" in above expression
| Units: m
Args:
value (float): value for IDD Field `Length`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `length` or None if not set
"""
return self["Length"]
@length.setter
def length(self, value=None):
"""Corresponds to IDD field `Length`"""
self["Length"] = value
@property
def roughness(self):
"""field `Roughness`
| This will be used to calculate "f" from Moody-chart approximations
| Units: m
Args:
value (float): value for IDD Field `Roughness`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `roughness` or None if not set
"""
return self["Roughness"]
@roughness.setter
def roughness(self, value=None):
"""Corresponds to IDD field `Roughness`"""
self["Roughness"] = value
@property
def fixed_friction_factor(self):
"""field `Fixed Friction Factor`
| Optional way to set a constant value for "f", instead of using
| internal Moody-chart approximations
Args:
value (float): value for IDD Field `Fixed Friction Factor`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `fixed_friction_factor` or None if not set
"""
return self["Fixed Friction Factor"]
@fixed_friction_factor.setter
def fixed_friction_factor(self, value=None):
"""Corresponds to IDD field `Fixed Friction Factor`"""
self["Fixed Friction Factor"] = value
class CurveFanPressureRise(DataObject):
""" Corresponds to IDD object `Curve:FanPressureRise`
Special curve type with two independent variables.
Input for the fan total pressure rise curve consists of the curve name, the four
coefficients, and the maximum and minimum valid independent variable values. Optional
inputs for the curve minimum and maximum may be used to limit the output of the
performance curve.
curve = C1*Qfan**2 + C2*Qfan + C3*Qfan*(Psm-Po)**0.5 + C4*(Psm-Po)
Po is assumed to be zero
See the Input Output Reference for curve details
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 c1',
{'name': u'Coefficient1 C1',
'pyname': u'coefficient1_c1',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 c2',
{'name': u'Coefficient2 C2',
'pyname': u'coefficient2_c2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 c3',
{'name': u'Coefficient3 C3',
'pyname': u'coefficient3_c3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient4 c4',
{'name': u'Coefficient4 C4',
'pyname': u'coefficient4_c4',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of qfan',
{'name': u'Minimum Value of Qfan',
'pyname': u'minimum_value_of_qfan',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm3/s'}),
(u'maximum value of qfan',
{'name': u'Maximum Value of Qfan',
'pyname': u'maximum_value_of_qfan',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm3/s'}),
(u'minimum value of psm',
{'name': u'Minimum Value of Psm',
'pyname': u'minimum_value_of_psm',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'Pa'}),
(u'maximum value of psm',
{'name': u'Maximum Value of Psm',
'pyname': u'maximum_value_of_psm',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'Pa'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'Pa'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'Pa'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:FanPressureRise',
'pyname': u'CurveFanPressureRise',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_c1(self):
"""field `Coefficient1 C1`
Args:
value (float): value for IDD Field `Coefficient1 C1`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_c1` or None if not set
"""
return self["Coefficient1 C1"]
@coefficient1_c1.setter
def coefficient1_c1(self, value=None):
"""Corresponds to IDD field `Coefficient1 C1`"""
self["Coefficient1 C1"] = value
@property
def coefficient2_c2(self):
"""field `Coefficient2 C2`
Args:
value (float): value for IDD Field `Coefficient2 C2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_c2` or None if not set
"""
return self["Coefficient2 C2"]
@coefficient2_c2.setter
def coefficient2_c2(self, value=None):
"""Corresponds to IDD field `Coefficient2 C2`"""
self["Coefficient2 C2"] = value
@property
def coefficient3_c3(self):
"""field `Coefficient3 C3`
Args:
value (float): value for IDD Field `Coefficient3 C3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_c3` or None if not set
"""
return self["Coefficient3 C3"]
@coefficient3_c3.setter
def coefficient3_c3(self, value=None):
"""Corresponds to IDD field `Coefficient3 C3`"""
self["Coefficient3 C3"] = value
@property
def coefficient4_c4(self):
"""field `Coefficient4 C4`
Args:
value (float): value for IDD Field `Coefficient4 C4`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient4_c4` or None if not set
"""
return self["Coefficient4 C4"]
@coefficient4_c4.setter
def coefficient4_c4(self, value=None):
"""Corresponds to IDD field `Coefficient4 C4`"""
self["Coefficient4 C4"] = value
@property
def minimum_value_of_qfan(self):
"""field `Minimum Value of Qfan`
| Units: m3/s
Args:
value (float): value for IDD Field `Minimum Value of Qfan`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_qfan` or None if not set
"""
return self["Minimum Value of Qfan"]
@minimum_value_of_qfan.setter
def minimum_value_of_qfan(self, value=None):
"""Corresponds to IDD field `Minimum Value of Qfan`"""
self["Minimum Value of Qfan"] = value
@property
def maximum_value_of_qfan(self):
"""field `Maximum Value of Qfan`
| Units: m3/s
Args:
value (float): value for IDD Field `Maximum Value of Qfan`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_qfan` or None if not set
"""
return self["Maximum Value of Qfan"]
@maximum_value_of_qfan.setter
def maximum_value_of_qfan(self, value=None):
"""Corresponds to IDD field `Maximum Value of Qfan`"""
self["Maximum Value of Qfan"] = value
@property
def minimum_value_of_psm(self):
"""field `Minimum Value of Psm`
| Units: Pa
| IP-Units: Pa
Args:
value (float): value for IDD Field `Minimum Value of Psm`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_psm` or None if not set
"""
return self["Minimum Value of Psm"]
@minimum_value_of_psm.setter
def minimum_value_of_psm(self, value=None):
"""Corresponds to IDD field `Minimum Value of Psm`"""
self["Minimum Value of Psm"] = value
@property
def maximum_value_of_psm(self):
"""field `Maximum Value of Psm`
| Units: Pa
| IP-Units: Pa
Args:
value (float): value for IDD Field `Maximum Value of Psm`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_psm` or None if not set
"""
return self["Maximum Value of Psm"]
@maximum_value_of_psm.setter
def maximum_value_of_psm(self, value=None):
"""Corresponds to IDD field `Maximum Value of Psm`"""
self["Maximum Value of Psm"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units: Pa
| IP-Units: Pa
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units: Pa
| IP-Units: Pa
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
class CurveExponentialSkewNormal(DataObject):
""" Corresponds to IDD object `Curve:ExponentialSkewNormal`
Exponential-modified skew normal curve with one independent variable.
Input consists of the curve name, the four coefficients, and the maximum
and minimum valid independent variable values. Optional inputs for the curve minimum
and maximum may be used to limit the output of the performance curve.
curve = see Input Output Reference
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 c1',
{'name': u'Coefficient1 C1',
'pyname': u'coefficient1_c1',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 c2',
{'name': u'Coefficient2 C2',
'pyname': u'coefficient2_c2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 c3',
{'name': u'Coefficient3 C3',
'pyname': u'coefficient3_c3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient4 c4',
{'name': u'Coefficient4 C4',
'pyname': u'coefficient4_c4',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for x',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:ExponentialSkewNormal',
'pyname': u'CurveExponentialSkewNormal',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
| See the Input Output Reference for the curve description
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_c1(self):
"""field `Coefficient1 C1`
Args:
value (float): value for IDD Field `Coefficient1 C1`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_c1` or None if not set
"""
return self["Coefficient1 C1"]
@coefficient1_c1.setter
def coefficient1_c1(self, value=None):
"""Corresponds to IDD field `Coefficient1 C1`"""
self["Coefficient1 C1"] = value
@property
def coefficient2_c2(self):
"""field `Coefficient2 C2`
Args:
value (float): value for IDD Field `Coefficient2 C2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_c2` or None if not set
"""
return self["Coefficient2 C2"]
@coefficient2_c2.setter
def coefficient2_c2(self, value=None):
"""Corresponds to IDD field `Coefficient2 C2`"""
self["Coefficient2 C2"] = value
@property
def coefficient3_c3(self):
"""field `Coefficient3 C3`
Args:
value (float): value for IDD Field `Coefficient3 C3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_c3` or None if not set
"""
return self["Coefficient3 C3"]
@coefficient3_c3.setter
def coefficient3_c3(self, value=None):
"""Corresponds to IDD field `Coefficient3 C3`"""
self["Coefficient3 C3"] = value
@property
def coefficient4_c4(self):
"""field `Coefficient4 C4`
Args:
value (float): value for IDD Field `Coefficient4 C4`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient4_c4` or None if not set
"""
return self["Coefficient4 C4"]
@coefficient4_c4.setter
def coefficient4_c4(self, value=None):
"""Corresponds to IDD field `Coefficient4 C4`"""
self["Coefficient4 C4"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for x`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for x`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for x"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for x`"""
self["Input Unit Type for x"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveSigmoid(DataObject):
""" Corresponds to IDD object `Curve:Sigmoid`
Sigmoid curve with one independent variable.
Input consists of the curve name, the five coefficients, and the maximum and minimum
valid independent variable values. Optional inputs for the curve minimum and maximum
may be used to limit the output of the performance curve.
curve = C1+C2/[1+exp((C3-x)/C4)]**C5
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 c1',
{'name': u'Coefficient1 C1',
'pyname': u'coefficient1_c1',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 c2',
{'name': u'Coefficient2 C2',
'pyname': u'coefficient2_c2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 c3',
{'name': u'Coefficient3 C3',
'pyname': u'coefficient3_c3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient4 c4',
{'name': u'Coefficient4 C4',
'pyname': u'coefficient4_c4',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient5 c5',
{'name': u'Coefficient5 C5',
'pyname': u'coefficient5_c5',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for x',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:Sigmoid',
'pyname': u'CurveSigmoid',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
| See the EnergyPlus Input Output Reference for the curve description
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_c1(self):
"""field `Coefficient1 C1`
Args:
value (float): value for IDD Field `Coefficient1 C1`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_c1` or None if not set
"""
return self["Coefficient1 C1"]
@coefficient1_c1.setter
def coefficient1_c1(self, value=None):
"""Corresponds to IDD field `Coefficient1 C1`"""
self["Coefficient1 C1"] = value
@property
def coefficient2_c2(self):
"""field `Coefficient2 C2`
Args:
value (float): value for IDD Field `Coefficient2 C2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_c2` or None if not set
"""
return self["Coefficient2 C2"]
@coefficient2_c2.setter
def coefficient2_c2(self, value=None):
"""Corresponds to IDD field `Coefficient2 C2`"""
self["Coefficient2 C2"] = value
@property
def coefficient3_c3(self):
"""field `Coefficient3 C3`
Args:
value (float): value for IDD Field `Coefficient3 C3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_c3` or None if not set
"""
return self["Coefficient3 C3"]
@coefficient3_c3.setter
def coefficient3_c3(self, value=None):
"""Corresponds to IDD field `Coefficient3 C3`"""
self["Coefficient3 C3"] = value
@property
def coefficient4_c4(self):
"""field `Coefficient4 C4`
Args:
value (float): value for IDD Field `Coefficient4 C4`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient4_c4` or None if not set
"""
return self["Coefficient4 C4"]
@coefficient4_c4.setter
def coefficient4_c4(self, value=None):
"""Corresponds to IDD field `Coefficient4 C4`"""
self["Coefficient4 C4"] = value
@property
def coefficient5_c5(self):
"""field `Coefficient5 C5`
Args:
value (float): value for IDD Field `Coefficient5 C5`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient5_c5` or None if not set
"""
return self["Coefficient5 C5"]
@coefficient5_c5.setter
def coefficient5_c5(self, value=None):
"""Corresponds to IDD field `Coefficient5 C5`"""
self["Coefficient5 C5"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for x`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for x`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for x"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for x`"""
self["Input Unit Type for x"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveRectangularHyperbola1(DataObject):
""" Corresponds to IDD object `Curve:RectangularHyperbola1`
Rectangular hyperbola type 1 curve with one independent variable.
Input consists of the curve name, the three coefficients, and the maximum and
minimum valid independent variable values. Optional inputs for the curve minimum and
maximum may be used to limit the output of the performance curve.
curve = ((C1*x)/(C2+x))+C3
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 c1',
{'name': u'Coefficient1 C1',
'pyname': u'coefficient1_c1',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 c2',
{'name': u'Coefficient2 C2',
'pyname': u'coefficient2_c2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 c3',
{'name': u'Coefficient3 C3',
'pyname': u'coefficient3_c3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for x',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:RectangularHyperbola1',
'pyname': u'CurveRectangularHyperbola1',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_c1(self):
"""field `Coefficient1 C1`
Args:
value (float): value for IDD Field `Coefficient1 C1`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_c1` or None if not set
"""
return self["Coefficient1 C1"]
@coefficient1_c1.setter
def coefficient1_c1(self, value=None):
"""Corresponds to IDD field `Coefficient1 C1`"""
self["Coefficient1 C1"] = value
@property
def coefficient2_c2(self):
"""field `Coefficient2 C2`
Args:
value (float): value for IDD Field `Coefficient2 C2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_c2` or None if not set
"""
return self["Coefficient2 C2"]
@coefficient2_c2.setter
def coefficient2_c2(self, value=None):
"""Corresponds to IDD field `Coefficient2 C2`"""
self["Coefficient2 C2"] = value
@property
def coefficient3_c3(self):
"""field `Coefficient3 C3`
Args:
value (float): value for IDD Field `Coefficient3 C3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_c3` or None if not set
"""
return self["Coefficient3 C3"]
@coefficient3_c3.setter
def coefficient3_c3(self, value=None):
"""Corresponds to IDD field `Coefficient3 C3`"""
self["Coefficient3 C3"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for x`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for x`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for x"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for x`"""
self["Input Unit Type for x"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveRectangularHyperbola2(DataObject):
""" Corresponds to IDD object `Curve:RectangularHyperbola2`
Rectangular hyperbola type 2 curve with one independent variable.
Input consists of the curve name, the three coefficients, and the maximum and
minimum valid independent variable values. Optional inputs for the curve minimum and
maximum may be used to limit the output of the performance curve.
curve = ((C1*x)/(C2+x))+(C3*x)
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 c1',
{'name': u'Coefficient1 C1',
'pyname': u'coefficient1_c1',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 c2',
{'name': u'Coefficient2 C2',
'pyname': u'coefficient2_c2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 c3',
{'name': u'Coefficient3 C3',
'pyname': u'coefficient3_c3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for x',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:RectangularHyperbola2',
'pyname': u'CurveRectangularHyperbola2',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_c1(self):
"""field `Coefficient1 C1`
Args:
value (float): value for IDD Field `Coefficient1 C1`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_c1` or None if not set
"""
return self["Coefficient1 C1"]
@coefficient1_c1.setter
def coefficient1_c1(self, value=None):
"""Corresponds to IDD field `Coefficient1 C1`"""
self["Coefficient1 C1"] = value
@property
def coefficient2_c2(self):
"""field `Coefficient2 C2`
Args:
value (float): value for IDD Field `Coefficient2 C2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_c2` or None if not set
"""
return self["Coefficient2 C2"]
@coefficient2_c2.setter
def coefficient2_c2(self, value=None):
"""Corresponds to IDD field `Coefficient2 C2`"""
self["Coefficient2 C2"] = value
@property
def coefficient3_c3(self):
"""field `Coefficient3 C3`
Args:
value (float): value for IDD Field `Coefficient3 C3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_c3` or None if not set
"""
return self["Coefficient3 C3"]
@coefficient3_c3.setter
def coefficient3_c3(self, value=None):
"""Corresponds to IDD field `Coefficient3 C3`"""
self["Coefficient3 C3"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for x`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for x`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for x"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for x`"""
self["Input Unit Type for x"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveExponentialDecay(DataObject):
""" Corresponds to IDD object `Curve:ExponentialDecay`
Exponential decay curve with one independent variable.
Input consists of the curve name, the three coefficients, and the maximum and minimum
valid independent variable values. Optional inputs for the curve minimum and
maximum may be used to limit the output of the performance curve.
curve = C1+C2*exp(C3*x)
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 c1',
{'name': u'Coefficient1 C1',
'pyname': u'coefficient1_c1',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 c2',
{'name': u'Coefficient2 C2',
'pyname': u'coefficient2_c2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 c3',
{'name': u'Coefficient3 C3',
'pyname': u'coefficient3_c3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for x',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:ExponentialDecay',
'pyname': u'CurveExponentialDecay',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_c1(self):
"""field `Coefficient1 C1`
Args:
value (float): value for IDD Field `Coefficient1 C1`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_c1` or None if not set
"""
return self["Coefficient1 C1"]
@coefficient1_c1.setter
def coefficient1_c1(self, value=None):
"""Corresponds to IDD field `Coefficient1 C1`"""
self["Coefficient1 C1"] = value
@property
def coefficient2_c2(self):
"""field `Coefficient2 C2`
Args:
value (float): value for IDD Field `Coefficient2 C2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_c2` or None if not set
"""
return self["Coefficient2 C2"]
@coefficient2_c2.setter
def coefficient2_c2(self, value=None):
"""Corresponds to IDD field `Coefficient2 C2`"""
self["Coefficient2 C2"] = value
@property
def coefficient3_c3(self):
"""field `Coefficient3 C3`
Args:
value (float): value for IDD Field `Coefficient3 C3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_c3` or None if not set
"""
return self["Coefficient3 C3"]
@coefficient3_c3.setter
def coefficient3_c3(self, value=None):
"""Corresponds to IDD field `Coefficient3 C3`"""
self["Coefficient3 C3"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for x`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for x`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for x"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for x`"""
self["Input Unit Type for x"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveDoubleExponentialDecay(DataObject):
""" Corresponds to IDD object `Curve:DoubleExponentialDecay`
Double exponential decay curve with one independent variable.
Input consists of the curve name, the five coefficients, and the maximum and minimum
valid independent variable values. Optional inputs for the curve minimum and
maximum may be used to limit the output of the performance curve.
curve = C1+C2*exp(C3*x)+C4*exp(C5*x)
Note: the source IDD labels the fourth and fifth coefficients
`Coefficient3 C4` and `Coefficient3 C5`; the schema and properties below
mirror that naming verbatim.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 c1',
{'name': u'Coefficient1 C1',
'pyname': u'coefficient1_c1',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 c2',
{'name': u'Coefficient2 C2',
'pyname': u'coefficient2_c2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 c3',
{'name': u'Coefficient3 C3',
'pyname': u'coefficient3_c3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 c4',
{'name': u'Coefficient3 C4',
'pyname': u'coefficient3_c4',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 c5',
{'name': u'Coefficient3 C5',
'pyname': u'coefficient3_c5',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for x',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:DoubleExponentialDecay',
'pyname': u'CurveDoubleExponentialDecay',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_c1(self):
"""field `Coefficient1 C1`
Args:
value (float): value for IDD Field `Coefficient1 C1`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_c1` or None if not set
"""
return self["Coefficient1 C1"]
@coefficient1_c1.setter
def coefficient1_c1(self, value=None):
"""Corresponds to IDD field `Coefficient1 C1`"""
self["Coefficient1 C1"] = value
@property
def coefficient2_c2(self):
"""field `Coefficient2 C2`
Args:
value (float): value for IDD Field `Coefficient2 C2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_c2` or None if not set
"""
return self["Coefficient2 C2"]
@coefficient2_c2.setter
def coefficient2_c2(self, value=None):
"""Corresponds to IDD field `Coefficient2 C2`"""
self["Coefficient2 C2"] = value
@property
def coefficient3_c3(self):
"""field `Coefficient3 C3`
Args:
value (float): value for IDD Field `Coefficient3 C3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_c3` or None if not set
"""
return self["Coefficient3 C3"]
@coefficient3_c3.setter
def coefficient3_c3(self, value=None):
"""Corresponds to IDD field `Coefficient3 C3`"""
self["Coefficient3 C3"] = value
@property
def coefficient3_c4(self):
"""field `Coefficient3 C4`
Args:
value (float): value for IDD Field `Coefficient3 C4`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_c4` or None if not set
"""
return self["Coefficient3 C4"]
@coefficient3_c4.setter
def coefficient3_c4(self, value=None):
"""Corresponds to IDD field `Coefficient3 C4`"""
self["Coefficient3 C4"] = value
@property
def coefficient3_c5(self):
"""field `Coefficient3 C5`
Args:
value (float): value for IDD Field `Coefficient3 C5`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_c5` or None if not set
"""
return self["Coefficient3 C5"]
@coefficient3_c5.setter
def coefficient3_c5(self, value=None):
"""Corresponds to IDD field `Coefficient3 C5`"""
self["Coefficient3 C5"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for x`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for x`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for x"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for x`"""
self["Input Unit Type for x"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
class CurveChillerPartLoadWithLift(DataObject):
""" Corresponds to IDD object `Curve:ChillerPartLoadWithLift`
This chiller part-load performance curve has three independent variables.
Input consists of the curve name, the twelve coefficients, and the maximum
and minimum valid independent variable values. Optional inputs for the curve minimum
and maximum may be used to limit the output of the performance curve.
curve = C1 + C2*x + C3*x**2 + C4*y + C5*y**2 + C6*x*y + C7*x**3
+ C8*y**3 + C9*x**2*y + C10*x*y**2 + C11*x**2*y**2 + C12*z*y**3
x = dT* = normalized fractional Lift = dT / dTref
y = PLR = part load ratio (cooling load/steady state capacity)
z = Tdev* = normalized Tdev = Tdev / dTref
Where:
dT = Lift = Leaving Condenser Water Temperature - Leaving Chilled Water Temperature
dTref = dT at the reference condition
Tdev = Leaving Chilled Water Temperature - Reference Chilled Water Temperature
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'coefficient1 c1',
{'name': u'Coefficient1 C1',
'pyname': u'coefficient1_c1',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient2 c2',
{'name': u'Coefficient2 C2',
'pyname': u'coefficient2_c2',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient3 c3',
{'name': u'Coefficient3 C3',
'pyname': u'coefficient3_c3',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient4 c4',
{'name': u'Coefficient4 C4',
'pyname': u'coefficient4_c4',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient5 c5',
{'name': u'Coefficient5 C5',
'pyname': u'coefficient5_c5',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient6 c6',
{'name': u'Coefficient6 C6',
'pyname': u'coefficient6_c6',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient7 c7',
{'name': u'Coefficient7 C7',
'pyname': u'coefficient7_c7',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient8 c8',
{'name': u'Coefficient8 C8',
'pyname': u'coefficient8_c8',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient9 c9',
{'name': u'Coefficient9 C9',
'pyname': u'coefficient9_c9',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient10 c10',
{'name': u'Coefficient10 C10',
'pyname': u'coefficient10_c10',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient11 c11',
{'name': u'Coefficient11 C11',
'pyname': u'coefficient11_c11',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'coefficient12 c12',
{'name': u'Coefficient12 C12',
'pyname': u'coefficient12_c12',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of x',
{'name': u'Minimum Value of x',
'pyname': u'minimum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of x',
'pyname': u'maximum_value_of_x',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of y',
{'name': u'Minimum Value of y',
'pyname': u'minimum_value_of_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of y',
{'name': u'Maximum Value of y',
'pyname': u'maximum_value_of_y',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of z',
{'name': u'Minimum Value of z',
'pyname': u'minimum_value_of_z',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of z',
{'name': u'Maximum Value of z',
'pyname': u'maximum_value_of_z',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum curve output',
{'name': u'Minimum Curve Output',
'pyname': u'minimum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum curve output',
{'name': u'Maximum Curve Output',
'pyname': u'maximum_curve_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for x',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'}),
(u'input unit type for y',
{'name': u'Input Unit Type for y',
'pyname': u'input_unit_type_for_y',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'}),
(u'input unit type for z',
{'name': u'Input Unit Type for z',
'pyname': u'input_unit_type_for_z',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Performance Curves',
'min-fields': 0,
'name': u'Curve:ChillerPartLoadWithLift',
'pyname': u'CurveChillerPartLoadWithLift',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def coefficient1_c1(self):
"""field `Coefficient1 C1`
Args:
value (float): value for IDD Field `Coefficient1 C1`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient1_c1` or None if not set
"""
return self["Coefficient1 C1"]
@coefficient1_c1.setter
def coefficient1_c1(self, value=None):
"""Corresponds to IDD field `Coefficient1 C1`"""
self["Coefficient1 C1"] = value
@property
def coefficient2_c2(self):
"""field `Coefficient2 C2`
Args:
value (float): value for IDD Field `Coefficient2 C2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient2_c2` or None if not set
"""
return self["Coefficient2 C2"]
@coefficient2_c2.setter
def coefficient2_c2(self, value=None):
"""Corresponds to IDD field `Coefficient2 C2`"""
self["Coefficient2 C2"] = value
@property
def coefficient3_c3(self):
"""field `Coefficient3 C3`
Args:
value (float): value for IDD Field `Coefficient3 C3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient3_c3` or None if not set
"""
return self["Coefficient3 C3"]
@coefficient3_c3.setter
def coefficient3_c3(self, value=None):
"""Corresponds to IDD field `Coefficient3 C3`"""
self["Coefficient3 C3"] = value
@property
def coefficient4_c4(self):
"""field `Coefficient4 C4`
Args:
value (float): value for IDD Field `Coefficient4 C4`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient4_c4` or None if not set
"""
return self["Coefficient4 C4"]
@coefficient4_c4.setter
def coefficient4_c4(self, value=None):
"""Corresponds to IDD field `Coefficient4 C4`"""
self["Coefficient4 C4"] = value
@property
def coefficient5_c5(self):
"""field `Coefficient5 C5`
Args:
value (float): value for IDD Field `Coefficient5 C5`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient5_c5` or None if not set
"""
return self["Coefficient5 C5"]
@coefficient5_c5.setter
def coefficient5_c5(self, value=None):
"""Corresponds to IDD field `Coefficient5 C5`"""
self["Coefficient5 C5"] = value
@property
def coefficient6_c6(self):
"""field `Coefficient6 C6`
Args:
value (float): value for IDD Field `Coefficient6 C6`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient6_c6` or None if not set
"""
return self["Coefficient6 C6"]
@coefficient6_c6.setter
def coefficient6_c6(self, value=None):
"""Corresponds to IDD field `Coefficient6 C6`"""
self["Coefficient6 C6"] = value
@property
def coefficient7_c7(self):
"""field `Coefficient7 C7`
Args:
value (float): value for IDD Field `Coefficient7 C7`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient7_c7` or None if not set
"""
return self["Coefficient7 C7"]
@coefficient7_c7.setter
def coefficient7_c7(self, value=None):
"""Corresponds to IDD field `Coefficient7 C7`"""
self["Coefficient7 C7"] = value
@property
def coefficient8_c8(self):
"""field `Coefficient8 C8`
Args:
value (float): value for IDD Field `Coefficient8 C8`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient8_c8` or None if not set
"""
return self["Coefficient8 C8"]
@coefficient8_c8.setter
def coefficient8_c8(self, value=None):
"""Corresponds to IDD field `Coefficient8 C8`"""
self["Coefficient8 C8"] = value
@property
def coefficient9_c9(self):
"""field `Coefficient9 C9`
Args:
value (float): value for IDD Field `Coefficient9 C9`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient9_c9` or None if not set
"""
return self["Coefficient9 C9"]
@coefficient9_c9.setter
def coefficient9_c9(self, value=None):
"""Corresponds to IDD field `Coefficient9 C9`"""
self["Coefficient9 C9"] = value
@property
def coefficient10_c10(self):
"""field `Coefficient10 C10`
Args:
value (float): value for IDD Field `Coefficient10 C10`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient10_c10` or None if not set
"""
return self["Coefficient10 C10"]
@coefficient10_c10.setter
def coefficient10_c10(self, value=None):
"""Corresponds to IDD field `Coefficient10 C10`"""
self["Coefficient10 C10"] = value
@property
def coefficient11_c11(self):
"""field `Coefficient11 C11`
Args:
value (float): value for IDD Field `Coefficient11 C11`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient11_c11` or None if not set
"""
return self["Coefficient11 C11"]
@coefficient11_c11.setter
def coefficient11_c11(self, value=None):
"""Corresponds to IDD field `Coefficient11 C11`"""
self["Coefficient11 C11"] = value
@property
def coefficient12_c12(self):
"""field `Coefficient12 C12`
Args:
value (float): value for IDD Field `Coefficient12 C12`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `coefficient12_c12` or None if not set
"""
return self["Coefficient12 C12"]
@coefficient12_c12.setter
def coefficient12_c12(self, value=None):
"""Corresponds to IDD field `Coefficient12 C12`"""
self["Coefficient12 C12"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Minimum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of x"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of x`"""
self["Minimum Value of x"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of x`
| Units are based on field `A2`
Args:
value (float): value for IDD Field `Maximum Value of x`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of x"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of x`"""
self["Maximum Value of x"] = value
@property
def minimum_value_of_y(self):
"""field `Minimum Value of y`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Minimum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_y` or None if not set
"""
return self["Minimum Value of y"]
@minimum_value_of_y.setter
def minimum_value_of_y(self, value=None):
"""Corresponds to IDD field `Minimum Value of y`"""
self["Minimum Value of y"] = value
@property
def maximum_value_of_y(self):
"""field `Maximum Value of y`
| Units are based on field `A3`
Args:
value (float): value for IDD Field `Maximum Value of y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_y` or None if not set
"""
return self["Maximum Value of y"]
@maximum_value_of_y.setter
def maximum_value_of_y(self, value=None):
"""Corresponds to IDD field `Maximum Value of y`"""
self["Maximum Value of y"] = value
@property
def minimum_value_of_z(self):
"""field `Minimum Value of z`
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Minimum Value of z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_z` or None if not set
"""
return self["Minimum Value of z"]
@minimum_value_of_z.setter
def minimum_value_of_z(self, value=None):
"""Corresponds to IDD field `Minimum Value of z`"""
self["Minimum Value of z"] = value
@property
def maximum_value_of_z(self):
"""field `Maximum Value of z`
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Maximum Value of z`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_z` or None if not set
"""
return self["Maximum Value of z"]
@maximum_value_of_z.setter
def maximum_value_of_z(self, value=None):
"""Corresponds to IDD field `Maximum Value of z`"""
self["Maximum Value of z"] = value
@property
def minimum_curve_output(self):
"""field `Minimum Curve Output`
| Specify the minimum value calculated by this curve object
| Units are based on field `A5`
Args:
value (float): value for IDD Field `Minimum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_curve_output` or None if not set
"""
return self["Minimum Curve Output"]
@minimum_curve_output.setter
def minimum_curve_output(self, value=None):
"""Corresponds to IDD field `Minimum Curve Output`"""
self["Minimum Curve Output"] = value
@property
def maximum_curve_output(self):
"""field `Maximum Curve Output`
| Specify the maximum value calculated by this curve object
| Units are based on field `A5`
Args:
value (float): value for IDD Field `Maximum Curve Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_curve_output` or None if not set
"""
return self["Maximum Curve Output"]
@maximum_curve_output.setter
def maximum_curve_output(self, value=None):
"""Corresponds to IDD field `Maximum Curve Output`"""
self["Maximum Curve Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for x`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for x`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for x"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for x`"""
self["Input Unit Type for x"] = value
@property
def input_unit_type_for_y(self):
"""field `Input Unit Type for y`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for y`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_y` or None if not set
"""
return self["Input Unit Type for y"]
@input_unit_type_for_y.setter
def input_unit_type_for_y(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for y`"""
self["Input Unit Type for y"] = value
@property
def input_unit_type_for_z(self):
"""field `Input Unit Type for z`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for z`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_z` or None if not set
"""
return self["Input Unit Type for z"]
@input_unit_type_for_z.setter
def input_unit_type_for_z(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for z`"""
self["Input Unit Type for z"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
| apache-2.0 | -385,419,006,265,597,700 | 35.583889 | 110 | 0.438032 | false | 4.882726 | false | false | false |
jeromecc/doctoctocbot | src/ip/host.py | 1 | 1118 | import socket
import logging
from django.core.cache import cache
from django.conf import settings
logger = logging.getLogger(__name__)
def hostname_ip(hostname):
try:
host_ip = socket.gethostbyname(hostname)
logger.debug(f"Hostname : {hostname}, IP: {host_ip}")
return host_ip
    except OSError:  # socket.gaierror (lookup failure) subclasses OSError
logger.debug(f"Unable to get IP for {hostname}")
return None
def ip_yours(request):
logger.debug(request.META)
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
logger.debug(f'HTTP_X_FORWARDED_FOR: {x_forwarded_for}')
ip = x_forwarded_for.split(',')[0].strip()
elif request.META.get('HTTP_X_REAL_IP'):
ip = request.META.get('HTTP_X_REAL_IP')
logger.debug(f'HTTP_X_REAL_IP: {ip}')
else:
ip = request.META.get('REMOTE_ADDR')
logger.debug(f'REMOTE_ADDR: {ip}')
return ip
def set_discourse_ip_cache():
discourse_ip = hostname_ip(settings.DISCOURSE_HOSTNAME)
cache.set(settings.DISCOURSE_IP_CACHE_KEY, discourse_ip, settings.DISCOURSE_IP_CACHE_TTL)
return discourse_ip | mpl-2.0 | 3,690,792,907,844,767,000 | 31.911765 | 93 | 0.655635 | false | 3.25 | false | false | false |
trondth/quizbot | Question.py | 1 | 3425 | #import GlobalConfig as conf
import glob
import re
import sqlite3
import string
import datetime
import time
from random import randint # TODO: can be deleted
from random import choice
VERBOSE = True
class Question(object):
def __init__(self, cfg):
"""
Set up a new question object.
@type cfg: dict
@param cfg: Config data
"""
self.cfg = cfg
self.data = {}
self.status = 'new'
self.regex = None
self.qid = 1 # TODO
self.active = False # TODO control if question is active
def getStatus(self):
"""
@return: Status in question loop
"""
return self.status
def getID(self):
"""
TODO
"""
return 1
def reset(self):
"""
Resets status for question.
"""
self.status = 'new'
def __str__(self):
"""
@return: Question string
"""
return self.data['Question']
def askQuestion(self, c, n):
"""
Returns formatted question for channel.
@type c: string
@param c: channel
@type n: number
@param n: Question number
"""
self.status = 0
return [(0, c, "Question {}: {}".format(n, self.__str__()))]
def tip_n(self):
"""
@return: Number of possible tips
"""
tmp = len(self.data['Tip'])
if tmp > self.cfg['max_tips']:
return self.cfg['max_tips']
return len(self.data['Tip'])
def giveTip(self):
"""
@return: Next tip if exist, else returns correct answer.
"""
if VERBOSE: print("Len-tip: {}".format(len(self.data['Tip'])))
if VERBOSE: print("give tip ... {}".format(self.status))
if self.tip_n() > self.status + 1:
self.status += 1
return self.data['Tip'][self.status - 1]
else:
self.status = 'finished'
return self.data['Answer']
# return self.data['Tip']
def stringToQuestion(self, qstring, source):
"""
Creates question from string.
@type qstring: string
@param qstring: Question formatted as string
@type source: string
@param source: Source name of string
"""
#print("stringToQuestion: {}".format(qstring))
tmp = qstring.splitlines()
self.data = dict([l.strip() for l in line.split(':',1)] for line in tmp if line != '' and line[0:3] != 'Tip')
self.data['Tip'] = [ line[4:].strip() for line in tmp if line[0:3] == 'Tip']
if len(self.data['Tip']) == 0:
self.data['Tip'] = self.createTip(self.data['Answer'])
self.data['Source'] = source
if 'Regexp' in self.data:
self.regex = re.compile(self.data['Regexp'], re.IGNORECASE)
else:
self.regex = re.compile(self.data['Answer'], re.IGNORECASE)
#print self.data
def createTip(self, qstring):
"""
Creates tips.
TODO: Improve tips - ignore whitespace.
@return: list of tips.
"""
tmp = []
i = 0
while i < len(qstring) and i < self.cfg['tip_freq']:
tmp.append(''.join(c if (j-i) % self.cfg['tip_freq'] == 0 or c == ' ' else '.' for j,c in enumerate(qstring)))
i += 1
#print tmp
return tmp[:-1]
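# --- Illustrative usage sketch (editor's addition). The cfg keys below
# ('max_tips', 'tip_freq') are the only ones this class reads; the question
# string follows the "Key: value" format parsed by stringToQuestion().
#
#   cfg = {'max_tips': 3, 'tip_freq': 4}
#   q = Question(cfg)
#   q.stringToQuestion("Question: Capital of Norway?\nAnswer: Oslo", "demo")
#   q.askQuestion('#quiz', 1)   # resets the tip counter to 0
#   q.giveTip()                 # masked answer such as 'O...'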
| gpl-3.0 | 4,289,356,896,538,422,300 | 26.4 | 122 | 0.515912 | false | 3.982558 | false | false | false |
amirajdhawan/management | hw0plot.py | 2 | 1196 | #!/usr/bin/env python
"""
Plot histograms of the HW0 self-assessment results.
"""
import yaml
import numpy as np
import matplotlib.pyplot as plt
def gather_scores(recs, name):
"""Gather a set of 1-5 scores from student response records
Args:
recs: list of student responses
name: name of self-assessment score to consider
Returns:
length-5 histogram of how many self-assessed as levels 1-5
"""
scores = np.zeros(5)
for rec in recs:
if name in rec:
scores[int(rec[name]-1)] += 1
return scores
def score_plot(recs, name):
"""Produce a histogram plot file for a HW0 score.
Args:
recs: list of student responses
name: name of self-assessment score to consider
"""
ind = np.arange(5)+1
scores = gather_scores(recs, name)
plt.figure()
plt.bar(ind-0.4, scores, 0.8)
plt.title(name)
plt.savefig("hw0-{0}.pdf".format(name))
if __name__ == "__main__":
attrs = ['git', 'shell', 'c', 'python', 'architecture',
'concurrency', 'numerics']
with open("hw0.yml", "r") as f:
        recs = yaml.safe_load(f)  # safe_load: plain data only, no arbitrary objects
for attr in attrs:
score_plot(recs, attr)
| mit | -6,631,796,793,994,061,000 | 22.45098 | 66 | 0.605351 | false | 3.538462 | false | false | false |
somia/pgcs | pgcs/html/diff.py | 1 | 9584 | import difflib
import pgcs.core.data
import pgcs.core.diff
core = pgcs.core
from . import tags
database_objects = None
def get_colors(values):
groups = {}
for value, group in values:
if value is not None:
assert group >= 0
count = groups.get(group, 0)
groups[group] = count + 1
else:
assert group == -1
def get_sorting(item):
group, count = item
return -count
def get_group(item):
group, count = item
return group
return [get_group(i) for i in sorted(groups.iteritems(), key=get_sorting)]
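# Illustrative note (editor's addition): get_colors() orders the group ids by
# how many columns carry each group, most frequent first, e.g.
#   get_colors([(a, 0), (b, 1), (c, 1), (None, -1)])  ->  [1, 0]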
def gen_columns(parent, diff):
colors = get_colors(diff.values)
span = parent.span["columns"]
for column, (value, group) in enumerate(diff.values):
classes = ["column-%d" % column]
content = ""
if value is None:
classes.append("miss")
else:
classes.append("have")
color = colors.index(group)
classes.append("color-%d" % color)
if isinstance(value, core.data.Table):
if value.has_content is core.data.unknown:
content = "?"
elif value.has_content:
content = "1"
else:
content = "0"
span.span[classes].div[:] = content
def gen_named_object_list(parent, diff, name=None):
if diff:
element = parent.div["list"]
if name:
element.div["head"].span["name"][:] = name
for entry in diff.entries:
kind, func = diff_types[type(entry.diff)]
div = element.div["entry"]
if entry.value:
div.div["expander"][:] = "+"
div.span["type"][:] = kind
div.span["name"][:] = entry.name
gen_columns(div, entry.value)
if entry.value:
children = div.div["children"]
func(children, entry.diff)
def gen_value(parent, diff, name, is_list=False):
if diff:
cls = "value"
if is_list:
cls = "list"
element = parent.div[cls]
head = element.div["head"]
head.span["name"][:] = name
table = element.table
obis_by_group = []
dbis_by_group = []
for group in xrange(diff.groups):
obis = []
obis_by_group.append(obis)
dbis = []
dbis_by_group.append(dbis)
for i, (o, g) in enumerate(diff.values):
if g == group:
obis.append(i)
dbis.append(i)
colors = get_colors(diff.values)
tr = table.tr
for color, group in enumerate(colors):
dbis = dbis_by_group[group]
dbns = [database_objects[i].get_name() for i in dbis]
tr.th["color-%d" % color].div[:] = " ".join(dbns)
def listlen(l):
if l is None:
return 0
else:
return len(l)
if is_list:
if len(colors) == 2:
lists = [diff.lists[obis_by_group[g][0]] for g in colors]
gen_2column(table, *lists)
else:
for i in xrange(max([listlen(l) for l in diff.lists])):
tr = table.tr
for group in colors:
obi = obis_by_group[group][0]
lis = diff.lists[obi]
td = tr.td
if i < len(lis):
td.div[:] = dump_column(lis[i])
elif isinstance(diff, core.diff.ObjectValue):
tr = table.tr
for group in colors:
obi = obis_by_group[group][0]
obj = diff.objects[obi]
tr.td.div[:] = ".".join(obj.flatten())
else:
tr = table.tr
for group in colors:
obi = obis_by_group[group][0]
val, grp = diff.values[obi]
try:
content = unicode(val)
except:
content = "?"
tr.td.div[:] = content
def gen_ordered_object_list(parent, diff, name):
gen_value(parent, diff, name, True)
def dump_column(obj):
s = "%s %s" % (obj.name, obj.type.name)
if obj.notnull:
s += " notnull"
if obj.default:
s += " %s" % obj.default
return s
class NamedHash(object):
def __init__(self, object):
self.object = object
def __hash__(self):
return hash(self.object.name)
def __eq__(self, other):
return self.object.name == other.object.name
def gen_2column(table, seq1, seq2):
hash1 = [NamedHash(o) for o in seq1]
hash2 = [NamedHash(o) for o in seq2]
match = difflib.SequenceMatcher(a=hash1, b=hash2)
for tag, i1, i2, j1, j2 in match.get_opcodes():
if tag == "delete":
for obj in seq1[i1:i2]:
tr = table.tr
tr.td.div[:] = dump_column(obj)
tr.td
elif tag == "insert":
for obj in seq2[j1:j2]:
tr = table.tr
tr.td
tr.td.div[:] = dump_column(obj)
elif tag in ("replace", "equal"):
for n in xrange(i2 - i1):
tr = table.tr
if i1 + n < i2:
obj1 = seq1[i1 + n]
tr.td.div[:] = dump_column(obj1)
else:
tr.td
if j1 + n < j2:
obj2 = seq2[j1 + n]
tr.td.div[:] = dump_column(obj2)
else:
tr.td
# Database
def gen_database(tree, diff):
div = tree.div["database"]
gen_database_head(div, diff)
gen_database_body(div, diff)
def gen_database_head(parent, diff):
span = parent.div["head"].span["columns"]
for column, obj in enumerate(diff.objects):
span.span[("column-%d" % column)][:] = obj.get_name()
def gen_database_body(parent, diff):
body = parent.div["body"]
body.div["expander"][:] = "+"
div = body.div["children"]
gen_named_object_list(div, diff.languages)
gen_named_object_list(div, diff.namespaces)
# Language
def gen_language(div, diff):
gen_value(div, diff.owner, "owner")
# Namespace
def gen_namespace(div, diff):
gen_value(div, diff.owner, "owner")
gen_named_object_list(div, diff.types)
gen_named_object_list(div, diff.composites)
gen_named_object_list(div, diff.indexes)
gen_named_object_list(div, diff.tables)
gen_named_object_list(div, diff.views)
gen_named_object_list(div, diff.sequences)
gen_named_object_list(div, diff.functions)
gen_named_object_list(div, diff.operators)
gen_named_object_list(div, diff.opclasses)
# Type
def gen_type(div, diff):
gen_value(div, diff.owner, "owner")
gen_value(div, diff.notnull, "notnull")
gen_value(div, diff.default, "default")
def gen_domain(div, diff):
gen_type(div, diff)
gen_value(div, diff.basetype, "basetype")
gen_named_object_list(div, diff.constraints, "constraints")
# Function
def gen_function(div, diff):
gen_value(div, diff.owner, "owner")
gen_value(div, diff.language, "language")
gen_value(div, diff.rettype, "rettype")
gen_value(div, diff.argtypes, "argtypes")
gen_value(div, diff.source1, "source1")
gen_value(div, diff.source2, "source2")
# Relation
def gen_relation(div, diff):
gen_value(div, diff.owner, "owner")
gen_ordered_object_list(div, diff.columns, "columns")
def gen_rule_relation(div, diff):
gen_relation(div, diff)
gen_named_object_list(div, diff.rules, "rules")
def gen_table(div, diff):
gen_rule_relation(div, diff)
gen_named_object_list(div, diff.triggers, "triggers")
gen_named_object_list(div, diff.constraints, "constraints")
# Sequence
def gen_sequence(div, diff):
gen_value(div, diff.owner, "owner")
gen_value(div, diff.increment, "increment")
gen_value(div, diff.minimum, "minimum")
gen_value(div, diff.maximum, "maximum")
# Column
def gen_column(div, diff):
gen_value(div, diff.type, "type")
gen_value(div, diff.notnull, "notnull")
gen_value(div, diff.default, "default")
# Constraint
def gen_constraint(div, diff):
gen_value(div, diff.definition, "definition")
def gen_column_constraint(div, diff):
gen_constraint(div, diff)
gen_ordered_object_list(div, diff.columns, "columns")
def gen_foreign_key(div, diff):
gen_column_constraint(div, diff)
gen_value(div, diff.foreign_table, "foreign-table")
gen_ordered_object_list(div, diff.foreign_columns, "foreign-columns")
# Trigger
def gen_trigger(div, diff):
gen_value(div, diff.function, "function")
gen_value(div, diff.description, "description")
# Rule
def gen_rule(div, diff):
gen_value(div, diff.definition, "definition")
# Operator
def gen_operator(div, diff):
gen_value(div, diff.owner, "owner")
def gen_operator_class(div, diff):
gen_value(div, diff.owner, "owner")
gen_value(div, diff.intype, "intype")
gen_value(div, diff.default, "default")
gen_value(div, diff.keytype, "keytype")
diff_types = {
core.diff.CheckColumnConstraint: ("check-column-constraint", gen_column_constraint),
core.diff.CheckConstraint: ("check-constraint", gen_constraint),
core.diff.Column: ("column", gen_column),
core.diff.Composite: ("composite", gen_relation),
core.diff.Domain: ("domain", gen_domain),
core.diff.ForeignKey: ("foreign-key", gen_foreign_key),
core.diff.Function: ("function", gen_function),
core.diff.Index: ("index", gen_relation),
core.diff.Language: ("language", gen_language),
core.diff.Namespace: ("namespace", gen_namespace),
core.diff.Operator: ("operator", gen_operator),
core.diff.OperatorClass: ("operator-class", gen_operator_class),
core.diff.PrimaryKey: ("primary-key", gen_column_constraint),
core.diff.Rule: ("rule", gen_rule),
core.diff.Sequence: ("sequence", gen_sequence),
core.diff.Table: ("table", gen_table),
core.diff.Trigger: ("trigger", gen_trigger),
core.diff.Type: ("type", gen_type),
core.diff.UniqueColumnConstraint: ("unique-column-constraint", gen_column_constraint),
core.diff.UniqueConstraint: ("unique-constraint", gen_constraint),
core.diff.View: ("view", gen_rule_relation),
}
def generate(diff):
global database_objects
database_objects = diff.objects
tree = tags.TagTree()
gen_database(tree, diff)
return tree.get_element_tree()
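# --- Illustrative usage sketch (editor's addition). `database_diff` stands in
# for a pgcs.core.diff result; generate() hands back whatever tags.TagTree
# builds (an ElementTree-style object), e.g.:
#
#   tree = generate(database_diff)
#   tree.write('diff.html')   # assuming the usual ElementTree interface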
| mit | 4,085,317,844,343,348,000 | 25.043478 | 87 | 0.623644 | false | 2.882406 | false | false | false |
kaedroho/django | django/core/files/storage.py | 18 | 14486 | import os
from datetime import datetime
from urllib.parse import urljoin
from django.conf import settings
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import File, locks
from django.core.files.move import file_move_safe
from django.core.signals import setting_changed
from django.utils import timezone
from django.utils._os import safe_join
from django.utils.crypto import get_random_string
from django.utils.deconstruct import deconstructible
from django.utils.encoding import filepath_to_uri
from django.utils.functional import LazyObject, cached_property
from django.utils.module_loading import import_string
from django.utils.text import get_valid_filename
__all__ = (
'Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage',
'get_storage_class',
)
class Storage:
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode='rb'):
"""Retrieve the specified file from storage."""
return self._open(name, mode)
def save(self, name, content, max_length=None):
"""
Save new content to the file specified by name. The content should be
a proper File object or any Python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
if not hasattr(content, 'chunks'):
content = File(content, name)
name = self.get_available_name(name, max_length=max_length)
return self._save(name, content)
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Return a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_alternative_name(self, file_root, file_ext):
"""
Return an alternative filename, by adding an underscore and a random 7
character alphanumeric string (before the file extension, if one
exists) to the filename.
"""
return '%s_%s%s' % (file_root, get_random_string(7), file_ext)
def get_available_name(self, name, max_length=None):
"""
Return a filename that's free on the target storage system and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, generate an alternative filename
# until it doesn't exist.
# Truncate original name if required, so the new filename does not
# exceed the max_length.
while self.exists(name) or (max_length and len(name) > max_length):
# file_ext includes the dot.
name = os.path.join(dir_name, self.get_alternative_name(file_root, file_ext))
if max_length is None:
continue
# Truncate file_root if max_length exceeded.
truncation = len(name) - max_length
if truncation > 0:
file_root = file_root[:-truncation]
# Entire file_root was truncated in attempt to find an available filename.
if not file_root:
raise SuspiciousFileOperation(
'Storage can not find an available filename for "%s". '
'Please make sure that the corresponding file field '
'allows sufficient "max_length".' % name
)
name = os.path.join(dir_name, self.get_alternative_name(file_root, file_ext))
return name
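    # Illustrative note (editor's addition): with max_length=20, a taken name
    # like 'report.txt' becomes e.g. 'report_a1b2c3d.txt'; file_root is
    # truncated further if the result would still exceed max_length.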
def generate_filename(self, filename):
"""
Validate the filename by calling get_valid_name() and return a filename
to be passed to the save() method.
"""
# `filename` may include a path as returned by FileField.upload_to.
dirname, filename = os.path.split(filename)
return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename)))
def path(self, name):
"""
Return a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Delete the specified file from the storage system.
"""
raise NotImplementedError('subclasses of Storage must provide a delete() method')
def exists(self, name):
"""
Return True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError('subclasses of Storage must provide an exists() method')
def listdir(self, path):
"""
List the contents of the specified path. Return a 2-tuple of lists:
the first item being directories, the second item being files.
"""
raise NotImplementedError('subclasses of Storage must provide a listdir() method')
def size(self, name):
"""
Return the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError('subclasses of Storage must provide a size() method')
def url(self, name):
"""
Return an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError('subclasses of Storage must provide a url() method')
def get_accessed_time(self, name):
"""
Return the last accessed time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
raise NotImplementedError('subclasses of Storage must provide a get_accessed_time() method')
def get_created_time(self, name):
"""
Return the creation time (as a datetime) of the file specified by name.
The datetime will be timezone-aware if USE_TZ=True.
"""
raise NotImplementedError('subclasses of Storage must provide a get_created_time() method')
def get_modified_time(self, name):
"""
Return the last modified time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
raise NotImplementedError('subclasses of Storage must provide a get_modified_time() method')
@deconstructible
class FileSystemStorage(Storage):
"""
Standard filesystem storage
"""
# The combination of O_CREAT and O_EXCL makes os.open() raise OSError if
# the file already exists before it's opened.
OS_OPEN_FLAGS = os.O_WRONLY | os.O_CREAT | os.O_EXCL | getattr(os, 'O_BINARY', 0)
def __init__(self, location=None, base_url=None, file_permissions_mode=None,
directory_permissions_mode=None):
self._location = location
self._base_url = base_url
self._file_permissions_mode = file_permissions_mode
self._directory_permissions_mode = directory_permissions_mode
setting_changed.connect(self._clear_cached_properties)
def _clear_cached_properties(self, setting, **kwargs):
"""Reset setting based property values."""
if setting == 'MEDIA_ROOT':
self.__dict__.pop('base_location', None)
self.__dict__.pop('location', None)
elif setting == 'MEDIA_URL':
self.__dict__.pop('base_url', None)
elif setting == 'FILE_UPLOAD_PERMISSIONS':
self.__dict__.pop('file_permissions_mode', None)
elif setting == 'FILE_UPLOAD_DIRECTORY_PERMISSIONS':
self.__dict__.pop('directory_permissions_mode', None)
def _value_or_setting(self, value, setting):
return setting if value is None else value
@cached_property
def base_location(self):
return self._value_or_setting(self._location, settings.MEDIA_ROOT)
@cached_property
def location(self):
return os.path.abspath(self.base_location)
@cached_property
def base_url(self):
if self._base_url is not None and not self._base_url.endswith('/'):
self._base_url += '/'
return self._value_or_setting(self._base_url, settings.MEDIA_URL)
@cached_property
def file_permissions_mode(self):
return self._value_or_setting(self._file_permissions_mode, settings.FILE_UPLOAD_PERMISSIONS)
@cached_property
def directory_permissions_mode(self):
return self._value_or_setting(self._directory_permissions_mode, settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS)
def _open(self, name, mode='rb'):
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
# Create any intermediate directories that do not exist.
directory = os.path.dirname(full_path)
try:
if self.directory_permissions_mode is not None:
# os.makedirs applies the global umask, so we reset it,
# for consistency with file_permissions_mode behavior.
old_umask = os.umask(0)
try:
os.makedirs(directory, self.directory_permissions_mode, exist_ok=True)
finally:
os.umask(old_umask)
else:
os.makedirs(directory, exist_ok=True)
except FileExistsError:
raise FileExistsError('%s exists and is not a directory.' % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, 'temporary_file_path'):
file_move_safe(content.temporary_file_path(), full_path)
# This is a normal uploadedfile that we can stream.
else:
# The current umask value is masked out by os.open!
fd = os.open(full_path, self.OS_OPEN_FLAGS, 0o666)
_file = None
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
if _file is None:
mode = 'wb' if isinstance(chunk, bytes) else 'wt'
_file = os.fdopen(fd, mode)
_file.write(chunk)
finally:
locks.unlock(fd)
if _file is not None:
_file.close()
else:
os.close(fd)
except FileExistsError:
# A new name is needed if the file exists.
name = self.get_available_name(name)
full_path = self.path(name)
else:
# OK, the file save worked. Break out of the loop.
break
if self.file_permissions_mode is not None:
os.chmod(full_path, self.file_permissions_mode)
# Store filenames with forward slashes, even on Windows.
return str(name).replace('\\', '/')
def delete(self, name):
assert name, "The name argument is not allowed to be empty."
name = self.path(name)
# If the file or directory exists, delete it from the filesystem.
try:
if os.path.isdir(name):
os.rmdir(name)
else:
os.remove(name)
except FileNotFoundError:
# FileNotFoundError is raised if the file or directory was removed
# concurrently.
pass
def exists(self, name):
return os.path.exists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
for entry in os.scandir(path):
if entry.is_dir():
directories.append(entry.name)
else:
files.append(entry.name)
return directories, files
def path(self, name):
return safe_join(self.location, name)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
url = filepath_to_uri(name)
if url is not None:
url = url.lstrip('/')
return urljoin(self.base_url, url)
def _datetime_from_timestamp(self, ts):
"""
If timezone support is enabled, make an aware datetime object in UTC;
otherwise make a naive one in the local timezone.
"""
if settings.USE_TZ:
# Safe to use .replace() because UTC doesn't have DST
return datetime.utcfromtimestamp(ts).replace(tzinfo=timezone.utc)
else:
return datetime.fromtimestamp(ts)
def get_accessed_time(self, name):
return self._datetime_from_timestamp(os.path.getatime(self.path(name)))
def get_created_time(self, name):
return self._datetime_from_timestamp(os.path.getctime(self.path(name)))
def get_modified_time(self, name):
return self._datetime_from_timestamp(os.path.getmtime(self.path(name)))
def get_storage_class(import_path=None):
return import_string(import_path or settings.DEFAULT_FILE_STORAGE)
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class()()
default_storage = DefaultStorage()
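# --- Illustrative usage sketch (editor's addition, not part of Django).
# FileSystemStorage hands back the name actually used, which may differ from
# the requested one when it was already taken:
#
#   from django.core.files.base import ContentFile
#   storage = FileSystemStorage(location='/tmp/media', base_url='/media/')
#   name = storage.save('notes.txt', ContentFile(b'hello'))
#   storage.url(name)   # -> '/media/notes.txt' (or a suffixed variant)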
| bsd-3-clause | -3,029,959,557,593,560,000 | 38.47139 | 115 | 0.613627 | false | 4.429969 | false | false | false |
devsam/hello-world | src/exam1.py | 1 | 5225 | # exam1.py
import sqlite3
import uuid
from flask import Flask, request, jsonify
DATABASE = 'exam.db'
app = Flask(__name__)
app.config.from_object(__name__)
"""
HTTP GET: list of all users
@Request: Nothing
@Response: Json list
[
{
"id": [10-digit integer],
"name": "[user name]",
"salary": [integer]
},
{
"id": 3645825710,
"name": "Mobigen2",
"salary": 20000
}
]
"""
@app.route('/users', methods=['GET'])
def list_all_users():
# table check
_check_table()
return _select_all_users()
# HTTP GET: list of specific user
# @Request: /users/<integer_user_id>
# @Response: Json
# {
# "id": [10-digit integer],
# "name": "[user name]",
# "salary": [integer]
# }
@app.route('/users/<int:user_id>', methods=['GET'])
def list_user(user_id=None):
# table check
_check_table()
return _select_user(user_id)
# HTTP POST: insert a new user
# @Request: Json
# {
# "name": "[user name]",
# "salary": [integer]
# }
# @Response: Json
# {
# "id": [10-digit integer]
# }
@app.route('/users', methods=['POST'])
def create_users():
# table check
_check_table()
return _insert_users(request.get_json())
# HTTP PUT: update a user
# @Request: /users/<integer_user_id>, Json(user info)
# {
# "name": "[user name]",
# "salary": [integer]
# }
# @Response: Json
# {
# "id": [10-digit integer]
# }
@app.route('/users/<user_id>', methods=['PUT'])
def modify_user(user_id=None):
# Table check
_check_table()
return _update_user(user_id, request.get_json())
# HTTP DELETE: delete a user
# @Request: /users/<integer_user_id>
# @Response: Json
# {
# "id": [10-digit integer]
# }
@app.route('/users/<user_id>', methods=['DELETE'])
def remove_user(user_id=None):
    # Table check
    _check_table()
    return _delete_user(user_id)
# Check if the table exists.
def _check_table():
conn = sqlite3.connect(DATABASE)
cur = conn.cursor()
cur.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='user';")
rs = cur.fetchall()
if len(rs) <= 0:
# Create table when table doesn't exist.
_create_table()
cur.close()
conn.close()
# Create Table
def _create_table():
conn = sqlite3.connect(DATABASE)
cur = conn.cursor()
cur.execute(
"CREATE TABLE IF NOT EXISTS "
"user(id int PRIMARY KEY, name text, salary int);")
conn.commit()
cur.close()
conn.close()
print "CREATE TABLE"
return None
# Select all users and return it in json format.
def _select_all_users():
conn = sqlite3.connect(DATABASE)
cur = conn.cursor()
cur.execute("SELECT * FROM user;")
# return SQL table as JSON in python.
rv = [dict((cur.description[i][0], value) for i, value in enumerate(row))
for row in cur.fetchall()]
if len(rv) > 0:
cur.close()
conn.close()
return jsonify(rv)
else:
cur.close()
conn.close()
# If empty table return empty.
return jsonify({"HTTP": "GET", "status": "all_empty"})
# Select specific user and return it in json format.
def _select_user(reqdata):
conn = sqlite3.connect(DATABASE)
cur = conn.cursor()
cur.execute("SELECT * FROM user WHERE id=?;", (reqdata,))
# return SQL table as JSON in python.
rv = [dict((cur.description[i][0], value) for i, value in enumerate(row))
for row in cur.fetchall()]
if len(rv) > 0:
cur.close()
conn.close()
return jsonify(rv)
else:
cur.close()
conn.close()
# if empty table
return jsonify({"HTTP": "GET", "status": "empty"})
# Insert a new user and returns generated ID in json format.
def _insert_users(reqdata):
# If request body is empty.
if reqdata is None:
return jsonify({"HTTP": "POST", "status": "empty"})
conn = sqlite3.connect(DATABASE)
cur = conn.cursor()
# Generate 32bit integer UUID
int_uuid = uuid.uuid4().int & (1 << 32)-1
# Insert users data, id generated uuid.
cur.execute(
"insert into user values(?,?,?);",
(int_uuid, reqdata['name'], reqdata['salary']))
conn.commit()
cur.close()
conn.close()
return jsonify({"id": int_uuid})
# Update a user and return ID in json format.
def _update_user(user_id, reqdata):
conn = sqlite3.connect(DATABASE)
cur = conn.cursor()
cur.execute("UPDATE user SET name=?, salary=? WHERE id=?;",
(reqdata['name'], reqdata['salary'], user_id))
conn.commit()
cur.close()
conn.close()
return jsonify({"id": user_id})
# Delete a user and return ID in json format.
def _delete_user(user_id):
conn = sqlite3.connect(DATABASE)
cur = conn.cursor()
cur.execute("DELETE FROM user WHERE id=?;", (user_id,))
conn.commit()
cur.close()
conn.close()
return jsonify({"id": user_id})
# Drop Table: Only for testing.
@app.route('/reset')
def _drop_table():
conn = sqlite3.connect(DATABASE)
cur = conn.cursor()
cur.execute("DROP TABLE 'user';")
conn.commit()
cur.close()
conn.close()
return "DROP TABLE"
# Flask App running on localhost:8000
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000)
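# --- Illustrative smoke test (editor's addition): with the app running,
#   curl -X POST localhost:8000/users -H 'Content-Type: application/json' \
#        -d '{"name": "Mobigen", "salary": 10000}'   # -> {"id": <10-digit id>}
#   curl localhost:8000/users                        # -> list of all users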
| unlicense | 7,909,483,822,100,553,000 | 22.642534 | 77 | 0.594641 | false | 3.306962 | false | false | false |
dylanleong/mysite | reference/calculate.py | 1 | 3726 | from math import sqrt, pi, sin, cos, tan, atan2 as arctan2
def grid_to_longlat(E,N):
#E, N are the British national grid coordinates - eastings and northings
    a, b = 6377563.396, 6356256.909 #The Airy 1830 semi-major and semi-minor axes used for OSGB36 (m)
F0 = 0.9996012717 #scale factor on the central meridian
lat0 = 49*pi/180 #Latitude of true origin (radians)
lon0 = -2*pi/180 #Longtitude of true origin and central meridian (radians)
N0, E0 = -100000, 400000 #Northing & easting of true origin (m)
e2 = 1 - (b*b)/(a*a) #eccentricity squared
n = (a-b)/(a+b)
#Initialise the iterative variables
lat,M = lat0, 0
while N-N0-M >= 0.00001: #Accurate to 0.01mm
lat = (N-N0-M)/(a*F0) + lat;
M1 = (1 + n + (5./4)*n**2 + (5./4)*n**3) * (lat-lat0)
M2 = (3*n + 3*n**2 + (21./8)*n**3) * sin(lat-lat0) * cos(lat+lat0)
M3 = ((15./8)*n**2 + (15./8)*n**3) * sin(2*(lat-lat0)) * cos(2*(lat+lat0))
M4 = (35./24)*n**3 * sin(3*(lat-lat0)) * cos(3*(lat+lat0))
#meridional arc
M = b * F0 * (M1 - M2 + M3 - M4)
#transverse radius of curvature
nu = a*F0/sqrt(1-e2*sin(lat)**2)
#meridional radius of curvature
rho = a*F0*(1-e2)*(1-e2*sin(lat)**2)**(-1.5)
eta2 = nu/rho-1
secLat = 1./cos(lat)
VII = tan(lat)/(2*rho*nu)
VIII = tan(lat)/(24*rho*nu**3)*(5+3*tan(lat)**2+eta2-9*tan(lat)**2*eta2)
IX = tan(lat)/(720*rho*nu**5)*(61+90*tan(lat)**2+45*tan(lat)**4)
X = secLat/nu
XI = secLat/(6*nu**3)*(nu/rho+2*tan(lat)**2)
XII = secLat/(120*nu**5)*(5+28*tan(lat)**2+24*tan(lat)**4)
XIIA = secLat/(5040*nu**7)*(61+662*tan(lat)**2+1320*tan(lat)**4+720*tan(lat)**6)
dE = E-E0
#These are on the wrong ellipsoid currently: Airy1830. (Denoted by _1)
lat_1 = lat - VII*dE**2 + VIII*dE**4 - IX*dE**6
lon_1 = lon0 + X*dE - XI*dE**3 + XII*dE**5 - XIIA*dE**7
#Want to convert to the GRS80 ellipsoid.
#First convert to cartesian from spherical polar coordinates
H = 0 #Third spherical coord.
x_1 = (nu/F0 + H)*cos(lat_1)*cos(lon_1)
y_1 = (nu/F0+ H)*cos(lat_1)*sin(lon_1)
z_1 = ((1-e2)*nu/F0 +H)*sin(lat_1)
    #Perform Helmert transform (to go between Airy 1830 (_1) and GRS80 (_2))
s = -20.4894*10**-6 #The scale factor -1
    tx, ty, tz = 446.448, -125.157, 542.060 #The translations along x,y,z axes respectively
rxs,rys,rzs = 0.1502, 0.2470, 0.8421 #The rotations along x,y,z respectively, in seconds
rx, ry, rz = rxs*pi/(180*3600.), rys*pi/(180*3600.), rzs*pi/(180*3600.) #In radians
x_2 = tx + (1+s)*x_1 + (-rz)*y_1 + (ry)*z_1
y_2 = ty + (rz)*x_1 + (1+s)*y_1 + (-rx)*z_1
z_2 = tz + (-ry)*x_1 + (rx)*y_1 + (1+s)*z_1
#Back to spherical polar coordinates from cartesian
#Need some of the characteristics of the new ellipsoid
    a_2, b_2 = 6378137.000, 6356752.3141 #The GRS80 semi-major and semi-minor axes used for WGS84(m)
e2_2 = 1- (b_2*b_2)/(a_2*a_2) #The eccentricity of the GRS80 ellipsoid
p = sqrt(x_2**2 + y_2**2)
    #Lat is obtained by an iterative procedure:
lat = arctan2(z_2,(p*(1-e2_2))) #Initial value
latold = 2*pi
while abs(lat - latold)>10**-16:
lat, latold = latold, lat
nu_2 = a_2/sqrt(1-e2_2*sin(latold)**2)
lat = arctan2(z_2+e2_2*nu_2*sin(latold), p)
#Lon and height are then pretty easy
lon = arctan2(y_2,x_2)
H = p/cos(lat) - nu_2
#Uncomment this line if you want to print the results
#print [(lat-lat_1)*180/pi, (lon - lon_1)*180/pi]
#Convert to degrees
lat = lat*180/pi
lon = lon*180/pi
#Job's a good'n.
return lat, lon
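# --- Illustrative usage sketch (editor's addition). OSGB36 easting/northing
# for a point near Westminster; outputs are approximate:
#
#   lat, lon = grid_to_longlat(530000, 180000)
#   print(round(lat, 3), round(lon, 3))   # roughly 51.5, -0.128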
| mit | -3,620,041,475,410,725,000 | 40.865169 | 104 | 0.564681 | false | 2.41791 | false | false | false |
cgwire/zou | zou/app/blueprints/crud/project.py | 1 | 3203 | from flask_jwt_extended import jwt_required
from flask_restful import reqparse
from zou.app.models.project import Project
from zou.app.models.project_status import ProjectStatus
from zou.app.services import (
deletion_service,
projects_service,
shots_service,
user_service,
)
from zou.app.utils import permissions, fields
from .base import BaseModelResource, BaseModelsResource
class ProjectsResource(BaseModelsResource):
def __init__(self):
BaseModelsResource.__init__(self, Project)
def add_project_permission_filter(self, query):
if permissions.has_admin_permissions():
return query
else:
return query.filter(user_service.build_related_projects_filter())
def check_read_permissions(self):
return True
def update_data(self, data):
open_status = projects_service.get_or_create_open_status()
if "project_status_id" not in data:
data["project_status_id"] = open_status["id"]
return data
def post_creation(self, project):
project_dict = project.serialize()
if project.production_type == "tvshow":
episode = shots_service.create_episode(project.id, "E01")
project_dict["first_episode_id"] = fields.serialize_value(
episode["id"]
)
user_service.clear_project_cache()
projects_service.clear_project_cache("")
return project_dict
class ProjectResource(BaseModelResource):
def __init__(self):
BaseModelResource.__init__(self, Project)
self.protected_fields.append("team")
def check_read_permissions(self, project):
user_service.check_project_access(project["id"])
def post_update(self, project_dict):
if project_dict["production_type"] == "tvshow":
episode = shots_service.get_or_create_first_episode(
project_dict["id"]
)
project_dict["first_episode_id"] = fields.serialize_value(
episode["id"]
)
projects_service.clear_project_cache(project_dict["id"])
return project_dict
def clean_get_result(self, data):
project_status = ProjectStatus.get(data["project_status_id"])
data["project_status_name"] = project_status.name
return data
def post_delete(self, project_dict):
projects_service.clear_project_cache(project_dict["id"])
@jwt_required
def delete(self, instance_id):
parser = reqparse.RequestParser()
parser.add_argument("force", default=False, type=bool)
args = parser.parse_args()
project = self.get_model_or_404(instance_id)
project_dict = project.serialize()
if projects_service.is_open(project_dict):
return {
"error": True,
"message": "Only closed projects can be deleted",
}, 400
else:
self.check_delete_permissions(project_dict)
if args["force"] == True:
deletion_service.remove_project(instance_id)
else:
project.delete()
self.post_delete(project_dict)
return "", 204
| agpl-3.0 | -8,433,000,899,857,017,000 | 32.715789 | 77 | 0.62098 | false | 4.111682 | false | false | false |
overide/Datastructure-and-Algorithm-with-Python | searching/hash_open_addressing.py | 1 | 2591 | #-------------------------------------------------------------------------------------------------------------------
#Name : Fixed Size Hash Table
#Purpose : Fixed size Hash Table implementation in python using open addressing method for educational purpose
#Author : Atul Kumar
#Created : 07/07/2016
#License : GPL V3
#Copyright : (c) 2016 Atul Kumar (www.facebook.com/atul.kr.007)
#Any corrections and suggestions for optimization are welcome :)
#-------------------------------------------------------------------------------------------------------------------
class HashTable:
def __init__(self):
self.size = 11 # size must be a prime number for collision resolution algo to work efficiently
self.slot = [None] * self.size
self.data = [None] * self.size
self.emptyCount = self.size #counts empty slot left in hash table
def hash_function(self,key,size):
try:
if key.isalnum(): #key is alpha numeric
sum = 0
for ch in key:
if ch.isdigit():
sum+=int(ch)
else:
sum+=ord(ch)
key = sum
except:
pass #key is integer
return key % size
def rehash(self,old_hash,size): #Collision resolution with linear probing
return (old_hash+1) % size
def put(self,key,data):
hash_value = self.hash_function(key,len(self.slot))
if self.slot[hash_value] == None:
self.slot[hash_value] =key
self.data[hash_value] = data
self.emptyCount -= 1
else:
if self.slot[hash_value] == key:
self.data[hash_value] = data #replace
else:
if not self.isAnyEmpty():
next_slot = self.rehash(hash_value,len(self.slot))
while(self.slot[next_slot] != None and self.slot[next_slot] != key):
next_slot = self.rehash(next_slot,len(self.slot))
if self.slot[next_slot] == None:
self.slot[next_slot] = key
self.data[next_slot] = data
self.emptyCount -= 1
else:
self.data[next_slot] = data #replace
else:
raise Exception("Hash table is full")
def get(self,key):
hash_value = self.hash_function(key,len(self.slot))
data = None
found = False
stop = False
pos = hash_value
while(self.slot[pos] != None and (not found and not stop)):
if self.slot[pos] == key:
found = True
data = self.data[pos]
else:
pos = self.rehash(pos,len(self.slot))
if pos == hash_value:
stop = True
return data
    def isAnyEmpty(self):
        # True when no empty slot remains, i.e. the table is full
        return self.emptyCount == 0
def __getitem__(self,key):
return self.get(key)
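    # --- Illustrative usage sketch (editor's addition; __setitem__ below
    # completes the mapping protocol):
    #   h = HashTable()
    #   h['cat'] = 'meow'    # routed through put()
    #   h['cat']             # -> 'meow', routed through get()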
def __setitem__(self,key,data):
self.put(key,data) | gpl-3.0 | -6,431,755,950,926,975,000 | 28.869048 | 116 | 0.573138 | false | 3.296438 | false | false | false |
henriquegemignani/randovania | randovania/game_description/game_patches.py | 1 | 3309 | import copy
import dataclasses
from dataclasses import dataclass
from typing import Dict, Tuple, Iterator
from randovania.game_description.area_location import AreaLocation
from randovania.game_description.assignment import PickupAssignment, GateAssignment, PickupTarget
from randovania.game_description.dock import DockWeakness, DockConnection
from randovania.game_description.echoes_game_specific import EchoesGameSpecific
from randovania.game_description.hint import Hint
from randovania.game_description.resources.logbook_asset import LogbookAsset
from randovania.game_description.resources.pickup_index import PickupIndex
from randovania.game_description.resources.resource_info import CurrentResources
from randovania.game_description.resources.resource_type import ResourceType
@dataclass(frozen=True)
class GamePatches:
"""Determines patches that are made to the game's data.
Currently we support:
* Swapping pickup locations
"""
player_index: int
pickup_assignment: PickupAssignment
elevator_connection: Dict[int, AreaLocation]
dock_connection: Dict[Tuple[int, int], DockConnection]
dock_weakness: Dict[Tuple[int, int], DockWeakness]
translator_gates: GateAssignment
starting_items: CurrentResources
starting_location: AreaLocation
hints: Dict[LogbookAsset, Hint]
game_specific: EchoesGameSpecific
def assign_new_pickups(self, assignments: Iterator[Tuple[PickupIndex, PickupTarget]]) -> "GamePatches":
new_pickup_assignment = copy.copy(self.pickup_assignment)
for index, pickup in assignments:
assert index not in new_pickup_assignment
new_pickup_assignment[index] = pickup
return dataclasses.replace(self, pickup_assignment=new_pickup_assignment)
def assign_pickup_assignment(self, assignment: PickupAssignment) -> "GamePatches":
items: Iterator[Tuple[PickupIndex, PickupTarget]] = assignment.items()
return self.assign_new_pickups(items)
def assign_gate_assignment(self, assignment: GateAssignment) -> "GamePatches":
new_translator_gates = copy.copy(self.translator_gates)
for gate, translator in assignment.items():
assert gate not in new_translator_gates
assert gate.resource_type == ResourceType.GATE_INDEX
new_translator_gates[gate] = translator
return dataclasses.replace(self, translator_gates=new_translator_gates)
def assign_starting_location(self, location: AreaLocation) -> "GamePatches":
return dataclasses.replace(self, starting_location=location)
def assign_extra_initial_items(self, new_resources: CurrentResources) -> "GamePatches":
current = copy.copy(self.starting_items)
for resource, quantity in new_resources.items():
if resource.resource_type != ResourceType.ITEM:
raise ValueError("Only ITEM is supported as extra initial items, got {}".format(resource.resource_type))
current[resource] = current.get(resource, 0) + quantity
return dataclasses.replace(self, starting_items=current)
def assign_hint(self, logbook: LogbookAsset, hint: Hint) -> "GamePatches":
current = copy.copy(self.hints)
current[logbook] = hint
return dataclasses.replace(self, hints=current)
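# --- Illustrative usage sketch (editor's addition). GamePatches is a frozen
# dataclass, so every assign_* call returns a new instance; the arguments
# below are placeholders:
#
#   patches = patches.assign_starting_location(AreaLocation(world_id, area_id))
#   patches = patches.assign_hint(some_logbook_asset, some_hint)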
| gpl-3.0 | 8,321,308,436,758,278,000 | 44.328767 | 120 | 0.74252 | false | 3.943981 | false | false | false |
smartpm/smart | smart/interfaces/qt/command.py | 3 | 3123 | #
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.interfaces.qt.interface import QtInterface
from smart.interfaces.qt import getPixmap, centerWindow
from smart import *
import time
import qt
class QtCommandInterface(QtInterface):
def __init__(self, ctrl, argv=None):
QtInterface.__init__(self, ctrl, argv)
self._status = QtStatus()
def showStatus(self, msg):
self._status.show(msg)
while qt.QApplication.eventLoop().hasPendingEvents():
qt.QApplication.eventLoop().processEvents(qt.QEventLoop.AllEvents)
def hideStatus(self):
self._status.hide()
while qt.QApplication.eventLoop().hasPendingEvents():
qt.QApplication.eventLoop().processEvents(qt.QEventLoop.AllEvents)
def run(self, command=None, argv=None):
result = QtInterface.run(self, command, argv)
self._status.wait()
while self._log.isVisible():
time.sleep(0.1)
while qt.QApplication.eventLoop().hasPendingEvents():
qt.QApplication.eventLoop().processEvents(qt.QEventLoop.AllEvents)
return result
class QtStatus(object):
def __init__(self):
self._window = qt.QDialog()
self._window.setIcon(getPixmap("smart"))
self._window.setCaption(_("Status"))
self._window.setModal(True)
self._vbox = qt.QVBox(self._window)
self._vbox.setMargin(20)
self._label = qt.QLabel(self._vbox)
self._label.show()
self._lastshown = 0
def show(self, msg):
self._label.setText(msg)
self._vbox.adjustSize()
self._window.adjustSize()
self._window.show()
centerWindow(self._window)
self._lastshown = time.time()
while qt.QApplication.eventLoop().hasPendingEvents():
qt.QApplication.eventLoop().processEvents(qt.QEventLoop.AllEvents)
def hide(self):
self._window.hide()
def isVisible(self):
return self._window.isVisible()
def wait(self):
while self.isVisible() and self._lastshown+3 > time.time():
time.sleep(0.3)
while qt.QApplication.eventLoop().hasPendingEvents():
qt.QApplication.eventLoop().processEvents(qt.QEventLoop.AllEvents)
# vim:ts=4:sw=4:et
| gpl-2.0 | 7,352,646,742,271,694,000 | 33.7 | 82 | 0.667947 | false | 3.894015 | false | false | false |
tensorflow/tpu | models/official/efficientnet/imagenet_input.py | 1 | 19927 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Efficient ImageNet input pipeline using tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import functools
import os
from absl import logging
import six
import tensorflow.compat.v1 as tf
import preprocessing
def build_image_serving_input_fn(image_size,
batch_size=None,
resize_method=None):
"""Builds a serving input fn for raw images."""
def _image_serving_input_fn():
"""Serving input fn for raw images."""
def _preprocess_image(image_bytes):
"""Preprocess a single raw image."""
image = preprocessing.preprocess_image(
image_bytes=image_bytes,
is_training=False,
image_size=image_size,
resize_method=resize_method)
return image
image_bytes_list = tf.placeholder(
shape=[batch_size],
dtype=tf.string,
)
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=tf.float32)
return tf.estimator.export.ServingInputReceiver(
images, {'image_bytes': image_bytes_list})
return _image_serving_input_fn
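# --- Illustrative usage sketch (editor's addition): exporting a SavedModel
# with this serving input fn from a TF1 Estimator (paths are placeholders):
#
#   serving_input_fn = build_image_serving_input_fn(image_size=224)
#   estimator.export_saved_model('/tmp/efficientnet_export', serving_input_fn)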
class ImageNetTFExampleInput(six.with_metaclass(abc.ABCMeta, object)):
"""Base class for ImageNet input_fn generator."""
def __init__(self,
is_training,
use_bfloat16,
num_cores=8,
image_size=224,
transpose_input=False,
num_label_classes=1000,
include_background_label=False,
augment_name=None,
mixup_alpha=0.0,
randaug_num_layers=None,
randaug_magnitude=None,
resize_method=None):
"""Constructor.
Args:
is_training: `bool` for whether the input is for training
use_bfloat16: If True, use bfloat16 precision; else use float32.
num_cores: `int` for the number of TPU cores
image_size: `int` for image size (both width and height).
transpose_input: 'bool' for whether to use the double transpose trick
      num_label_classes: number of label classes. Defaults to 1000 for ImageNet.
include_background_label: If true, label #0 is reserved for background.
augment_name: `string` that is the name of the augmentation method to
apply to the image. `autoaugment` if AutoAugment is to be used or
        `randaugment` if RandAugment is to be used. If the value is `None`, no
        augmentation method will be applied. See autoaugment.py for more
details.
mixup_alpha: float to control the strength of Mixup regularization, set to
0.0 to disable.
randaug_num_layers: 'int', if RandAug is used, what should the number of
layers be. See autoaugment.py for detailed description.
randaug_magnitude: 'int', if RandAug is used, what should the magnitude
be. See autoaugment.py for detailed description.
resize_method: If None, use bicubic in default.
"""
self.image_preprocessing_fn = preprocessing.preprocess_image
self.is_training = is_training
self.use_bfloat16 = use_bfloat16
self.num_cores = num_cores
self.transpose_input = transpose_input
self.image_size = image_size
self.include_background_label = include_background_label
self.num_label_classes = num_label_classes
if include_background_label:
self.num_label_classes += 1
self.augment_name = augment_name
self.mixup_alpha = mixup_alpha
self.randaug_num_layers = randaug_num_layers
self.randaug_magnitude = randaug_magnitude
self.resize_method = resize_method
def set_shapes(self, batch_size, images, labels):
"""Statically set the batch_size dimension."""
if self.transpose_input:
images.set_shape(images.get_shape().merge_with(
tf.TensorShape([None, None, None, batch_size])))
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size, None])))
# Convert to R1 tensors for fast transfer to device.
images = tf.reshape(images, [-1])
else:
images.set_shape(images.get_shape().merge_with(
tf.TensorShape([batch_size, None, None, None])))
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size, None])))
return images, labels
def mixup(self, batch_size, alpha, images, labels):
"""Applies Mixup regularization to a batch of images and labels.
[1] Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz
Mixup: Beyond Empirical Risk Minimization.
ICLR'18, https://arxiv.org/abs/1710.09412
Arguments:
batch_size: The input batch size for images and labels.
alpha: Float that controls the strength of Mixup regularization.
images: A batch of images of shape [batch_size, ...]
labels: A batch of labels of shape [batch_size, num_classes]
Returns:
A tuple of (images, labels) with the same dimensions as the input with
Mixup regularization applied.
"""
mix_weight = tf.distributions.Beta(alpha, alpha).sample([batch_size, 1])
mix_weight = tf.maximum(mix_weight, 1. - mix_weight)
images_mix_weight = tf.cast(
tf.reshape(mix_weight, [batch_size, 1, 1, 1]), images.dtype)
# Mixup on a single batch is implemented by taking a weighted sum with the
# same batch in reverse.
images_mix = (
images * images_mix_weight + images[::-1] * (1. - images_mix_weight))
labels_mix = labels * mix_weight + labels[::-1] * (1. - mix_weight)
return images_mix, labels_mix
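  # Illustrative note (editor's addition): with alpha=0.2 and batch_size=2,
  # mix_weight might sample ~[[0.93], [0.71]] after the max(); image 0 then
  # becomes 0.93*img0 + 0.07*img1, and its label 0.93*lab0 + 0.07*lab1.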
def dataset_parser(self, value):
"""Parses an image and its label from a serialized ResNet-50 TFExample.
Args:
value: serialized string containing an ImageNet TFExample.
Returns:
Returns a tuple of (image, label) from the TFExample.
"""
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, ''),
'image/class/label': tf.FixedLenFeature([], tf.int64, -1),
}
parsed = tf.parse_single_example(value, keys_to_features)
image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
image = self.image_preprocessing_fn(
image_bytes=image_bytes,
is_training=self.is_training,
image_size=self.image_size,
use_bfloat16=self.use_bfloat16,
augment_name=self.augment_name,
randaug_num_layers=self.randaug_num_layers,
randaug_magnitude=self.randaug_magnitude,
resize_method=self.resize_method)
# The labels will be in range [1,1000], 0 is reserved for background
label = tf.cast(
tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32)
if not self.include_background_label:
# Subtract 1 if the background label is discarded.
label -= 1
onehot_label = tf.one_hot(label, self.num_label_classes)
return image, onehot_label
@abc.abstractmethod
def make_source_dataset(self, index, num_hosts):
"""Makes dataset of serialized TFExamples.
The returned dataset will contain `tf.string` tensors, but these strings are
serialized `TFExample` records that will be parsed by `dataset_parser`.
If self.is_training, the dataset should be infinite.
Args:
index: current host index.
num_hosts: total number of hosts.
Returns:
A `tf.data.Dataset` object.
"""
return
def input_fn(self, params):
"""Input function which provides a single batch for train or eval.
Args:
params: `dict` of parameters passed from the `TPUEstimator`.
`params['batch_size']` is always provided and should be used as the
effective batch size.
Returns:
A `tf.data.Dataset` object.
"""
# Retrieves the batch size for the current shard. The # of shards is
# computed according to the input pipeline deployment. See
# tf.estimator.tpu.RunConfig for details.
batch_size = params['batch_size']
if 'context' in params:
current_host = params['context'].current_input_fn_deployment()[1]
num_hosts = params['context'].num_hosts
else:
current_host = 0
num_hosts = 1
dataset = self.make_source_dataset(current_host, num_hosts)
# Use the fused map-and-batch operation.
#
# For XLA, we must used fixed shapes. Because we repeat the source training
# dataset indefinitely, we can use `drop_remainder=True` to get fixed-size
# batches without dropping any training examples.
#
# When evaluating, `drop_remainder=True` prevents accidentally evaluating
# the same image twice by dropping the final batch if it is less than a full
# batch size. As long as this validation is done with consistent batch size,
# exactly the same images will be used.
dataset = dataset.map(self.dataset_parser, 64).batch(batch_size, True)
# Apply Mixup
if self.is_training and self.mixup_alpha > 0.0:
dataset = dataset.map(
functools.partial(self.mixup, batch_size, self.mixup_alpha),
num_parallel_calls=64)
# Transpose for performance on TPU
if self.transpose_input:
dataset = dataset.map(
lambda images, labels: (tf.transpose(images, [1, 2, 3, 0]), labels),
num_parallel_calls=64)
# Assign static batch size dimension
dataset = dataset.map(functools.partial(self.set_shapes, batch_size), 64)
# Prefetch overlaps in-feed with training
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
options = tf.data.Options()
options.experimental_deterministic = False
options.experimental_threading.max_intra_op_parallelism = 1
options.experimental_threading.private_threadpool_size = 48
dataset = dataset.with_options(options)
return dataset
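  # Usage sketch ('imagenet_input' is a hypothetical instance name; 'context'
  # is injected into params by the framework only when running on multiple
  # hosts):
  #   ds = imagenet_input.input_fn({'batch_size': 1024})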
class ImageNetInput(ImageNetTFExampleInput):
"""Generates ImageNet input_fn from a series of TFRecord files.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below, sharded across 1024 files, named sequentially:
train-00000-of-01024
train-00001-of-01024
...
train-01023-of-01024
The validation data is in the same format but sharded in 128 files.
The format of the data required is created by the script at:
https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py
"""
def __init__(self,
is_training,
use_bfloat16,
transpose_input,
data_dir,
image_size=224,
num_parallel_calls=64,
cache=False,
num_label_classes=1000,
include_background_label=False,
augment_name=None,
mixup_alpha=0.0,
randaug_num_layers=None,
randaug_magnitude=None,
resize_method=None,
holdout_shards=None):
"""Create an input from TFRecord files.
Args:
is_training: `bool` for whether the input is for training
use_bfloat16: If True, use bfloat16 precision; else use float32.
transpose_input: 'bool' for whether to use the double transpose trick
data_dir: `str` for the directory of the training and validation data;
if 'null' (the literal string 'null') or implicitly False
then construct a null pipeline, consisting of empty images
and blank labels.
image_size: `int` for image size (both width and height).
num_parallel_calls: concurrency level to use when reading data from disk.
cache: if true, fill the dataset by repeating from its cache.
num_label_classes: number of label classes. Default to 1000 for ImageNet.
include_background_label: if true, label #0 is reserved for background.
augment_name: `string` that is the name of the augmentation method
to apply to the image. `autoaugment` if AutoAugment is to be used or
        `randaugment` if RandAugment is to be used. If the value is `None`, no
        augmentation method will be applied. See autoaugment.py
for more details.
mixup_alpha: float to control the strength of Mixup regularization, set
to 0.0 to disable.
randaug_num_layers: 'int', if RandAug is used, what should the number of
layers be. See autoaugment.py for detailed description.
randaug_magnitude: 'int', if RandAug is used, what should the magnitude
be. See autoaugment.py for detailed description.
      resize_method: If None, use bicubic by default.
holdout_shards: number of holdout training shards for validation.
"""
super(ImageNetInput, self).__init__(
is_training=is_training,
image_size=image_size,
use_bfloat16=use_bfloat16,
transpose_input=transpose_input,
num_label_classes=num_label_classes,
include_background_label=include_background_label,
augment_name=augment_name,
mixup_alpha=mixup_alpha,
        randaug_num_layers=randaug_num_layers,
        randaug_magnitude=randaug_magnitude,
        resize_method=resize_method)
self.data_dir = data_dir
if self.data_dir == 'null' or not self.data_dir:
self.data_dir = None
self.num_parallel_calls = num_parallel_calls
self.cache = cache
self.holdout_shards = holdout_shards
def _get_null_input(self, data):
"""Returns a null image (all black pixels).
Args:
data: element of a dataset, ignored in this method, since it produces
the same null image regardless of the element.
Returns:
a tensor representing a null image.
"""
del data # Unused since output is constant regardless of input
return tf.zeros([self.image_size, self.image_size, 3], tf.bfloat16
if self.use_bfloat16 else tf.float32)
def dataset_parser(self, value):
"""See base class."""
if not self.data_dir:
return value, tf.constant(0., tf.float32, (1000,))
return super(ImageNetInput, self).dataset_parser(value)
def make_source_dataset(self, index, num_hosts):
"""See base class."""
if not self.data_dir:
logging.info('Undefined data_dir implies null input')
return tf.data.Dataset.range(1).repeat().map(self._get_null_input)
if self.holdout_shards:
if self.is_training:
filenames = [
os.path.join(self.data_dir, 'train-%05d-of-01024' % i)
for i in range(self.holdout_shards, 1024)
]
else:
filenames = [
os.path.join(self.data_dir, 'train-%05d-of-01024' % i)
for i in range(0, self.holdout_shards)
]
for f in filenames[:10]:
logging.info('datafiles: %s', f)
dataset = tf.data.Dataset.from_tensor_slices(filenames)
else:
file_pattern = os.path.join(
self.data_dir, 'train-*' if self.is_training else 'validation-*')
logging.info('datafiles: %s', file_pattern)
dataset = tf.data.Dataset.list_files(file_pattern, shuffle=False)
    # For multi-host training, we want each host to always process the same
# subset of files. Each host only sees a subset of the entire dataset,
# allowing us to cache larger datasets in memory.
dataset = dataset.shard(num_hosts, index)
if self.is_training and not self.cache:
dataset = dataset.repeat()
def fetch_dataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
# Read the data from disk in parallel
dataset = dataset.interleave(
fetch_dataset, cycle_length=self.num_parallel_calls,
num_parallel_calls=self.num_parallel_calls, deterministic=False)
if self.cache:
dataset = dataset.cache().shuffle(1024 * 16).repeat()
else:
dataset = dataset.shuffle(1024)
return dataset
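  # Example (hypothetical value): holdout_shards=64 trains on shards
  # train-00064-of-01024 .. train-01023-of-01024 and validates on the first
  # 64 training shards instead of the validation-* files.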
# Defines a selection of data from a Cloud Bigtable.
BigtableSelection = collections.namedtuple('BigtableSelection', [
'project', 'instance', 'table', 'prefix', 'column_family',
'column_qualifier'
])
class ImageNetBigtableInput(ImageNetTFExampleInput):
"""Generates ImageNet input_fn from a Bigtable for training or evaluation.
"""
def __init__(self,
is_training,
use_bfloat16,
transpose_input,
selection,
augment_name=None,
num_label_classes=1000,
include_background_label=False,
mixup_alpha=0.0,
randaug_num_layers=None,
randaug_magnitude=None,
resize_method=None):
"""Constructs an ImageNet input from a BigtableSelection.
Args:
is_training: `bool` for whether the input is for training
use_bfloat16: If True, use bfloat16 precision; else use float32.
transpose_input: 'bool' for whether to use the double transpose trick
selection: a BigtableSelection specifying a part of a Bigtable.
augment_name: `string` that is the name of the augmentation method
to apply to the image. `autoaugment` if AutoAugment is to be used or
        `randaugment` if RandAugment is to be used. If the value is `None`, no
        augmentation method will be applied. See autoaugment.py
for more details.
num_label_classes: number of label classes. Default to 1000 for ImageNet.
include_background_label: if true, label #0 is reserved for background.
mixup_alpha: float to control the strength of Mixup regularization, set
to 0.0 to disable.
randaug_num_layers: 'int', if RandAug is used, what should the number of
layers be. See autoaugment.py for detailed description.
randaug_magnitude: 'int', if RandAug is used, what should the magnitude
        be. See autoaugment.py for detailed description.
resize_method: if None, use bicubic.
"""
super(ImageNetBigtableInput, self).__init__(
is_training=is_training,
use_bfloat16=use_bfloat16,
transpose_input=transpose_input,
num_label_classes=num_label_classes,
include_background_label=include_background_label,
augment_name=augment_name,
mixup_alpha=mixup_alpha,
randaug_num_layers=randaug_num_layers,
randaug_magnitude=randaug_magnitude,
resize_method=resize_method)
self.selection = selection
def make_source_dataset(self, index, num_hosts):
"""See base class."""
try:
from tensorflow.contrib.cloud import BigtableClient # pylint: disable=g-import-not-at-top
except ImportError as e:
logging.exception('Bigtable is not supported in TensorFlow 2.x.')
raise e
data = self.selection
client = BigtableClient(data.project, data.instance)
table = client.table(data.table)
ds = table.parallel_scan_prefix(data.prefix,
columns=[(data.column_family,
data.column_qualifier)])
# The Bigtable datasets will have the shape (row_key, data)
ds_data = ds.map(lambda index, data: data)
if self.is_training:
ds_data = ds_data.repeat()
return ds_data
| apache-2.0 | -1,967,852,731,716,983,000 | 38.072549 | 96 | 0.65755 | false | 3.896558 | false | false | false |
pacoqueen/cican | formularios/peticiones_sin_asignar.py | 1 | 11733 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2008-2010 Francisco José Rodríguez Bogado #
# <[email protected]> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
'''
Created on 24/02/2011
@author: bogado
Requests without an assigned lab technician. From here, lab technicians can be
assigned to collect the samples, and a route sheet can be printed for each one.
'''
import pygtk
pygtk.require('2.0')
import gtk
import sys, os, datetime
if os.path.realpath(os.path.curdir).split(os.path.sep)[-1] == "formularios":
os.chdir("..")
sys.path.append(".")
from framework import pclases
from ventana_consulta import VentanaConsulta
from ventana_generica import _abrir_en_ventana_nueva as abrir, GALACTUS
import utils, utils.mapa
class PeticionesSinAsignar(VentanaConsulta):
def __init__(self, objeto = None, usuario = None, run = True,
fecha = datetime.date.today()):
"""
        Constructor. objeto may be a pclases object with which to start
        the window (instead of the first record in the table, which is
        the one shown by default).
"""
self.nombre_fichero_ventana = os.path.split(__file__)[-1]
__clase = pclases.Peticion
self.__usuario = usuario
if objeto:
            fecha = objeto.fechaRecogida  # self.objeto is not set yet here
VentanaConsulta.__init__(self,
usuario = usuario,
clase = __clase,
run = False,
ventana_marco="peticiones_sin_asignar.glade")
self.build_tabla_laborantes()
self.build_tabla_peticiones_sin_asignar()
self.build_tabla_peticiones_asignadas()
self.wids['b_asignar'].connect("clicked", self.asignar)
self.wids['calendario'].connect('month-changed',
marcar_dias_pendientes)
self.actualizar_ventana()
self.wids['calendario'].connect('day-selected',self.actualizar_ventana)
self.wids['calendario'].select_month(fecha.month - 1, fecha.year)
self.wids['calendario'].select_day(fecha.day)
self.mapa = utils.mapa.Mapa()
self.mapa.put_mapa(self.wids["vpaned1"])
sel = self.wids['tv_sin_asignar'].get_selection()
sel.connect("changed", self.actualizar_mapa)
sel = self.wids['tv_asignadas'].get_selection()
sel.connect("changed", self.actualizar_mapa, False)
if run:
gtk.main()
def actualizar_mapa(self, sel, track = True, flag = True):
model, paths = sel.get_selected_rows()
for path in paths:
puid = model[path][-1]
peticion = pclases.getObjetoPUID(puid)
d = peticion.direccion
if not d:
d = peticion.obra.direccion
try:
self.mapa.centrar_mapa(d.lat, d.lon, zoom = 12, track = track,
flag = flag)
except AttributeError, e:
# print e
            pass  # The site/request has no address assigned.
def build_tabla_laborantes(self):
cols = (("Nombre", "gobject.TYPE_STRING", False, True, True, None),
("Recogidas asignadas",
"gobject.TYPE_STRING", False, True, False, None),
("PUID", "gobject.TYPE_STRING", False, False, False, None))
utils.ui.preparar_treeview(self.wids['tv_laborantes'], cols)
self.wids['tv_laborantes'].connect("row-activated",
self._abrir_en_ventana_nueva, self.__usuario, GALACTUS, None,
pclases.Empleado)
def build_tabla_peticiones_asignadas(self):
cols = (("Obra", "gobject.TYPE_STRING", False, True, True, None),
("Dirección", "gobject.TYPE_STRING", False, True, False, None),
("Material", "gobject.TYPE_STRING", False, True, False, None),
("PUID", "gobject.TYPE_STRING", False, False, False, None))
utils.ui.preparar_listview(self.wids['tv_sin_asignar'], cols,
multi = True)
self.wids['tv_sin_asignar'].connect("row-activated",
self._abrir_en_ventana_nueva, self.__usuario, GALACTUS, None,
pclases.Peticion)
def build_tabla_peticiones_sin_asignar(self):
cols = (("Obra", "gobject.TYPE_STRING", False, True, True, None),
("Dirección", "gobject.TYPE_STRING", False, True, False, None),
("Material", "gobject.TYPE_STRING", False, True, False, None),
("Laborante", "gobject.TYPE_STRING", False, True, False, None),
("PUID", "gobject.TYPE_STRING", False, False, False, None))
utils.ui.preparar_listview(self.wids['tv_asignadas'], cols)
self.wids['tv_asignadas'].connect("row-activated",
self._abrir_en_ventana_nueva, self.__usuario, GALACTUS, None,
pclases.Peticion)
def _abrir_en_ventana_nueva(self, *args, **kw):
abrir(*args, **kw)
self.actualizar_ventana()
def rellenar_widgets(self):
self.rellenar_tabla_laborantes()
self.rellenar_tablas_peticiones()
def rellenar_tabla_laborantes(self):
model = self.wids['tv_laborantes'].get_model()
model.clear()
padres = {}
for e in pclases.Empleado.buscar_laborantes():
padres[e] = model.append(None, (e.get_info(),
"",
e.get_puid()))
fecha_seleccionada = self.get_fecha_seleccionada()
for p in pclases.Peticion.selectBy(fechaRecogida = fecha_seleccionada):
laborante = p.empleado
try:
padre = padres[laborante]
except KeyError:
                # No longer a lab technician, so don't list them.
pass
else:
model.append(padre, ("", p.get_info(), p.get_puid()))
try:
model[padre][1] = utils.numero.float2str(
utils.numero._float(model[padre][1]) + 1, precision=0)
except (TypeError, ValueError):
model[padre][1] = "1"
def get_fecha_seleccionada(self):
"""
        Returns the gtk.Calendar date as a datetime.date.
"""
y, m, d = self.wids['calendario'].get_date()
        fecha = datetime.date(y, m+1, d)  # Months start at 0 in gtk.Calendar
return fecha
def rellenar_tablas_peticiones(self):
fecha_seleccionada = self.get_fecha_seleccionada()
self.wids['tv_sin_asignar'].get_model().clear()
self.wids['tv_asignadas'].get_model().clear()
for p in pclases.Peticion.selectBy(fechaRecogida = fecha_seleccionada):
fila = ((p.obra and p.obra.get_info() or "",
p.direccion and p.direccion.get_direccion_completa()
or "",
p.material and p.material.get_info() or ""))
            if not p.empleado:  # Not assigned
model = self.wids['tv_sin_asignar'].get_model()
else:
model = self.wids['tv_asignadas'].get_model()
fila += (p.empleado.get_nombre_completo(), )
fila += (p.get_puid(), )
model.append(fila)
def asignar(self, boton):
model, iter = self.wids['tv_laborantes'].get_selection().get_selected()
if not iter:
utils.ui.dialogo_info(titulo = "SELECCIONE UN LABORANTE",
texto = "Debe seleccionar un laborante al que asignar las "
"peticiones de recogida de material.",
padre = self.wids['ventana'])
else:
empleado = pclases.getObjetoPUID(model[iter][-1])
sel = self.wids['tv_sin_asignar'].get_selection()
sel.selected_foreach(self.asiganda, empleado)
self.actualizar_ventana()
def asiganda(self, treemodel, path, iter, laborante):
p = pclases.getObjetoPUID(treemodel[iter][-1])
p.empleado = laborante
p.sync()
def imprimir(self, boton):
"""
        Prints a route sheet for each lab technician. If one is selected,
        only that technician's route sheet is printed.
"""
model, iter = self.wids['tv_laborantes'].get_selection().get_selected()
        if not iter:  # Print for all:
laborantes = []
for fila in model:
puid = fila[-1]
laborante = pclases.getObjetoPUID(puid)
laborantes.append(laborante)
else:
puid = model[iter][-1]
laborante = pclases.getObjetoPUID(puid)
laborantes = [laborante]
dia = self.get_fecha_seleccionada()
for laborante in laborantes:
abrir_hoja_de_ruta(laborante, dia)
def abrir_hoja_de_ruta(laborante, dia):
"""
    Generates and opens a PDF with the lab technician's route sheet for the
    given day.
"""
from reports import hoja_de_ruta
from utils.informes import abrir_pdf
peticiones = laborante.get_peticiones(dia)
pdf_hoja_ruta = hoja_de_ruta.hoja_ruta(laborante, peticiones)
abrir_pdf(pdf_hoja_ruta)
def marcar_dias_pendientes(calendario):
"""
    Highlights the days that still have requests pending assignment in the
    active month.
"""
calendario.clear_marks()
fecha_actual = calendario.get_date()
uno_del_mes = datetime.date(fecha_actual[0],
fecha_actual[1] + 1,
1)
mes_siguiente = fecha_actual[1] + 2
if mes_siguiente > 12:
anno = fecha_actual[0] + 1
mes_siguiente = mes_siguiente % 12
else:
anno = fecha_actual[0]
uno_del_siguiente = datetime.date(anno, mes_siguiente, 1)
for p in pclases.Peticion.select(pclases.AND(
pclases.Peticion.q.empleadoID==None,
pclases.Peticion.q.fechaRecogida >= uno_del_mes,
pclases.Peticion.q.fechaRecogida < uno_del_siguiente)):
calendario.mark_day(p.fechaRecogida.day)
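# Worked example (illustrative): in December 2011, get_date() returns
# (2011, 11, d), so uno_del_mes is 2011-12-01 and mes_siguiente is 13 > 12,
# which wraps uno_del_siguiente to 2012-01-01.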
def main():
from formularios.options_ventana import parse_options
params, opt_params = parse_options()
ventana = PeticionesSinAsignar(*params, **opt_params)
if __name__ == "__main__":
main()
| gpl-3.0 | 4,794,027,787,422,119,000 | 43.071429 | 80 | 0.544741 | false | 3.383261 | false | false | false |
voutilad/courtlistener | cl/visualizations/admin.py | 2 | 1600 | from cl.visualizations.models import SCOTUSMap, JSONVersion, Referer
from django.contrib import admin
class JSONVersionAdmin(admin.ModelAdmin):
readonly_fields = (
'date_created',
'date_modified',
)
raw_id_fields = (
'map',
)
class JSONVersionInline(admin.StackedInline):
model = JSONVersion
extra = 1
class RefererAdmin(admin.ModelAdmin):
readonly_fields = (
'date_created',
'date_modified',
)
raw_id_fields = (
'map',
)
list_filter = (
'display',
)
list_display = (
'__unicode__',
'display',
'date_created',
'date_modified',
)
search_fields = (
'id',
'url',
'page_title',
)
class RefererInline(admin.StackedInline):
model = Referer
extra = 1
class SCOTUSMapAdmin(admin.ModelAdmin):
inlines = (
JSONVersionInline,
RefererInline,
)
raw_id_fields = (
'clusters',
'cluster_start',
'cluster_end',
)
readonly_fields = (
'date_created',
'date_modified',
'generation_time',
)
list_display = (
'__unicode__',
'user_id',
'date_created',
'date_modified',
'view_count',
'published',
'deleted',
)
list_filter = (
'published',
'deleted',
)
search_fields = (
'id',
'title',
)
admin.site.register(SCOTUSMap, SCOTUSMapAdmin)
admin.site.register(JSONVersion, JSONVersionAdmin)
admin.site.register(Referer, RefererAdmin)
| agpl-3.0 | 6,831,527,185,631,235,000 | 17.823529 | 68 | 0.54 | false | 3.764706 | false | false | false |
noba3/KoTos | addons/script.module.youtube.dl/lib/youtube_dl/extractor/chaturbate.py | 13 | 1635 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class ChaturbateIE(InfoExtractor):
_VALID_URL = r'https?://(?:[^/]+\.)?chaturbate\.com/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.chaturbate.com/siswet19/',
'info_dict': {
'id': 'siswet19',
'ext': 'mp4',
'title': 're:^siswet19 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'age_limit': 18,
'is_live': True,
},
'params': {
'skip_download': True,
}
}, {
'url': 'https://en.chaturbate.com/siswet19/',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
m3u8_url = self._search_regex(
r'src=(["\'])(?P<url>http.+?\.m3u8.*?)\1', webpage,
'playlist', default=None, group='url')
if not m3u8_url:
error = self._search_regex(
r'<span[^>]+class=(["\'])desc_span\1[^>]*>(?P<error>[^<]+)</span>',
webpage, 'error', group='error')
raise ExtractorError(error, expected=True)
formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')
return {
'id': video_id,
'title': self._live_title(video_id),
'thumbnail': 'https://cdn-s.highwebmedia.com/uHK3McUtGCG3SMFcd4ZJsRv8/roomimage/%s.jpg' % video_id,
'age_limit': self._rta_search(webpage),
'is_live': True,
'formats': formats,
}
| gpl-2.0 | -8,301,800,932,186,065,000 | 31.7 | 111 | 0.499694 | false | 3.199609 | false | false | false |
m-ober/byceps | byceps/services/board/aggregation_service.py | 1 | 1772 | """
byceps.services.board.aggregation_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from ...database import db
from .models.category import Category as DbCategory
from .models.posting import Posting as DbPosting
from .models.topic import Topic as DbTopic
def aggregate_category(category: DbCategory) -> None:
"""Update the category's count and latest fields."""
topic_count = DbTopic.query.for_category(category.id).without_hidden().count()
posting_query = DbPosting.query \
.without_hidden() \
.join(DbTopic) \
.filter_by(category=category)
posting_count = posting_query.count()
latest_posting = posting_query \
.filter(DbTopic.hidden == False) \
.latest_to_earliest() \
.first()
category.topic_count = topic_count
category.posting_count = posting_count
category.last_posting_updated_at = latest_posting.created_at \
if latest_posting else None
category.last_posting_updated_by_id = latest_posting.creator_id \
if latest_posting else None
db.session.commit()
def aggregate_topic(topic: DbTopic) -> None:
"""Update the topic's count and latest fields."""
posting_query = DbPosting.query.for_topic(topic.id).without_hidden()
posting_count = posting_query.count()
latest_posting = posting_query.latest_to_earliest().first()
topic.posting_count = posting_count
if latest_posting:
topic.last_updated_at = latest_posting.created_at
topic.last_updated_by_id = latest_posting.creator_id
db.session.commit()
aggregate_category(topic.category)
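# Usage sketch (names assumed, not part of this module): after inserting or
# hiding a posting, call
#   aggregate_topic(posting.topic)
# which refreshes the topic's counters and, through the final call above,
# those of its category as well.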
| bsd-3-clause | 760,638,886,676,078,000 | 30.087719 | 82 | 0.65237 | false | 3.964206 | true | false | false |
gnieboer/gnuradio | grc/python/Block.py | 4 | 7805 | """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from collections import defaultdict
from .. base.Block import Block as _Block
from .. gui.Block import Block as _GUIBlock
from . FlowGraph import _variable_matcher
import extract_docs
class Block(_Block, _GUIBlock):
def is_virtual_sink(self): return self.get_key() == 'virtual_sink'
def is_virtual_source(self): return self.get_key() == 'virtual_source'
##for make source to keep track of indexes
_source_count = 0
##for make sink to keep track of indexes
_sink_count = 0
def __init__(self, flow_graph, n):
"""
Make a new block from nested data.
Args:
            flow_graph: the parent element
            n: the nested odict
        Returns:
            a new block
"""
#grab the data
self._doc = n.find('doc') or ''
self._imports = map(lambda i: i.strip(), n.findall('import'))
self._make = n.find('make')
self._var_make = n.find('var_make')
self._checks = n.findall('check')
self._callbacks = n.findall('callback')
self._throttle = n.find('throttle') or ''
self._bus_structure_source = n.find('bus_structure_source') or ''
self._bus_structure_sink = n.find('bus_structure_sink') or ''
#build the block
_Block.__init__(
self,
flow_graph=flow_graph,
n=n,
)
_GUIBlock.__init__(self)
def get_bus_structure(self, direction):
if direction == 'source':
bus_structure = self._bus_structure_source;
else:
bus_structure = self._bus_structure_sink;
bus_structure = self.resolve_dependencies(bus_structure);
if not bus_structure: return ''
try:
clean_bus_structure = self.get_parent().evaluate(bus_structure)
return clean_bus_structure
except: return ''
def throttle(self): return bool(self._throttle)
def validate(self):
"""
Validate this block.
Call the base class validate.
Evaluate the checks: each check must evaluate to True.
"""
_Block.validate(self)
#evaluate the checks
for check in self._checks:
check_res = self.resolve_dependencies(check)
try:
if not self.get_parent().evaluate(check_res):
self.add_error_message('Check "%s" failed.'%check)
except: self.add_error_message('Check "%s" did not evaluate.'%check)
        # for variables check the value (only if var_value is used)
if _variable_matcher.match(self.get_key()) and self._var_value != '$value':
value = self._var_value
try:
value = self.get_var_value()
self.get_parent().evaluate(value)
except Exception as err:
self.add_error_message('Value "%s" cannot be evaluated:\n%s' % (value, err))
# check if this is a GUI block and matches the selected generate option
current_generate_option = self.get_parent().get_option('generate_options')
for label, option in (('WX GUI', 'wx_gui'), ('QT GUI', 'qt_gui')):
if self.get_name().startswith(label) and current_generate_option != option:
self.add_error_message("Can't generate this block in mode " + repr(option))
def rewrite(self):
"""
Add and remove ports to adjust for the nports.
"""
_Block.rewrite(self)
# adjust nports
for ports in (self.get_sources(), self.get_sinks()):
for i, master_port in enumerate(ports):
nports = master_port.get_nports() or 1
num_ports = 1 + len(master_port.get_clones())
if not nports and num_ports == 1: # not a master port and no left-over clones
continue
# remove excess cloned ports
for port in master_port.get_clones()[nports-1:]:
# remove excess connections
for connection in port.get_connections():
self.get_parent().remove_element(connection)
master_port.remove_clone(port)
ports.remove(port)
# add more cloned ports
for i in range(num_ports, nports):
port = master_port.add_clone()
ports.insert(ports.index(master_port) + i, port)
self.back_ofthe_bus(ports)
# renumber non-message/-msg ports
domain_specific_port_index = defaultdict(int)
for port in filter(lambda p: p.get_key().isdigit(), ports):
domain = port.get_domain()
port._key = str(domain_specific_port_index[domain])
domain_specific_port_index[domain] += 1
def port_controller_modify(self, direction):
"""
Change the port controller.
Args:
direction: +1 or -1
Returns:
true for change
"""
changed = False
#concat the nports string from the private nports settings of all ports
nports_str = ' '.join([port._nports for port in self.get_ports()])
#modify all params whose keys appear in the nports string
for param in self.get_params():
if param.is_enum() or param.get_key() not in nports_str: continue
#try to increment the port controller by direction
try:
value = param.get_evaluated()
value = value + direction
if 0 < value:
param.set_value(value)
changed = True
except: pass
return changed
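    # Example (illustrative): if a port declares nports tied to a
    # 'num_outputs' param, port_controller_modify(+1) increments that param
    # and the next rewrite() call clones the master port to match.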
def get_doc(self):
doc = self._doc.strip('\n').replace('\\\n', '')
#merge custom doc with doxygen docs
return '\n'.join([doc, extract_docs.extract(self.get_key())]).strip('\n')
def get_category(self):
return _Block.get_category(self)
def get_imports(self):
"""
Resolve all import statements.
Split each import statement at newlines.
Combine all import statments into a list.
Filter empty imports.
Returns:
a list of import statements
"""
return filter(lambda i: i, sum(map(lambda i: self.resolve_dependencies(i).split('\n'), self._imports), []))
def get_make(self): return self.resolve_dependencies(self._make)
def get_var_make(self): return self.resolve_dependencies(self._var_make)
def get_var_value(self): return self.resolve_dependencies(self._var_value)
def get_callbacks(self):
"""
Get a list of function callbacks for this block.
Returns:
a list of strings
"""
def make_callback(callback):
callback = self.resolve_dependencies(callback)
if 'self.' in callback: return callback
return 'self.%s.%s'%(self.get_id(), callback)
return map(make_callback, self._callbacks)
| gpl-3.0 | -1,592,589,407,358,603,500 | 37.073171 | 115 | 0.588469 | false | 4.241848 | false | false | false |
Azure/azure-sdk-for-python | sdk/eventhub/azure-eventhub/samples/sync_samples/recv_with_checkpoint_by_time_interval.py | 1 | 3023 | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
An example to show receiving events from an Event Hub with checkpoint store doing checkpoint
by every fixed time interval.
In the `receive` method of `EventHubConsumerClient`:
If no partition id is specified, the checkpoint_store are used for load-balance and checkpoint.
If partition id is specified, the checkpoint_store can only be used for checkpoint.
"""
import os
import time
from azure.eventhub import EventHubConsumerClient
from azure.eventhub.extensions.checkpointstoreblob import BlobCheckpointStore
CONNECTION_STR = os.environ["EVENT_HUB_CONN_STR"]
EVENTHUB_NAME = os.environ['EVENT_HUB_NAME']
STORAGE_CONNECTION_STR = os.environ["AZURE_STORAGE_CONN_STR"]
BLOB_CONTAINER_NAME = "your-blob-container-name" # Please make sure the blob container resource exists.
partition_last_checkpoint_time = dict()
checkpoint_time_interval = 15
def on_event(partition_context, event):
# Put your code here.
# Avoid time-consuming operations.
    p_id = partition_context.partition_id
    print("Received event from partition: {}".format(p_id))
    now_time = time.time()
last_checkpoint_time = partition_last_checkpoint_time.get(p_id)
if last_checkpoint_time is None or (now_time - last_checkpoint_time) >= checkpoint_time_interval:
partition_context.update_checkpoint(event)
partition_last_checkpoint_time[p_id] = now_time
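# A count-based variant (assumption, not part of this sample; event_counts is
# a hypothetical per-partition counter dict) would checkpoint every N events
# instead of every 15 seconds, e.g.:
#   if event_counts[p_id] % 100 == 0:
#       partition_context.update_checkpoint(event)
# trading fewer blob writes against a longer replay window after a restart.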
if __name__ == '__main__':
checkpoint_store = BlobCheckpointStore.from_connection_string(STORAGE_CONNECTION_STR, BLOB_CONTAINER_NAME)
consumer_client = EventHubConsumerClient.from_connection_string(
conn_str=CONNECTION_STR,
consumer_group='$Default',
eventhub_name=EVENTHUB_NAME,
checkpoint_store=checkpoint_store, # For load-balancing and checkpoint. Leave None for no load-balancing.
)
try:
with consumer_client:
"""
Without specified partition_id, the receive will try to receive events from all partitions and if provided
with a checkpoint store, the client will load-balance partition assignment with other EventHubConsumerClient
instances which also try to receive events from all partitions and use the same storage resource.
"""
consumer_client.receive(
on_event=on_event,
starting_position="-1", # "-1" is from the beginning of the partition.
)
# With specified partition_id, load-balance will be disabled, for example:
# client.receive(on_event=on_event, partition_id='0')
except KeyboardInterrupt:
print('Stopped receiving.')
| mit | -7,757,369,159,773,784,000 | 44.80303 | 120 | 0.669864 | false | 4.34964 | false | false | false |
miketamis/CouchPotatoServer | libs/apscheduler/threadpool.py | 138 | 3982 | """
Generic thread pool class. Modeled after Java's ThreadPoolExecutor.
Please note that this ThreadPool does *not* fully implement the PEP 3148
ThreadPool!
"""
from threading import Thread, Lock, currentThread
from weakref import ref
import logging
import atexit
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
logger = logging.getLogger(__name__)
_threadpools = set()
# Worker threads are daemonic in order to let the interpreter exit without
# an explicit shutdown of the thread pool. The following trick is necessary
# to allow worker threads to finish cleanly.
def _shutdown_all():
for pool_ref in tuple(_threadpools):
pool = pool_ref()
if pool:
pool.shutdown()
atexit.register(_shutdown_all)
class ThreadPool(object):
def __init__(self, core_threads=0, max_threads=20, keepalive=1):
"""
:param core_threads: maximum number of persistent threads in the pool
:param max_threads: maximum number of total threads in the pool
:param thread_class: callable that creates a Thread object
:param keepalive: seconds to keep non-core worker threads waiting
for new tasks
"""
self.core_threads = core_threads
self.max_threads = max(max_threads, core_threads, 1)
self.keepalive = keepalive
self._queue = Queue()
self._threads_lock = Lock()
self._threads = set()
self._shutdown = False
_threadpools.add(ref(self))
logger.info('Started thread pool with %d core threads and %s maximum '
'threads', core_threads, max_threads or 'unlimited')
def _adjust_threadcount(self):
self._threads_lock.acquire()
try:
if self.num_threads < self.max_threads:
self._add_thread(self.num_threads < self.core_threads)
finally:
self._threads_lock.release()
def _add_thread(self, core):
t = Thread(target=self._run_jobs, args=(core,))
t.setDaemon(True)
t.start()
self._threads.add(t)
def _run_jobs(self, core):
logger.debug('Started worker thread')
block = True
timeout = None
if not core:
block = self.keepalive > 0
timeout = self.keepalive
while True:
try:
func, args, kwargs = self._queue.get(block, timeout)
except Empty:
break
if self._shutdown:
break
try:
func(*args, **kwargs)
except:
logger.exception('Error in worker thread')
self._threads_lock.acquire()
self._threads.remove(currentThread())
self._threads_lock.release()
logger.debug('Exiting worker thread')
@property
def num_threads(self):
return len(self._threads)
def submit(self, func, *args, **kwargs):
if self._shutdown:
raise RuntimeError('Cannot schedule new tasks after shutdown')
self._queue.put((func, args, kwargs))
self._adjust_threadcount()
def shutdown(self, wait=True):
if self._shutdown:
return
logging.info('Shutting down thread pool')
self._shutdown = True
_threadpools.remove(ref(self))
self._threads_lock.acquire()
for _ in range(self.num_threads):
self._queue.put((None, None, None))
self._threads_lock.release()
if wait:
self._threads_lock.acquire()
threads = tuple(self._threads)
self._threads_lock.release()
for thread in threads:
thread.join()
def __repr__(self):
if self.max_threads:
threadcount = '%d/%d' % (self.num_threads, self.max_threads)
else:
threadcount = '%d' % self.num_threads
return '<ThreadPool at %x; threads=%s>' % (id(self), threadcount)
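# Usage sketch (illustrative, not part of the original module):
#   pool = ThreadPool(core_threads=2, max_threads=10, keepalive=1)
#   pool.submit(my_task, arg)   # my_task is any callable
#   pool.shutdown(wait=True)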
| gpl-3.0 | 7,794,484,241,993,857,000 | 28.93985 | 78 | 0.59342 | false | 4.240682 | false | false | false |
slongfield/simpleStack | randProgram.py | 1 | 2134 | #!/usr/bin/python3
"""randProgram generates random simpleStack programs, and evaluates them.
This is basically a really simple fuzzer for testing that no programs "go
wrong", where "go wrong" means "throw an exception".
Allows one exception to be thrown:
MemoryError if the stack grows beyond 10,000 elements. These programs just
tend to be randomly generated fork-bombs, which while technically valid,
aren't interesting, and don't violate the spirit of simpleStack.
Assumes that a 100 line program will reach its final state in 10,000 steps,
which is a completely invalid assumption.
"""
import random
import string
import simpleStack
_DEBUG = False
_SYMBOLS = ["PRINT", "DUP", "INV", "--", "++", "SUB", "MUL", "MOD", "SWP", "JNZ",
"GET", "PUT"]
def gen_program(min_size, max_size):
"""gen_program generates a random program."""
size = random.randrange(min_size, max_size)
prog = []
for _ in range(size):
# Randomly pick if we add a program symbol or random word.
if random.choice([True, False]):
prog.append(random.choice(_SYMBOLS))
else:
wordlen = random.randrange(0, 10)
prog.append(''.join(random.choice(string.ascii_letters +
string.digits) for _ in range(wordlen)))
return prog
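# Example output (illustrative): gen_program(3, 5) could return
#   ['PRINT', 'aF3x', 'MUL', '--']
# i.e. a mix of opcodes from _SYMBOLS and random alphanumeric words.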
if __name__ == "__main__":
# Generate 10000 programs, or 1 in debug mode
mem_errors = 0
num_runs = 1 if _DEBUG else 10000
for _ in range(num_runs):
prog = gen_program(10, 100)
        # Run for 10,000 steps. If in debug mode, actually print, otherwise,
# don't.
if _DEBUG:
print("\n".join(prog))
def fake_print(_, end=None):
"""Fake print that does nothing"""
del end
try:
simpleStack.run_simple_stack(prog,
max_steps=10000,
printFun=print if _DEBUG else fake_print)
except MemoryError:
mem_errors += 1
print("Ran {} runs, with {} memory errors".format(num_runs, mem_errors))
| apache-2.0 | -6,438,950,789,896,655,000 | 33.983607 | 86 | 0.59747 | false | 3.988785 | false | false | false |
JeffBain/wedding-website | wedding_site/rsvp/views.py | 1 | 1512 | import json
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import TemplateView, View
from rsvp.models import Invitation, Guest, Meal, ReplyError
class RSVPView(TemplateView):
template_name='rsvp/rsvp_page.html'
def get_context_data(self):
context = super().get_context_data()
context['meal_choices'] = json.dumps([meal.toJSON() for meal in
Meal.objects.all()])
return context
class InvitationView(View):
@method_decorator(ensure_csrf_cookie)
def get(self, request, invite_code):
invite = get_object_or_404(Invitation, code=invite_code)
data = invite.toJSON()
return HttpResponse(json.dumps(data),
content_type='application/json')
@method_decorator(ensure_csrf_cookie)
def post(self, request, invite_code):
invite = get_object_or_404(Invitation, code=invite_code)
body_unicode = request.body.decode('utf-8')
data = json.loads(body_unicode)
try:
invite.handle_reply(data)
except ReplyError as e:
data = json.dumps(e.errors)
return HttpResponse(data,
status=400,
content_type='application/json')
else:
return HttpResponse()
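# Request sketch (URL routing and the JSON body shape are defined elsewhere;
# illustrative only): a POST carrying the reply JSON returns 200 on success
# and 400 with the ReplyError's errors serialized as JSON on failure.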
| mit | 3,479,314,515,065,560,600 | 33.363636 | 71 | 0.623677 | false | 4.075472 | false | false | false |
cloew/KaoFlaskAuth | kao_flask_auth/auth_route_decorator.py | 1 | 1323 | from .invalid_auth import InvalidAuth
from .token_builder import VerifyToken, ExtractToken
from flask import current_app as app, request, jsonify
from functools import wraps
def authenticate(error):
resp = jsonify({'code':error.code, 'description':error.description})
resp.status_code = 401
return resp
class AuthRouteDecorator:
""" Helper to provide a decorator to require authorization for a route """
def __init__(self, UserCls):
""" Initialize with the UserProxy Class to use """
self.UserCls = UserCls
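    # Usage sketch (assumed wiring, not from this module):
    #   auth = AuthRouteDecorator(UserProxy)
    #   @app.route('/me')
    #   @auth.requires_auth
    #   def me(user):
    #       return user.name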
def findUser(self):
""" Find the User for the current request """
auth = request.headers.get('Authorization', None)
token = ExtractToken(auth)
        # Let InvalidAuth propagate so requires_auth can turn it into a 401;
        # returning a response object here would hand the wrapped view an
        # HTTP response in place of the user.
        data = VerifyToken(token, app.config['SECRET_KEY'])
        return self.UserCls.query.get(data['id'])
def requires_auth(self, f):
@wraps(f)
def decorated(*args, **kwargs):
try:
user = self.findUser()
kwargs['user'] = user
return f(*args, **kwargs)
except InvalidAuth as e:
return authenticate(e)
return decorated | mit | -7,228,420,097,349,261,000 | 31.974359 | 78 | 0.582011 | false | 4.642105 | false | false | false |
detiber/ansible | lib/ansible/modules/cloud/ovirt/ovirt_nics.py | 21 | 8344 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_nics
short_description: Module to manage network interfaces of Virtual Machines in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage network interfaces of Virtual Machines in oVirt."
options:
name:
description:
- "Name of the network interface to manage."
required: true
vm:
description:
- "Name of the Virtual Machine to manage."
required: true
state:
description:
- "Should the Virtual Machine NIC be present/absent/plugged/unplugged."
choices: ['present', 'absent', 'plugged', 'unplugged']
default: present
network:
description:
- "Logical network to which the VM network interface should use,
by default Empty network is used if network is not specified."
profile:
description:
- "Virtual network interface profile to be attached to VM network interface."
interface:
description:
- "Type of the network interface."
choices: ['virtio', 'e1000', 'rtl8139', 'pci_passthrough', 'rtl8139_virtio', 'spapr_vlan']
default: 'virtio'
mac_address:
description:
- "Custom MAC address of the network interface, by default it's obtained from MAC pool."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add NIC to VM
- ovirt_nics:
state: present
vm: myvm
name: mynic
interface: e1000
mac_address: 00:1a:4a:16:01:56
profile: ovirtmgmt
network: ovirtmgmt
# Plug NIC to VM
- ovirt_nics:
state: plugged
vm: myvm
name: mynic
# Unplug NIC from VM
- ovirt_nics:
state: unplugged
vm: myvm
name: mynic
# Remove NIC from VM
- ovirt_nics:
state: absent
vm: myvm
name: mynic
'''
RETURN = '''
id:
description: ID of the network interface which is managed
returned: On success if network interface is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
nic:
description: "Dictionary of all the network interface attributes. Network interface attributes can be found on your oVirt instance
                  at the following url: https://ovirt.example.com/ovirt-engine/api/model#types/nic."
returned: On success if network interface is found.
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_link_name,
ovirt_full_argument_spec,
search_by_name,
)
class VmNicsModule(BaseModule):
def __init__(self, *args, **kwargs):
super(VmNicsModule, self).__init__(*args, **kwargs)
self.vnic_id = None
@property
def vnic_id(self):
return self._vnic_id
@vnic_id.setter
def vnic_id(self, vnic_id):
self._vnic_id = vnic_id
def build_entity(self):
return otypes.Nic(
name=self._module.params.get('name'),
interface=otypes.NicInterface(
self._module.params.get('interface')
) if self._module.params.get('interface') else None,
vnic_profile=otypes.VnicProfile(
id=self.vnic_id,
) if self.vnic_id else None,
mac=otypes.Mac(
address=self._module.params.get('mac_address')
) if self._module.params.get('mac_address') else None,
)
def update_check(self, entity):
return (
equal(self._module.params.get('interface'), str(entity.interface)) and
equal(self._module.params.get('profile'), get_link_name(self._connection, entity.vnic_profile)) and
equal(self._module.params.get('mac_address'), entity.mac.address)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent', 'plugged', 'unplugged'],
default='present'
),
vm=dict(required=True),
name=dict(required=True),
interface=dict(default=None),
profile=dict(default=None),
network=dict(default=None),
mac_address=dict(default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
try:
# Locate the service that manages the virtual machines and use it to
# search for the NIC:
auth = module.params.pop('auth')
connection = create_connection(auth)
vms_service = connection.system_service().vms_service()
# Locate the VM, where we will manage NICs:
vm_name = module.params.get('vm')
vm = search_by_name(vms_service, vm_name)
if vm is None:
raise Exception("VM '%s' was not found." % vm_name)
# Locate the service that manages the virtual machines NICs:
vm_service = vms_service.vm_service(vm.id)
nics_service = vm_service.nics_service()
vmnics_module = VmNicsModule(
connection=connection,
module=module,
service=nics_service,
)
# Find vNIC id of the network interface (if any):
profile = module.params.get('profile')
if profile and module.params['network']:
cluster_name = get_link_name(connection, vm.cluster)
dcs_service = connection.system_service().data_centers_service()
dc = dcs_service.list(search='Clusters.name=%s' % cluster_name)[0]
networks_service = dcs_service.service(dc.id).networks_service()
network = next(
(n for n in networks_service.list()
if n.name == module.params['network']),
None
)
if network is None:
raise Exception(
"Network '%s' was not found in datacenter '%s'." % (
module.params['network'],
dc.name
)
)
for vnic in connection.system_service().vnic_profiles_service().list():
if vnic.name == profile and vnic.network.id == network.id:
vmnics_module.vnic_id = vnic.id
# Handle appropriate action:
state = module.params['state']
if state == 'present':
ret = vmnics_module.create()
elif state == 'absent':
ret = vmnics_module.remove()
elif state == 'plugged':
vmnics_module.create()
ret = vmnics_module.action(
action='activate',
action_condition=lambda nic: not nic.plugged,
wait_condition=lambda nic: nic.plugged,
)
elif state == 'unplugged':
vmnics_module.create()
ret = vmnics_module.action(
action='deactivate',
action_condition=lambda nic: nic.plugged,
wait_condition=lambda nic: not nic.plugged,
)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 | -2,644,823,634,523,632,600 | 31.216216 | 134 | 0.604147 | false | 3.998083 | false | false | false |
wwj718/xigua | today/models.py | 4 | 2015 | #coding=utf-8
from time import time
from urlparse import urlparse
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from mezzanine.core.models import Displayable, Ownable
from mezzanine.generic.models import Rating
from mezzanine.generic.fields import RatingField, CommentsField
class Link(Displayable, Ownable):
c=(('hc','清真餐厅'),('yc','一餐厅'),('ec','二餐厅'),('sc','三餐厅'),('jby','聚博园'),('other','未分类'))
canteen=models.CharField(max_length=20,choices=c,default='ec')
    link = models.URLField(blank=True)  # Not really needed; keep it rather than delete it (less hassle), just make sure it is never displayed. Done.
rating = RatingField()
comments = CommentsField()
solved = models.BooleanField(default=False)
@models.permalink
def get_absolute_url(self):
return ("link_detail", (), {"slug": self.slug})
@property
def domain(self):
return urlparse(self.link).netloc
class Profile(models.Model):
user = models.OneToOneField("auth.User")
website = models.URLField(blank=True)
bio = models.TextField(blank=True)
karma = models.IntegerField(default=0, editable=False)
def __unicode__(self):
return "%s (%s)" % (self.user, self.karma)
@receiver(post_save, sender=Rating)
def karma(sender, **kwargs):
"""
Each time a rating is saved, check its value and modify the
profile karma for the related object's user accordingly.
Since ratings are either +1/-1, if a rating is being edited,
we can assume that the existing rating is in the other direction,
so we multiply the karma modifier by 2.
"""
rating = kwargs["instance"]
value = int(rating.value)
if not kwargs["created"]:
value *= 2
content_object = rating.content_object
if rating.user != content_object.user:
queryset = Profile.objects.filter(user=content_object.user)
queryset.update(karma=models.F("karma") + value)
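# Worked example (illustrative): editing an existing +1 rating down to -1
# arrives here with value=-1 and created=False, so value*2 == -2 both removes
# the old +1 and applies the new -1 in a single update.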
| bsd-2-clause | 4,043,699,385,481,912,000 | 32.631579 | 90 | 0.687533 | false | 3.232715 | false | false | false |
mitchellzen/pops | satchmo/projects/skeleton/manage.py | 1 | 1111 | #!/usr/bin/env python
import os.path
import sys
DIRNAME = os.path.dirname(__file__)
# trick to get the two-levels up directory, which for the "simple" project should be the satchmo dir
_parent = lambda x: os.path.normpath(os.path.join(x, '..'))
SATCHMO_DIRNAME = _parent(_parent(DIRNAME))
SATCHMO_APPS = os.path.join(SATCHMO_DIRNAME, 'apps')
if not SATCHMO_APPS in sys.path:
sys.path.append(SATCHMO_APPS)
if not DIRNAME in sys.path:
sys.path.append(DIRNAME)
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from django.core.management import execute_from_command_line
print "sysarv",sys.argv
execute_from_command_line(sys.argv) | bsd-3-clause | 6,294,482,186,318,063,000 | 36.066667 | 299 | 0.718272 | false | 3.286982 | false | false | false |
bendudson/BOUT | tools/tokamak_grids/pyGridGen/aeqdsk.py | 4 | 18365 | #!/usr/bin/env python
import re
import numpy
"""
@brief A-Eqdsk reader class
@version $Id$
Copyright © 2006-2008, Tech-X Corporation, Boulder, CO
See LICENSE file for conditions of use.
The official document describing a-eqdsk files:
http://fusion.gat.com/THEORY/efit/a_eqdsk.html
"""
class Aeqdsk:
def __init__(self):
"""
Constructor
"""
self.data = {}
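    # Usage sketch (hypothetical filename):
    #   eq = Aeqdsk()
    #   eq.openFile('a123456.00100')
    #   time_ms, description = eq.data['time']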
def openFile(self, filename):
"""
open aeqdsk file and parse its content
"""
fmt_1060 = r'^\s*\*\s*([\w\.\-]+)\s+(\d+)\s+(\d+)\s([\w]+)\s+(\d+)\s+(\d+)\s([\w ]+)\s+\d+\s+\d+\s*$'
fmt_1040 = r'^\s*' + 4*r'([\s\-]\d+\.\d+[Ee][\+\-]\d\d)'
fmt_1041 = r'^' + 4*r'\s+([ \-]\d+)'
lines = open(filename, 'r').readlines()
counter = 0
m = None
while m == None:
line = lines[counter]
m = re.match(fmt_1060, line)
counter += 1
# read (neqdsk,1060) time(jj),jflag(jj),lflag,limloc(jj), mco2v,mco2r,qmflag
if m:
self.data['time'] = float(m.group(1)), 'time ms'
self.data['jflag'] = int(m.group(2)), '0 if error'
self.data['lflag'] = int(m.group(3)), '>0 if error'
self.data['limloc'] = m.group(4), 'IN/OUT/TOP/BOT: limiter inside/outside/top/bot SNT/SNB: single null top/bottom DN: double null'
self.data['mco2v'] = int(m.group(5)), 'number of vertical CO2 density chords'
self.data['mco2r'] = int(m.group(6)), 'number of radial CO2 density chords'
self.data['qmflag'] = m.group(7), 'axial q(0) flag, FIX if constrained and CLC for float'
else:
            raise Exception('Read error at line %d' % (counter-1))
# read (neqdsk,1040) tsaisq(jj),rcencm,bcentr(jj),pasmat(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['tsaisq'] = float(m.group(1)), "total chi2 from magnetic probes, flux loops, Rogowskiand external coils"
self.data['rcencm'] = float(m.group(2)), "major radius in cm for vacuum field BCENTR"
self.data['bcentr'] = float(m.group(3)), "vacuum toroidal magnetic field in Tesla at RCENCM"
self.data['pasmat'] = float(m.group(4)), "measured plasma toroidal current in Ampere"
counter += 1
else:
            raise Exception('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) cpasma(jj),rout(jj),zout(jj),aout(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['cpasma'] = float(m.group(1)), "fitted plasma toroidal current in Ampere-turn"
self.data['rout'] = float(m.group(2)), "major radius of geometric center in cm"
self.data['zout'] = float(m.group(3)), "Z of geometric center in cm"
self.data['aout'] = float(m.group(4)), "plasma minor radius in cm"
counter += 1
else:
            raise Exception('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) eout(jj),doutu(jj),doutl(jj),vout(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['eout'] = float(m.group(1)), "Plasma boundary elongation"
self.data['doutu'] = float(m.group(2)), "upper triangularity"
self.data['doutl'] = float(m.group(3)), "lower triangularity"
self.data['vout'] = float(m.group(4)), "plasma volume in cm3"
counter += 1
else:
            raise Exception('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) rcurrt(jj),zcurrt(jj),qsta(jj),betat(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['rcurrt'] = float(m.group(1)), "major radius in cm of current centroid"
self.data['zcurrt'] = float(m.group(2)), "Z in cm at current centroid"
self.data['qsta'] = float(m.group(3)), "equivalent safety factor q*"
self.data['betat'] = float(m.group(4)), "toroidal b in %"
counter += 1
else:
            raise Exception('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) betap(jj),ali(jj),oleft(jj),oright(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['betap'] = float(m.group(1)), "poloidal b with normalization average poloidal magnetic BPOLAV defined through Ampere's law"
self.data['ali'] = float(m.group(2)), "li with normalization average poloidal magnetic defined through Ampere's law"
self.data['oleft'] = float(m.group(3)), "plasma inner gap in cm"
self.data['oright'] = float(m.group(4)), "plasma outer gap in cm"
counter += 1
else:
            raise Exception('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) otop(jj),obott(jj),qpsib(jj),vertn(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['otop'] = float(m.group(1)), "plasma top gap in cm"
self.data['obott'] = float(m.group(2)), "plasma bottom gap in cm"
self.data['qpsib'] = float(m.group(3)), "q at 95% of poloidal flux"
self.data['vertn'] = float(m.group(4)), "vacuum field index at current centroid"
counter += 1
else:
            raise Exception('Read error at line %d:%s' % (counter, line))
mco2v = self.data['mco2v'][0]
print 'mco2v=', mco2v
# read (neqdsk,1040) (rco2v(k,jj),k=1,mco2v)
data = []
while len(data) < mco2v:
line = lines[counter]
data += eval('[' + re.sub(r'(\d)\s*([\s\-])\s*(\d)', '\\1, \\2\\3', line) + ']')
counter += 1
self.data['rco2v'] = numpy.array(data), "path length in cm of vertical CO2 density chord"
# read (neqdsk,1040) (dco2v(jj,k),k=1,mco2v)
data = []
while len(data) < mco2v:
line = lines[counter]
data += eval('[' + re.sub(r'(\d)\s*([\s\-])\s*(\d)', '\\1, \\2\\3', line) + ']')
counter += 1
self.data['dco2v'] = numpy.array(data), "line average electron density in cm3 from vertical CO2 chord"
mco2r = self.data['mco2r'][0]
print 'mco2r=', mco2r
# read (neqdsk,1040) (rco2r(k,jj),k=1,mco2r)
data = []
while len(data) < mco2r:
line = lines[counter]
data += eval('[' + re.sub(r'(\d)\s*([\s\-])\s*(\d)', '\\1, \\2\\3', line) + ']')
counter += 1
self.data['rco2r'] = numpy.array(data), "path length in cm of radial CO2 density chord"
# read (neqdsk,1040) (dco2r(jj,k),k=1,mco2r)
data = []
while len(data) < mco2r:
line = lines[counter]
data += eval('[' + re.sub(r'(\d)\s*([\s\-])\s*(\d)', '\\1, \\2\\3', line) + ']')
counter += 1
self.data['dco2r'] = numpy.array(data), "line average electron density in cm3 from radial CO2 chord"
# read (neqdsk,1040) shearb(jj),bpolav(jj),s1(jj),s2(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['shearb'] = float(m.group(1)), ""
self.data['bpolav'] = float(m.group(2)), "average poloidal magnetic field in Tesla defined through Ampere's law"
self.data['s1'] = float(m.group(3)), "Shafranov boundary line integrals"
self.data['s2'] = float(m.group(4)), "Shafranov boundary line integrals"
counter += 1
else:
            raise Exception('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) s3(jj),qout(jj),olefs(jj),orighs(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['s3'] = float(m.group(1)), "Shafranov boundary line integrals"
self.data['qout'] = float(m.group(2)), "q at plasma boundary"
self.data['olefs'] = float(m.group(3)), ""
self.data['orighs'] = float(m.group(4)), "outer gap of external second separatrix in cm"
counter += 1
else:
            raise Exception('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) otops(jj),sibdry(jj),areao(jj),wplasm(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['otops'] = float(m.group(1)), "top gap of external second separatrix in cm"
self.data['sibdry'] = float(m.group(2)), ""
self.data['areao'] = float(m.group(3)), "cross sectional area in cm2"
self.data['wplasm'] = float(m.group(4)), ""
counter += 1
else:
            raise ValueError('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) terror(jj),elongm(jj),qqmagx(jj),cdflux(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['terror'] = float(m.group(1)), "equilibrium convergence error"
self.data['elongm'] = float(m.group(2)), "elongation at magnetic axis"
self.data['qqmagx'] = float(m.group(3)), "axial safety factor q(0)"
self.data['cdflux'] = float(m.group(4)), "computed diamagnetic flux in Volt-sec"
counter += 1
else:
            raise ValueError('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) alpha(jj),rttt(jj),psiref(jj),xndnt(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['alpha'] = float(m.group(1)), "Shafranov boundary line integral parameter"
self.data['rttt'] = float(m.group(2)), "Shafranov boundary line integral parameter"
self.data['psiref'] = float(m.group(3)), "reference poloidal flux in VS/rad"
self.data['xndnt'] = float(m.group(4)), "vertical stability parameter, vacuum field index normalized to critical index value"
counter += 1
else:
            raise ValueError('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) rseps(1,jj),zseps(1,jj),rseps(2,jj),zseps(2,jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['rseps1'] = float(m.group(1)), "major radius of x point in cm"
self.data['zseps1'] = float(m.group(2)), ""
self.data['rseps2'] = float(m.group(3)), "major radius of x point in cm"
self.data['zseps2'] = float(m.group(4)), ""
counter += 1
else:
            raise ValueError('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) sepexp(jj),obots(jj),btaxp(jj),btaxv(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['sepexp'] = float(m.group(1)), "separatrix radial expansion in cm"
self.data['obots'] = float(m.group(2)), "bottom gap of external second separatrix in cm"
self.data['btaxp'] = float(m.group(3)), "toroidal magnetic field at magnetic axis in Tesla"
self.data['btaxv'] = float(m.group(4)), "vacuum toroidal magnetic field at magnetic axis in Tesla"
counter += 1
else:
            raise ValueError('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) aaq1(jj),aaq2(jj),aaq3(jj),seplim(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['aaq1'] = float(m.group(1)), "minor radius of q=1 surface in cm, 100 if not found"
self.data['aaq2'] = float(m.group(2)), "minor radius of q=2 surface in cm, 100 if not found"
self.data['aaq3'] = float(m.group(3)), "minor radius of q=3 surface in cm, 100 if not found"
self.data['seplim'] = float(m.group(4)), "> 0 for minimum gap in cm in divertor configurations, < 0 absolute value for minimum distance to external separatrix in limiter configurations"
counter += 1
else:
            raise ValueError('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040) rmagx(jj),zmagx(jj),simagx(jj),taumhd(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['rmagx'] = float(m.group(1)), "major radius in cm at magnetic axis"
self.data['zmagx'] = float(m.group(2)), ""
self.data['simagx'] = float(m.group(3)), ""
self.data['taumhd'] = float(m.group(4)), "energy confinement time in ms"
counter += 1
else:
            raise ValueError('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040,err=380) betapd(jj),betatd(jj),wplasmd(jj),diamag(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['betapd'] = float(m.group(1)), "diamagnetic poloidal b"
self.data['betatd'] = float(m.group(2)), "diamagnetic toroidal b in %"
self.data['wplasmd'] = float(m.group(3)), "diamagnetic plasma stored energy in Joule"
self.data['fluxx'] = float(m.group(4)), "measured diamagnetic flux in Volt-sec"
counter += 1
else:
            raise ValueError('Read error at line %d:%s' % (counter, line))
# read (neqdsk,1040,err=380) vloopt(jj),taudia(jj),qmerci(jj),tavem(jj)
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['vloopt'] = float(m.group(1)), "measured loop voltage in volt"
self.data['taudia'] = float(m.group(2)), "diamagnetic energy confinement time in ms"
self.data['qmerci'] = float(m.group(3)), "Mercier stability criterion on axial q(0), q(0) > QMERCI for stability"
self.data['tavem'] = float(m.group(4)), "average time in ms for magnetic and MSE data"
counter += 1
else:
            raise ValueError('Read error at line %d:%s' % (counter, line))
        # ishot > 91000: extra record giving the counts nsilop, magpri, nfcoil, nesum
line = lines[counter]
m = re.match(fmt_1041, line)
if m:
self.data['nsilop'] = int(m.group(1)), ""
self.data['magpri'] = int(m.group(2)), ""
self.data['nfcoil'] = int(m.group(3)), ""
self.data['nesum'] = int(m.group(4)), ""
counter += 1
else:
            raise ValueError('Read error at line %d:%s' % (counter, line))
nsilop = self.data['nsilop'][0]
magpri = self.data['magpri'][0]
        print('nsilop=', nsilop, ' magpri=', magpri)
data = []
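        # the first nsilop values are flux loop signals, the remaining magpri
        # values are magnetic probe signals; the combined list is split below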
while len(data) < nsilop + magpri:
line = lines[counter]
data += eval('[' + re.sub(r'(\d)\s*([\s\-])\s*(\d)', '\\1, \\2\\3', line) + ']')
counter += 1
self.data['csilop'] = numpy.array( data[:nsilop] ), "computed flux loop signals in Weber"
self.data['cmpr2'] = numpy.array( data[nsilop:] ), ""
#
data = []
nfcoil = self.data['nfcoil'][0]
while len(data) < nfcoil:
line = lines[counter]
data += eval('[' + re.sub(r'(\d)\s*([\s\-])\s*(\d)', '\\1, \\2\\3', line) + ']')
counter += 1
self.data['ccbrsp'] = numpy.array(data), "computed external coil currents in Ampere"
data = []
nesum = self.data['nesum'][0]
while len(data) < nesum:
line = lines[counter]
data += eval('[' + re.sub(r'(\d)\s*([\s\-])\s*(\d)', '\\1, \\2\\3', line) + ']')
counter += 1
self.data['eccurt'] = numpy.array(data), "measured E-coil current in Ampere"
#
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['pbinj'] = float(m.group(1)), "neutral beam injection power in Watts"
self.data['rvsin'] = float(m.group(2)), "major radius of vessel inner hit spot in cm"
self.data['zvsin'] = float(m.group(3)), "Z of vessel inner hit spot in cm"
self.data['rvsout'] = float(m.group(4)), "major radius of vessel outer hit spot in cm"
counter += 1
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['zvsout'] = float(m.group(1)), "Z of vessel outer hit spot in cm"
self.data['vsurfa'] = float(m.group(2)), "plasma surface loop voltage in volt, E EQDSK only"
self.data['wpdot'] = float(m.group(3)), "time derivative of plasma stored energy in Watt, E EQDSK only"
self.data['wbdot'] = float(m.group(4)), "time derivative of poloidal magnetic energy in Watt, E EQDSK only"
counter += 1
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['slantu'] = float(m.group(1)), ""
self.data['slantl'] = float(m.group(2)), ""
self.data['zuperts'] = float(m.group(3)), ""
self.data['chipre'] = float(m.group(4)), "total chi2 pressure"
counter += 1
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['cjor95'] = float(m.group(1)), ""
self.data['pp95'] = float(m.group(2)), "normalized P'(y) at 95% normalized poloidal flux"
self.data['ssep'] = float(m.group(3)), ""
self.data['yyy2'] = float(m.group(4)), "Shafranov Y2 current moment"
counter += 1
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['xnnc'] = float(m.group(1)), ""
self.data['cprof'] = float(m.group(2)), "current profile parametrization parameter"
#self.data['oring'] = float(m.group(3)), "" (not used)
self.data['cjor0'] = float(m.group(4)), "normalized flux surface average current density at 99% of normalized poloidal flux"
counter += 1
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['fexpan'] = float(m.group(1)), "flux expansion at x point"
self.data['qqmin'] = float(m.group(2)), "minimum safety factor qmin"
self.data['chigamt'] = float(m.group(3)), "total chi2 MSE"
self.data['ssi01'] = float(m.group(4)), "magnetic shear at 1% of normalized poloidal flux"
counter += 1
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['fexpvs'] = float(m.group(1)), "flux expansion at outer lower vessel hit spot"
self.data['sepnose'] = float(m.group(2)), "radial distance in cm between x point and external field line at ZNOSE"
self.data['ssi95'] = float(m.group(3)), "magnetic shear at 95% of normalized poloidal flux"
self.data['rqqmin'] = float(m.group(4)), "normalized radius of qmin , square root of normalized volume"
counter += 1
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['cjor99'] = float(m.group(1)), ""
self.data['cj1ave'] = float(m.group(2)), "normalized average current density in plasma outer 5% normalized poloidal flux region"
self.data['rmidin'] = float(m.group(3)), "inner major radius in m at Z=0.0"
self.data['rmidout'] = float(m.group(4)), "outer major radius in m at Z=0.0"
counter += 1
line = lines[counter]
m = re.match(fmt_1040, line)
if m:
self.data['psurfa'] = float(m.group(1)), "plasma boundary surface area in m2"
#self.data[''] = float(m.group(2)), ""
#self.data[''] = float(m.group(3)), ""
#self.data[''] = float(m.group(4)), ""
counter += 1
def getAll(self):
return self.data
def getAllVars(self):
return self.data.keys()
def get(self, varname):
return self.data[varname]
################################
def main():
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="g-eqdsk file", default="")
parser.add_option("-a", "--all", dest="all",
help="display all variables", action="store_true",)
parser.add_option("-v", "--vars", dest="vars",
help="comma separated list of variables (use '-v \"*\"' for all)", default="*")
#parser.add_option("-p", "--plot", dest="plot",
# help="plot all variables", action="store_true",)
parser.add_option("-i", "--inquire", dest="inquire",
help="inquire list of variables", action="store_true",)
options, args = parser.parse_args()
if not options.filename:
parser.error("MUST provide filename (type -h for list of options)")
eq = Aeqdsk()
eq.openFile(options.filename)
if options.inquire:
        print(eq.getAllVars())
if options.all:
        print(eq.getAll())
vs = eq.getAllVars()
if options.vars != '*':
vs = options.vars.split(',')
for v in vs:
        print('%s: %s' % (v, str(eq.get(v))))
if __name__ == '__main__':
    main()
| gpl-3.0 | 9,152,135,535,150,446,000 | 37.826638 | 190 | 0.61443 | false | 2.622448 | false | false | false |
valentin8709/AES_El-Gamal | keyExpansion.py | 1 | 1429 | from aes_base import sbox
from aes_base import Rcon
# AES key schedule: expand the cipher key into Nb * (Nr + 1) round-key words
def keyExpansion(key, key_size):
    Nb = 4   # state columns; Nk = key words; Nr = number of rounds
    if key_size == 128:
        Nk = 4
        Nr = 10
    elif key_size == 192:
        Nk = 6
        Nr = 12
    elif key_size == 256:
        Nk = 8
        Nr = 14
    else:
        raise ValueError("keyExpansion: bad key size")
key = process_key(key, Nk)
w = []
for word in key:
w.append(word[:])
i = Nk
while i < Nb * (Nr + 1):
temp = w[i-1][:]
if i % Nk == 0:
temp = SubWord(RotWord(temp))
temp[0] ^= Rcon[(i//Nk)]
elif Nk > 6 and i % Nk == 4:
temp = SubWord(temp)
for j in range(len(temp)):
temp[j] ^= w[i-Nk][j]
w.append(temp[:])
i += 1
return w
def SubWord(word):
    """Apply the AES S-box to each byte of a four-byte word."""
    return [sbox[byte] for byte in word]
def RotWord(word):
    """Rotate a four-byte word one byte to the left."""
    return word[1:] + word[0:1]
def process_key(key, Nk):
    """Parse a hex-string key into Nk four-byte words."""
    try:
        key = key.replace(" ", "")
        return [[int(key[i*8+j*2:i*8+j*2+2], 16) for j in range(4)]
                for i in range(Nk)]
    except ValueError:
        print("Password must be hexadecimal.")
        exit()
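# Minimal usage sketch (hypothetical key value, for illustration only):
#   w = keyExpansion("000102030405060708090a0b0c0d0e0f", 128)
#   len(w) == 44   # Nb * (Nr + 1) four-byte round-key words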
| unlicense | 9,186,544,115,072,306,000 | 24.517857 | 75 | 0.398181 | false | 3.645408 | false | false | false |
snowflakedb/snowflake-sqlalchemy | test/test_copy.py | 1 | 15255 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#
import pytest
from snowflake.sqlalchemy import (
AWSBucket,
AzureContainer,
CopyFormatter,
CopyIntoStorage,
CSVFormatter,
ExternalStage,
JSONFormatter,
PARQUETFormatter,
)
from sqlalchemy import Column, Integer, MetaData, Sequence, String, Table
from sqlalchemy.sql import select, text
def test_external_stage(sql_compiler):
assert ExternalStage.prepare_namespace("something") == "something."
assert ExternalStage.prepare_path("prefix") == "/prefix"
# All arguments are handled
assert (
sql_compiler(ExternalStage(name="name", path="prefix/path", namespace="namespace")) == "@namespace.name/prefix/path"
)
# defaults don't ruin things
assert sql_compiler(ExternalStage(name="name", path=None, namespace=None)) == "@name"
def test_copy_into_location(engine_testaccount, sql_compiler):
meta = MetaData()
conn = engine_testaccount.connect()
food_items = Table("python_tests_foods", meta,
Column('id', Integer, Sequence('new_user_id_seq'), primary_key=True),
Column('name', String),
Column('quantity', Integer))
meta.create_all(engine_testaccount)
copy_stmt_1 = CopyIntoStorage(from_=food_items,
into=AWSBucket.from_uri('s3://backup').encryption_aws_sse_kms(
'1234abcd-12ab-34cd-56ef-1234567890ab'),
formatter=CSVFormatter().record_delimiter('|').escape(None).null_if(['null', 'Null']))
assert (sql_compiler(copy_stmt_1) == "COPY INTO 's3://backup' FROM python_tests_foods FILE_FORMAT=(TYPE=csv "
"ESCAPE=None NULL_IF=('null', 'Null') RECORD_DELIMITER='|') ENCRYPTION="
"(KMS_KEY_ID='1234abcd-12ab-34cd-56ef-1234567890ab' TYPE='AWS_SSE_KMS')")
copy_stmt_2 = CopyIntoStorage(from_=select([food_items]).where(food_items.c.id == 1), # Test sub-query
into=AWSBucket.from_uri('s3://backup').credentials(
aws_role='some_iam_role').encryption_aws_sse_s3(),
formatter=JSONFormatter().file_extension('json').compression('zstd'))
assert (sql_compiler(copy_stmt_2) == "COPY INTO 's3://backup' FROM (SELECT python_tests_foods.id, "
"python_tests_foods.name, python_tests_foods.quantity FROM python_tests_foods "
"WHERE python_tests_foods.id = 1) FILE_FORMAT=(TYPE=json COMPRESSION='zstd' "
"FILE_EXTENSION='json') CREDENTIALS=(AWS_ROLE='some_iam_role') "
"ENCRYPTION=(TYPE='AWS_SSE_S3')")
copy_stmt_3 = CopyIntoStorage(from_=food_items,
into=AzureContainer.from_uri(
'azure://snowflake.blob.core.windows.net/snowpile/backup'
).credentials('token'),
formatter=PARQUETFormatter().snappy_compression(True))
assert (sql_compiler(copy_stmt_3) == "COPY INTO 'azure://snowflake.blob.core.windows.net/snowpile/backup' "
"FROM python_tests_foods FILE_FORMAT=(TYPE=parquet SNAPPY_COMPRESSION=true) "
"CREDENTIALS=(AZURE_SAS_TOKEN='token')")
copy_stmt_3.maxfilesize(50000000)
assert (sql_compiler(copy_stmt_3) == "COPY INTO 'azure://snowflake.blob.core.windows.net/snowpile/backup' "
"FROM python_tests_foods FILE_FORMAT=(TYPE=parquet SNAPPY_COMPRESSION=true) "
"MAX_FILE_SIZE = 50000000 "
"CREDENTIALS=(AZURE_SAS_TOKEN='token')")
copy_stmt_4 = CopyIntoStorage(from_=AWSBucket.from_uri('s3://backup').encryption_aws_sse_kms(
'1234abcd-12ab-34cd-56ef-1234567890ab'),
into=food_items,
formatter=CSVFormatter().record_delimiter('|').escape(None).null_if(['null', 'Null']))
assert (sql_compiler(copy_stmt_4) == "COPY INTO python_tests_foods FROM 's3://backup' FILE_FORMAT=(TYPE=csv "
"ESCAPE=None NULL_IF=('null', 'Null') RECORD_DELIMITER='|') ENCRYPTION="
"(KMS_KEY_ID='1234abcd-12ab-34cd-56ef-1234567890ab' TYPE='AWS_SSE_KMS')")
copy_stmt_5 = CopyIntoStorage(from_=AWSBucket.from_uri('s3://backup').encryption_aws_sse_kms(
'1234abcd-12ab-34cd-56ef-1234567890ab'),
into=food_items,
formatter=CSVFormatter().field_delimiter(','))
assert (sql_compiler(copy_stmt_5) == "COPY INTO python_tests_foods FROM 's3://backup' FILE_FORMAT=(TYPE=csv "
"FIELD_DELIMITER=',') ENCRYPTION="
"(KMS_KEY_ID='1234abcd-12ab-34cd-56ef-1234567890ab' TYPE='AWS_SSE_KMS')")
copy_stmt_6 = CopyIntoStorage(from_=food_items, into=ExternalStage(name="stage_name"), formatter=CSVFormatter())
assert sql_compiler(copy_stmt_6) == "COPY INTO @stage_name FROM python_tests_foods FILE_FORMAT=(TYPE=csv)"
copy_stmt_7 = CopyIntoStorage(from_=food_items, into=ExternalStage(name="stage_name", path="prefix/file", namespace="name"), formatter=CSVFormatter())
assert sql_compiler(copy_stmt_7) == "COPY INTO @name.stage_name/prefix/file FROM python_tests_foods FILE_FORMAT=(TYPE=csv)"
    # NOTE: besides checking the compiled text, we submit the statements to the
    # RegressionTests environment and expect them to fail, but only for
    # acceptable (credential/stage-related) reasons
try:
acceptable_exc_reasons = {'Failure using stage area',
'AWS_ROLE credentials are not allowed for this account.',
'AWS_ROLE credentials are invalid'}
for stmnt in (copy_stmt_1, copy_stmt_2, copy_stmt_3, copy_stmt_4):
with pytest.raises(Exception) as exc:
conn.execute(stmnt)
if not any(map(lambda reason: reason in str(exc) or reason in str(exc.value), acceptable_exc_reasons)):
raise Exception("Not acceptable exception: {} {}".format(str(exc), str(exc.value)))
finally:
conn.close()
food_items.drop(engine_testaccount)
def test_copy_into_storage_csv_extended(sql_compiler):
"""
This test compiles the SQL to read CSV data from a stage and insert it into a
table.
The CSV formatting statements are inserted inline, i.e. no explicit SQL definition
of that format is necessary.
The Stage is a named stage, i.e. we assume that a CREATE STAGE statement was
executed before. This way, the COPY INTO statement does not need to know any
security details (credentials or tokens)
"""
# target table definition (NB: this could be omitted for the test, since the
# SQL statement copies the whole CSV and assumes the target structure matches)
metadata = MetaData()
target_table = Table(
"TEST_IMPORT",
metadata,
Column("COL1", Integer, primary_key=True),
Column("COL2", String),
)
# define a source stage (root path)
root_stage = ExternalStage(
name="AZURE_STAGE",
namespace="ML_POC.PUBLIC",
)
# define a CSV formatter
formatter = (
CSVFormatter()
.compression("AUTO")
.field_delimiter(",")
.record_delimiter(r"\n")
.field_optionally_enclosed_by(None)
.escape(None)
.escape_unenclosed_field(r"\134")
.date_format("AUTO")
.null_if([r"\N"])
.skip_header(1)
.trim_space(False)
.error_on_column_count_mismatch(True)
)
# define CopyInto object; reads all CSV data (=> pattern) from
# the sub-path "testdata" beneath the root stage
copy_into = CopyIntoStorage(
from_=ExternalStage.from_parent_stage(root_stage, "testdata"),
into=target_table,
formatter=formatter
)
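    # copy_options values are rendered verbatim into the SQL, so the pattern
    # string must carry its own single quotes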
copy_into.copy_options = {"pattern": "'.*csv'", "force": "TRUE"}
# check that the result is as expected
result = sql_compiler(copy_into)
expected = (
r"COPY INTO TEST_IMPORT "
r"FROM @ML_POC.PUBLIC.AZURE_STAGE/testdata "
r"FILE_FORMAT=(TYPE=csv COMPRESSION='auto' DATE_FORMAT='AUTO' "
r"ERROR_ON_COLUMN_COUNT_MISMATCH=True ESCAPE=None "
r"ESCAPE_UNENCLOSED_FIELD='\134' FIELD_DELIMITER=',' "
r"FIELD_OPTIONALLY_ENCLOSED_BY=None NULL_IF=('\N') RECORD_DELIMITER='\n' "
r"SKIP_HEADER=1 TRIM_SPACE=False) force = TRUE pattern = '.*csv'"
)
assert result == expected
def test_copy_into_storage_parquet_named_format(sql_compiler):
"""
This test compiles the SQL to read Parquet data from a stage and insert it into a
table. The source file is accessed using a SELECT statement.
The Parquet formatting definitions are defined in a named format which was
explicitly created before.
The Stage is a named stage, i.e. we assume that a CREATE STAGE statement was
executed before. This way, the COPY INTO statement does not need to know any
security details (credentials or tokens)
"""
# target table definition (NB: this could be omitted for the test, as long as
# the statement is not executed)
metadata = MetaData()
target_table = Table(
"TEST_IMPORT",
metadata,
Column("COL1", Integer, primary_key=True),
Column("COL2", String),
)
# define a source stage (root path)
root_stage = ExternalStage(
name="AZURE_STAGE",
namespace="ML_POC.PUBLIC",
)
# define the SELECT statement to access the source file.
    # we could probably define source table metadata and use SQLAlchemy Column
    # objects instead of text(), but this seems to be the easiest way.
sel_statement = select(
text("$1:COL1::number"),
text("$1:COL2::varchar")
).select_from(
ExternalStage.from_parent_stage(root_stage, "testdata/out.parquet")
)
# use an existing source format.
formatter = CopyFormatter(format_name="parquet_file_format")
# setup CopyInto object
copy_into = CopyIntoStorage(
from_=sel_statement,
into=target_table,
formatter=formatter
)
copy_into.copy_options = {"force": "TRUE"}
# compile and check the result
result = sql_compiler(copy_into)
expected = (
"COPY INTO TEST_IMPORT "
"FROM (SELECT $1:COL1::number, $1:COL2::varchar "
"FROM @ML_POC.PUBLIC.AZURE_STAGE/testdata/out.parquet) "
"FILE_FORMAT=(format_name = parquet_file_format) force = TRUE"
)
assert result == expected
def test_copy_into_storage_parquet_files(sql_compiler):
"""
This test compiles the SQL to read Parquet data from a stage and insert it into a
table. The source file is accessed using a SELECT statement.
The Parquet formatting definitions are defined in a named format which was
explicitly created before. The format is specified as a property of the stage,
not the CopyInto object.
The Stage is a named stage, i.e. we assume that a CREATE STAGE statement was
executed before. This way, the COPY INTO statement does not need to know any
security details (credentials or tokens).
The FORCE option is set using the corresponding function in CopyInto.
The FILES option is set to choose the files to upload
"""
# target table definition (NB: this could be omitted for the test, as long as
# the statement is not executed)
metadata = MetaData()
target_table = Table(
"TEST_IMPORT",
metadata,
Column("COL1", Integer, primary_key=True),
Column("COL2", String),
)
# define a source stage (root path)
root_stage = ExternalStage(
name="AZURE_STAGE",
namespace="ML_POC.PUBLIC",
)
# define the SELECT statement to access the source file.
    # we could probably define source table metadata and use SQLAlchemy Column
    # objects instead of text(), but this seems to be the easiest way.
sel_statement = select(
text("$1:COL1::number"),
text("$1:COL2::varchar")
).select_from(
ExternalStage.from_parent_stage(root_stage, "testdata/out.parquet", file_format="parquet_file_format")
)
# setup CopyInto object
copy_into = CopyIntoStorage(
from_=sel_statement,
into=target_table,
).force(True).files(["foo.txt", "bar.txt"])
# compile and check the result
result = sql_compiler(copy_into)
expected = (
"COPY INTO TEST_IMPORT "
"FROM (SELECT $1:COL1::number, $1:COL2::varchar "
"FROM @ML_POC.PUBLIC.AZURE_STAGE/testdata/out.parquet "
"(file_format => parquet_file_format)) FILES = ('foo.txt','bar.txt') "
"FORCE = true"
)
assert result == expected
def test_copy_into_storage_parquet_pattern(sql_compiler):
"""
This test compiles the SQL to read Parquet data from a stage and insert it into a
table. The source file is accessed using a SELECT statement.
The Parquet formatting definitions are defined in a named format which was
explicitly created before. The format is specified as a property of the stage,
not the CopyInto object.
The Stage is a named stage, i.e. we assume that a CREATE STAGE statement was
executed before. This way, the COPY INTO statement does not need to know any
security details (credentials or tokens).
The FORCE option is set using the corresponding function in CopyInto.
The PATTERN option is set to choose multiple files
"""
# target table definition (NB: this could be omitted for the test, as long as
# the statement is not executed)
metadata = MetaData()
target_table = Table(
"TEST_IMPORT",
metadata,
Column("COL1", Integer, primary_key=True),
Column("COL2", String),
)
# define a source stage (root path)
root_stage = ExternalStage(
name="AZURE_STAGE",
namespace="ML_POC.PUBLIC",
)
# define the SELECT statement to access the source file.
    # we could probably define source table metadata and use SQLAlchemy Column
    # objects instead of text(), but this seems to be the easiest way.
sel_statement = select(
text("$1:COL1::number"),
text("$1:COL2::varchar")
).select_from(
ExternalStage.from_parent_stage(root_stage, "testdata/out.parquet", file_format="parquet_file_format")
)
# setup CopyInto object
copy_into = CopyIntoStorage(
from_=sel_statement,
into=target_table,
).force(True).pattern("'.*csv'")
# compile and check the result
result = sql_compiler(copy_into)
expected = (
"COPY INTO TEST_IMPORT "
"FROM (SELECT $1:COL1::number, $1:COL2::varchar "
"FROM @ML_POC.PUBLIC.AZURE_STAGE/testdata/out.parquet "
"(file_format => parquet_file_format)) FORCE = true PATTERN = '.*csv'"
)
assert result == expected
| apache-2.0 | -487,141,385,498,174,500 | 43.34593 | 154 | 0.623271 | false | 3.917565 | true | false | false |
aramusss/contableplus | controller/userLogin.py | 1 | 4041 | __author__ = 'adria'
#!/usr/bin/python
from dataBase import *
import sys
sys.path.insert(0, '../model')  # otherwise the model package cannot be imported
from owner import *
class UserLogin:
def __init__(self, owner):
self.owner = owner
self.db = DataBase()
        self.registered = False  # whether the user has already logged in
def enterLogin(self):
"""Like the 'login' method, but asks for the user data to be written in the terminal"""
self.askUserData()
while True:
result = self.login()
if result == 1:
self.askUserData()
elif result == 2:
create = input("Would you like to create it?(Y/N): ")
if create.lower() == "y" or create.lower() == "":
self.db.afegeixUsuari(self.owner.dni, self.owner.nombre, self.owner.apellidos)
break
else:
break
def askUserData(self):
"""Sets the self.owner information with the parameters the user writes on the terminal"""
while True:
print("Insert your personal information to log in:")
name = input("Name: ")
surname = input("Surname: ")
dni = input("DNI: ")
if name and surname and dni:
self.owner = Owner(dni, surname, name)
break
else:
print("Error, one or more of the fields is empty, write it again:\n")
def login(self, owner=None):
"""Checks if the user is on the database and logs in"""
result = 0
if owner is not None:
self.owner = owner
if self.userExists():
if self.checkUser():
self.registered = True
print("You have succesfully logged in\n")
else:
print("Error! name or surname incorrect\n")
result = 1
else:
print("Error, user with DNI "+self.owner.dni+" doesn't exist\n")
result = 2
return result
def llistaDNI(self):
"""Lists all DNI's"""
llista = []
llistacompleta = self.db.llistaUsers()
for user in llistacompleta:
llista.append(user[0])
return llista
def userExists(self, dni = None):
"""Checks if a user exists by searching the DNI in the database"""
if dni is None:
dni = self.owner.dni
exists = False
for dniactual in self.llistaDNI():
if dniactual == dni:
exists = True
return exists
def checkUser(self):
"""Checks if self.owner data is correct"""
result = False
for user in self.db.llistaUsers():
dni = user[0]
name = user[1]
surname = user[2]
if dni == self.owner.dni:
if name == self.owner.nombre and surname == self.owner.apellidos:
result = True
break
return result
def isLogged(self):
"""Returns if the user is logged in or not"""
return self.registered
def guardaUsuari(self, owner=None):
"""Saves owner to the database if it doesn't exist"""
if owner is None:
owner = self.owner
if self.userExists(owner.dni):
print("User with DNI '"+owner.dni+"' already exists!")
else:
result = self.db.afegeixUsuari(owner.dni, owner.nombre, owner.apellidos)
if result:
print("User "+owner.nombre+" added!")
else:
print("User could not be added")
def getIbanList(self):
"""Returns a list of the IBAN codes of the owners' accounts"""
llista = self.db.llistaComptes()
ibanList = []
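        # assumed row layout: account[0] is the IBAN and the owner DNIs start
        # at index 3 in each row returned by llistaComptes()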
for account in llista:
for user in account[3:]:
if user == self.owner.dni:
ibanList.append(account[0])
break
return ibanList
def getOwner(self):
        return self.owner
| apache-2.0 | 7,223,837,244,720,342,000 | 32.683333 | 98 | 0.528829 | false | 4.0249 | false | false | false
Infinidat/infi.clickhouse_orm | src/infi/clickhouse_orm/fields.py | 1 | 24194 | from __future__ import unicode_literals
import datetime
import iso8601
import pytz
from calendar import timegm
from decimal import Decimal, localcontext
from uuid import UUID
from logging import getLogger
from pytz import BaseTzInfo
from .utils import escape, parse_array, comma_join, string_or_func, get_subclass_names
from .funcs import F, FunctionOperatorsMixin
from ipaddress import IPv4Address, IPv6Address
logger = getLogger('clickhouse_orm')
class Field(FunctionOperatorsMixin):
'''
Abstract base class for all field types.
'''
name = None # this is set by the parent model
parent = None # this is set by the parent model
creation_counter = 0 # used for keeping the model fields ordered
class_default = 0 # should be overridden by concrete subclasses
db_type = None # should be overridden by concrete subclasses
def __init__(self, default=None, alias=None, materialized=None, readonly=None, codec=None):
assert [default, alias, materialized].count(None) >= 2, \
"Only one of default, alias and materialized parameters can be given"
assert alias is None or isinstance(alias, F) or isinstance(alias, str) and alias != "",\
"Alias parameter must be a string or function object, if given"
assert materialized is None or isinstance(materialized, F) or isinstance(materialized, str) and materialized != "",\
"Materialized parameter must be a string or function object, if given"
assert readonly is None or type(readonly) is bool, "readonly parameter must be bool if given"
assert codec is None or isinstance(codec, str) and codec != "", \
"Codec field must be string, if given"
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.default = self.class_default if default is None else default
self.alias = alias
self.materialized = materialized
self.readonly = bool(self.alias or self.materialized or readonly)
self.codec = codec
def __str__(self):
return self.name
def __repr__(self):
return '<%s>' % self.__class__.__name__
def to_python(self, value, timezone_in_use):
'''
Converts the input value into the expected Python data type, raising ValueError if the
data can't be converted. Returns the converted value. Subclasses should override this.
The timezone_in_use parameter should be consulted when parsing datetime fields.
'''
return value # pragma: no cover
def validate(self, value):
'''
Called after to_python to validate that the value is suitable for the field's database type.
Subclasses should override this.
'''
pass
def _range_check(self, value, min_value, max_value):
'''
Utility method to check that the given value is between min_value and max_value.
'''
if value < min_value or value > max_value:
raise ValueError('%s out of range - %s is not between %s and %s' % (self.__class__.__name__, value, min_value, max_value))
def to_db_string(self, value, quote=True):
'''
Returns the field's value prepared for writing to the database.
When quote is true, strings are surrounded by single quotes.
'''
return escape(value, quote)
def get_sql(self, with_default_expression=True, db=None):
'''
Returns an SQL expression describing the field (e.g. for CREATE TABLE).
- `with_default_expression`: If True, adds default value to sql.
It doesn't affect fields with alias and materialized values.
- `db`: Database, used for checking supported features.
'''
sql = self.db_type
args = self.get_db_type_args()
if args:
sql += '(%s)' % comma_join(args)
if with_default_expression:
sql += self._extra_params(db)
return sql
def get_db_type_args(self):
"""Returns field type arguments"""
return []
def _extra_params(self, db):
sql = ''
if self.alias:
sql += ' ALIAS %s' % string_or_func(self.alias)
elif self.materialized:
sql += ' MATERIALIZED %s' % string_or_func(self.materialized)
elif isinstance(self.default, F):
sql += ' DEFAULT %s' % self.default.to_sql()
elif self.default:
default = self.to_db_string(self.default)
sql += ' DEFAULT %s' % default
if self.codec and db and db.has_codec_support:
sql += ' CODEC(%s)' % self.codec
return sql
def isinstance(self, types):
"""
Checks if the instance if one of the types provided or if any of the inner_field child is one of the types
provided, returns True if field or any inner_field is one of ths provided, False otherwise
- `types`: Iterable of types to check inclusion of instance
Returns: Boolean
"""
if isinstance(self, types):
return True
inner_field = getattr(self, 'inner_field', None)
while inner_field:
if isinstance(inner_field, types):
return True
inner_field = getattr(inner_field, 'inner_field', None)
return False
class StringField(Field):
class_default = ''
db_type = 'String'
def to_python(self, value, timezone_in_use):
if isinstance(value, str):
return value
if isinstance(value, bytes):
return value.decode('UTF-8')
raise ValueError('Invalid value for %s: %r' % (self.__class__.__name__, value))
class FixedStringField(StringField):
def __init__(self, length, default=None, alias=None, materialized=None, readonly=None):
self._length = length
self.db_type = 'FixedString(%d)' % length
super(FixedStringField, self).__init__(default, alias, materialized, readonly)
def to_python(self, value, timezone_in_use):
value = super(FixedStringField, self).to_python(value, timezone_in_use)
return value.rstrip('\0')
def validate(self, value):
if isinstance(value, str):
value = value.encode('UTF-8')
if len(value) > self._length:
raise ValueError('Value of %d bytes is too long for FixedStringField(%d)' % (len(value), self._length))
class DateField(Field):
min_value = datetime.date(1970, 1, 1)
max_value = datetime.date(2105, 12, 31)
class_default = min_value
db_type = 'Date'
def to_python(self, value, timezone_in_use):
if isinstance(value, datetime.datetime):
return value.astimezone(pytz.utc).date() if value.tzinfo else value.date()
if isinstance(value, datetime.date):
return value
if isinstance(value, int):
return DateField.class_default + datetime.timedelta(days=value)
if isinstance(value, str):
if value == '0000-00-00':
return DateField.min_value
return datetime.datetime.strptime(value, '%Y-%m-%d').date()
raise ValueError('Invalid value for %s - %r' % (self.__class__.__name__, value))
def validate(self, value):
self._range_check(value, DateField.min_value, DateField.max_value)
def to_db_string(self, value, quote=True):
return escape(value.isoformat(), quote)
class DateTimeField(Field):
class_default = datetime.datetime.fromtimestamp(0, pytz.utc)
db_type = 'DateTime'
def __init__(self, default=None, alias=None, materialized=None, readonly=None, codec=None,
timezone=None):
super().__init__(default, alias, materialized, readonly, codec)
# assert not timezone, 'Temporarily field timezone is not supported'
if timezone:
timezone = timezone if isinstance(timezone, BaseTzInfo) else pytz.timezone(timezone)
self.timezone = timezone
def get_db_type_args(self):
args = []
if self.timezone:
args.append(escape(self.timezone.zone))
return args
def to_python(self, value, timezone_in_use):
if isinstance(value, datetime.datetime):
return value if value.tzinfo else value.replace(tzinfo=pytz.utc)
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day, tzinfo=pytz.utc)
if isinstance(value, int):
return datetime.datetime.utcfromtimestamp(value).replace(tzinfo=pytz.utc)
if isinstance(value, str):
if value == '0000-00-00 00:00:00':
return self.class_default
if len(value) == 10:
try:
value = int(value)
return datetime.datetime.utcfromtimestamp(value).replace(tzinfo=pytz.utc)
except ValueError:
pass
try:
# left the date naive in case of no tzinfo set
dt = iso8601.parse_date(value, default_timezone=None)
except iso8601.ParseError as e:
raise ValueError(str(e))
# convert naive to aware
if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
dt = timezone_in_use.localize(dt)
return dt
raise ValueError('Invalid value for %s - %r' % (self.__class__.__name__, value))
def to_db_string(self, value, quote=True):
return escape('%010d' % timegm(value.utctimetuple()), quote)
class DateTime64Field(DateTimeField):
db_type = 'DateTime64'
def __init__(self, default=None, alias=None, materialized=None, readonly=None, codec=None,
timezone=None, precision=6):
super().__init__(default, alias, materialized, readonly, codec, timezone)
assert precision is None or isinstance(precision, int), 'Precision must be int type'
self.precision = precision
def get_db_type_args(self):
args = [str(self.precision)]
if self.timezone:
args.append(escape(self.timezone.zone))
return args
def to_db_string(self, value, quote=True):
"""
Returns the field's value prepared for writing to the database
Returns string in 0000000000.000000 format, where remainder digits count is equal to precision
"""
return escape(
'{timestamp:0{width}.{precision}f}'.format(
timestamp=value.timestamp(),
width=11 + self.precision,
precision=self.precision),
quote
)
def to_python(self, value, timezone_in_use):
try:
return super().to_python(value, timezone_in_use)
except ValueError:
if isinstance(value, (int, float)):
return datetime.datetime.utcfromtimestamp(value).replace(tzinfo=pytz.utc)
if isinstance(value, str):
left_part = value.split('.')[0]
if left_part == '0000-00-00 00:00:00':
return self.class_default
if len(left_part) == 10:
try:
value = float(value)
return datetime.datetime.utcfromtimestamp(value).replace(tzinfo=pytz.utc)
except ValueError:
pass
raise
class BaseIntField(Field):
'''
Abstract base class for all integer-type fields.
'''
def to_python(self, value, timezone_in_use):
try:
return int(value)
        except Exception:
raise ValueError('Invalid value for %s - %r' % (self.__class__.__name__, value))
def to_db_string(self, value, quote=True):
# There's no need to call escape since numbers do not contain
# special characters, and never need quoting
return str(value)
def validate(self, value):
self._range_check(value, self.min_value, self.max_value)
class UInt8Field(BaseIntField):
min_value = 0
max_value = 2**8 - 1
db_type = 'UInt8'
class UInt16Field(BaseIntField):
min_value = 0
max_value = 2**16 - 1
db_type = 'UInt16'
class UInt32Field(BaseIntField):
min_value = 0
max_value = 2**32 - 1
db_type = 'UInt32'
class UInt64Field(BaseIntField):
min_value = 0
max_value = 2**64 - 1
db_type = 'UInt64'
class Int8Field(BaseIntField):
min_value = -2**7
max_value = 2**7 - 1
db_type = 'Int8'
class Int16Field(BaseIntField):
min_value = -2**15
max_value = 2**15 - 1
db_type = 'Int16'
class Int32Field(BaseIntField):
min_value = -2**31
max_value = 2**31 - 1
db_type = 'Int32'
class Int64Field(BaseIntField):
min_value = -2**63
max_value = 2**63 - 1
db_type = 'Int64'
class BaseFloatField(Field):
'''
Abstract base class for all float-type fields.
'''
def to_python(self, value, timezone_in_use):
try:
return float(value)
        except Exception:
raise ValueError('Invalid value for %s - %r' % (self.__class__.__name__, value))
def to_db_string(self, value, quote=True):
# There's no need to call escape since numbers do not contain
# special characters, and never need quoting
return str(value)
class Float32Field(BaseFloatField):
db_type = 'Float32'
class Float64Field(BaseFloatField):
db_type = 'Float64'
class DecimalField(Field):
'''
Base class for all decimal fields. Can also be used directly.
'''
def __init__(self, precision, scale, default=None, alias=None, materialized=None, readonly=None):
assert 1 <= precision <= 38, 'Precision must be between 1 and 38'
assert 0 <= scale <= precision, 'Scale must be between 0 and the given precision'
self.precision = precision
self.scale = scale
self.db_type = 'Decimal(%d,%d)' % (self.precision, self.scale)
with localcontext() as ctx:
ctx.prec = 38
self.exp = Decimal(10) ** -self.scale # for rounding to the required scale
self.max_value = Decimal(10 ** (self.precision - self.scale)) - self.exp
self.min_value = -self.max_value
super(DecimalField, self).__init__(default, alias, materialized, readonly)
def to_python(self, value, timezone_in_use):
if not isinstance(value, Decimal):
try:
value = Decimal(value)
            except Exception:
raise ValueError('Invalid value for %s - %r' % (self.__class__.__name__, value))
if not value.is_finite():
raise ValueError('Non-finite value for %s - %r' % (self.__class__.__name__, value))
return self._round(value)
def to_db_string(self, value, quote=True):
# There's no need to call escape since numbers do not contain
# special characters, and never need quoting
return str(value)
def _round(self, value):
return value.quantize(self.exp)
def validate(self, value):
self._range_check(value, self.min_value, self.max_value)
class Decimal32Field(DecimalField):
def __init__(self, scale, default=None, alias=None, materialized=None, readonly=None):
super(Decimal32Field, self).__init__(9, scale, default, alias, materialized, readonly)
self.db_type = 'Decimal32(%d)' % scale
class Decimal64Field(DecimalField):
def __init__(self, scale, default=None, alias=None, materialized=None, readonly=None):
super(Decimal64Field, self).__init__(18, scale, default, alias, materialized, readonly)
self.db_type = 'Decimal64(%d)' % scale
class Decimal128Field(DecimalField):
def __init__(self, scale, default=None, alias=None, materialized=None, readonly=None):
super(Decimal128Field, self).__init__(38, scale, default, alias, materialized, readonly)
self.db_type = 'Decimal128(%d)' % scale
class BaseEnumField(Field):
'''
Abstract base class for all enum-type fields.
'''
def __init__(self, enum_cls, default=None, alias=None, materialized=None, readonly=None, codec=None):
self.enum_cls = enum_cls
if default is None:
default = list(enum_cls)[0]
super(BaseEnumField, self).__init__(default, alias, materialized, readonly, codec)
def to_python(self, value, timezone_in_use):
if isinstance(value, self.enum_cls):
return value
try:
if isinstance(value, str):
try:
return self.enum_cls[value]
except Exception:
return self.enum_cls(value)
if isinstance(value, bytes):
decoded = value.decode('UTF-8')
try:
return self.enum_cls[decoded]
except Exception:
return self.enum_cls(decoded)
if isinstance(value, int):
return self.enum_cls(value)
except (KeyError, ValueError):
pass
raise ValueError('Invalid value for %s: %r' % (self.enum_cls.__name__, value))
def to_db_string(self, value, quote=True):
return escape(value.name, quote)
def get_db_type_args(self):
return ['%s = %d' % (escape(item.name), item.value) for item in self.enum_cls]
@classmethod
def create_ad_hoc_field(cls, db_type):
'''
Give an SQL column description such as "Enum8('apple' = 1, 'banana' = 2, 'orange' = 3)"
this method returns a matching enum field.
'''
import re
from enum import Enum
members = {}
for match in re.finditer(r"'([\w ]+)' = (-?\d+)", db_type):
members[match.group(1)] = int(match.group(2))
enum_cls = Enum('AdHocEnum', members)
field_class = Enum8Field if db_type.startswith('Enum8') else Enum16Field
return field_class(enum_cls)
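# Usage sketch (illustrative only):
#   BaseEnumField.create_ad_hoc_field("Enum8('apple' = 1, 'banana' = 2)")
#   returns an Enum8Field over an ad-hoc Enum with those members.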
class Enum8Field(BaseEnumField):
db_type = 'Enum8'
class Enum16Field(BaseEnumField):
db_type = 'Enum16'
class ArrayField(Field):
class_default = []
def __init__(self, inner_field, default=None, alias=None, materialized=None, readonly=None, codec=None):
assert isinstance(inner_field, Field), "The first argument of ArrayField must be a Field instance"
assert not isinstance(inner_field, ArrayField), "Multidimensional array fields are not supported by the ORM"
self.inner_field = inner_field
super(ArrayField, self).__init__(default, alias, materialized, readonly, codec)
def to_python(self, value, timezone_in_use):
if isinstance(value, str):
value = parse_array(value)
elif isinstance(value, bytes):
value = parse_array(value.decode('UTF-8'))
elif not isinstance(value, (list, tuple)):
raise ValueError('ArrayField expects list or tuple, not %s' % type(value))
return [self.inner_field.to_python(v, timezone_in_use) for v in value]
def validate(self, value):
for v in value:
self.inner_field.validate(v)
def to_db_string(self, value, quote=True):
array = [self.inner_field.to_db_string(v, quote=True) for v in value]
return '[' + comma_join(array) + ']'
def get_sql(self, with_default_expression=True, db=None):
sql = 'Array(%s)' % self.inner_field.get_sql(with_default_expression=False, db=db)
if with_default_expression and self.codec and db and db.has_codec_support:
            sql += ' CODEC(%s)' % self.codec
return sql
class UUIDField(Field):
class_default = UUID(int=0)
db_type = 'UUID'
def to_python(self, value, timezone_in_use):
if isinstance(value, UUID):
return value
elif isinstance(value, bytes):
return UUID(bytes=value)
elif isinstance(value, str):
return UUID(value)
elif isinstance(value, int):
return UUID(int=value)
elif isinstance(value, tuple):
return UUID(fields=value)
else:
raise ValueError('Invalid value for UUIDField: %r' % value)
def to_db_string(self, value, quote=True):
return escape(str(value), quote)
class IPv4Field(Field):
class_default = 0
db_type = 'IPv4'
def to_python(self, value, timezone_in_use):
if isinstance(value, IPv4Address):
return value
elif isinstance(value, (bytes, str, int)):
return IPv4Address(value)
else:
raise ValueError('Invalid value for IPv4Address: %r' % value)
def to_db_string(self, value, quote=True):
return escape(str(value), quote)
class IPv6Field(Field):
class_default = 0
db_type = 'IPv6'
def to_python(self, value, timezone_in_use):
if isinstance(value, IPv6Address):
return value
elif isinstance(value, (bytes, str, int)):
return IPv6Address(value)
else:
raise ValueError('Invalid value for IPv6Address: %r' % value)
def to_db_string(self, value, quote=True):
return escape(str(value), quote)
class NullableField(Field):
class_default = None
def __init__(self, inner_field, default=None, alias=None, materialized=None,
extra_null_values=None, codec=None):
assert isinstance(inner_field, Field), "The first argument of NullableField must be a Field instance. Not: {}".format(inner_field)
self.inner_field = inner_field
self._null_values = [None]
if extra_null_values:
self._null_values.extend(extra_null_values)
super(NullableField, self).__init__(default, alias, materialized, readonly=None, codec=codec)
def to_python(self, value, timezone_in_use):
if value == '\\N' or value in self._null_values:
return None
return self.inner_field.to_python(value, timezone_in_use)
def validate(self, value):
value in self._null_values or self.inner_field.validate(value)
def to_db_string(self, value, quote=True):
if value in self._null_values:
return '\\N'
return self.inner_field.to_db_string(value, quote=quote)
def get_sql(self, with_default_expression=True, db=None):
sql = 'Nullable(%s)' % self.inner_field.get_sql(with_default_expression=False, db=db)
if with_default_expression:
sql += self._extra_params(db)
return sql
class LowCardinalityField(Field):
def __init__(self, inner_field, default=None, alias=None, materialized=None, readonly=None, codec=None):
assert isinstance(inner_field, Field), "The first argument of LowCardinalityField must be a Field instance. Not: {}".format(inner_field)
assert not isinstance(inner_field, LowCardinalityField), "LowCardinality inner fields are not supported by the ORM"
assert not isinstance(inner_field, ArrayField), "Array field inside LowCardinality are not supported by the ORM. Use Array(LowCardinality) instead"
self.inner_field = inner_field
self.class_default = self.inner_field.class_default
super(LowCardinalityField, self).__init__(default, alias, materialized, readonly, codec)
def to_python(self, value, timezone_in_use):
return self.inner_field.to_python(value, timezone_in_use)
def validate(self, value):
self.inner_field.validate(value)
def to_db_string(self, value, quote=True):
return self.inner_field.to_db_string(value, quote=quote)
def get_sql(self, with_default_expression=True, db=None):
if db and db.has_low_cardinality_support:
sql = 'LowCardinality(%s)' % self.inner_field.get_sql(with_default_expression=False)
else:
sql = self.inner_field.get_sql(with_default_expression=False)
logger.warning('LowCardinalityField not supported on clickhouse-server version < 19.0 using {} as fallback'.format(self.inner_field.__class__.__name__))
if with_default_expression:
sql += self._extra_params(db)
return sql
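# Usage sketch (illustrative only; Model and MergeTree come from the sibling
# .models and .engines modules):
#   from infi.clickhouse_orm.models import Model
#   from infi.clickhouse_orm.engines import MergeTree
#   class Event(Model):
#       created = DateTimeField()
#       name = LowCardinalityField(StringField())
#       value = NullableField(Int32Field())
#       engine = MergeTree('created', ('name',))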
# Expose only relevant classes in import *
__all__ = get_subclass_names(locals(), Field)
| bsd-3-clause | 3,360,560,357,268,833,000 | 34.896142 | 164 | 0.615111 | false | 3.985175 | false | false | false |
burnettk/delete-docker-registry-image | delete_docker_registry_image.py | 1 | 16505 | #!/usr/bin/env python
"""
Usage:
Shut down your registry service to avoid race conditions and possible data loss
and then run the command with an image repo like this:
delete_docker_registry_image.py --image awesomeimage --dry-run
"""
import argparse
import json
import logging
import os
import sys
import shutil
import glob
logger = logging.getLogger(__name__)
def del_empty_dirs(s_dir, top_level):
"""recursively delete empty directories"""
b_empty = True
for s_target in os.listdir(s_dir):
s_path = os.path.join(s_dir, s_target)
if os.path.isdir(s_path):
if not del_empty_dirs(s_path, False):
b_empty = False
else:
b_empty = False
if b_empty:
logger.debug("Deleting empty directory '%s'", s_dir)
if not top_level:
os.rmdir(s_dir)
return b_empty
def get_layers_from_blob(path):
"""parse json blob and get set of layer digests"""
try:
with open(path, "r") as blob:
data_raw = blob.read()
data = json.loads(data_raw)
if data["schemaVersion"] == 1:
result = set([entry["blobSum"].split(":")[1] for entry in data["fsLayers"]])
else:
result = set([entry["digest"].split(":")[1] for entry in data["layers"]])
if "config" in data:
result.add(data["config"]["digest"].split(":")[1])
return result
except Exception as error:
logger.critical("Failed to read layers from blob:%s", error)
return set()
def get_digest_from_blob(path):
"""parse file and get digest"""
try:
with open(path, "r") as blob:
return blob.read().split(":")[1]
except Exception as error:
logger.critical("Failed to read digest from blob:%s", error)
return ""
def get_links(path, _filter=None):
"""recursively walk `path` and parse every link inside"""
result = []
for root, _, files in os.walk(path):
for each in files:
if each == "link":
filepath = os.path.join(root, each)
if not _filter or _filter in filepath:
result.append(get_digest_from_blob(filepath))
return result
class RegistryCleanerError(Exception):
pass
class RegistryCleaner(object):
"""Clean registry"""
def __init__(self, registry_data_dir, dry_run=False):
self.registry_data_dir = registry_data_dir
if not os.path.isdir(self.registry_data_dir):
raise RegistryCleanerError("No repositories directory found inside " \
"REGISTRY_DATA_DIR '{0}'.".
format(self.registry_data_dir))
self.dry_run = dry_run
def _delete_layer(self, repo, digest):
"""remove blob directory from filesystem"""
path = os.path.join(self.registry_data_dir, "repositories", repo, "_layers/sha256", digest)
self._delete_dir(path)
def _delete_blob(self, digest):
"""remove blob directory from filesystem"""
path = os.path.join(self.registry_data_dir, "blobs/sha256", digest[0:2], digest)
self._delete_dir(path)
def _blob_path_for_revision(self, digest):
"""where we can find the blob that contains the json describing this digest"""
return os.path.join(self.registry_data_dir, "blobs/sha256",
digest[0:2], digest, "data")
def _blob_path_for_revision_is_missing(self, digest):
"""for each revision, there should be a blob describing it"""
return not os.path.isfile(self._blob_path_for_revision(digest))
def _get_layers_from_blob(self, digest):
"""get layers from blob by digest"""
return get_layers_from_blob(self._blob_path_for_revision(digest))
def _delete_dir(self, path):
"""remove directory from filesystem"""
if self.dry_run:
logger.info("DRY_RUN: would have deleted %s", path)
else:
logger.info("Deleting %s", path)
try:
shutil.rmtree(path)
except Exception as error:
logger.critical("Failed to delete directory:%s", error)
def _delete_from_tag_index_for_revision(self, repo, digest):
"""delete revision from tag indexes"""
paths = glob.glob(
os.path.join(self.registry_data_dir, "repositories", repo,
"_manifests/tags/*/index/sha256", digest)
)
for path in paths:
self._delete_dir(path)
def _delete_revisions(self, repo, revisions, blobs_to_keep=None):
"""delete revisions from list of directories"""
if blobs_to_keep is None:
blobs_to_keep = []
for revision_dir in revisions:
digests = get_links(revision_dir)
for digest in digests:
self._delete_from_tag_index_for_revision(repo, digest)
if digest not in blobs_to_keep:
self._delete_blob(digest)
self._delete_dir(revision_dir)
def _get_tags(self, repo):
"""get all tags for given repository"""
path = os.path.join(self.registry_data_dir, "repositories", repo, "_manifests/tags")
if not os.path.isdir(path):
logger.critical("No repository '%s' found in repositories directory %s",
repo, self.registry_data_dir)
return None
result = []
for each in os.listdir(path):
filepath = os.path.join(path, each)
if os.path.isdir(filepath):
result.append(each)
return result
def _get_repositories(self):
"""get all repository repos"""
result = []
root = os.path.join(self.registry_data_dir, "repositories")
for each in os.listdir(root):
filepath = os.path.join(root, each)
if os.path.isdir(filepath):
inside = os.listdir(filepath)
if "_layers" in inside:
result.append(each)
else:
for inner in inside:
result.append(os.path.join(each, inner))
return result
def _get_all_links(self, except_repo=""):
"""get links for every repository"""
result = []
repositories = self._get_repositories()
for repo in [r for r in repositories if r != except_repo]:
path = os.path.join(self.registry_data_dir, "repositories", repo)
for link in get_links(path):
result.append(link)
return result
def prune(self):
"""delete all empty directories in registry_data_dir"""
del_empty_dirs(self.registry_data_dir, True)
def _layer_in_same_repo(self, repo, tag, layer):
"""check if layer is found in other tags of same repository"""
for other_tag in [t for t in self._get_tags(repo) if t != tag]:
path = os.path.join(self.registry_data_dir, "repositories", repo,
"_manifests/tags", other_tag, "current/link")
manifest = get_digest_from_blob(path)
try:
layers = self._get_layers_from_blob(manifest)
if layer in layers:
return True
except IOError:
if self._blob_path_for_revision_is_missing(manifest):
                    logger.warning("Blob for digest %s does not exist. Deleting tag manifest: %s", manifest, other_tag)
tag_dir = os.path.join(self.registry_data_dir, "repositories", repo,
"_manifests/tags", other_tag)
self._delete_dir(tag_dir)
else:
raise
return False
def _manifest_in_same_repo(self, repo, tag, manifest):
"""check if manifest is found in other tags of same repository"""
for other_tag in [t for t in self._get_tags(repo) if t != tag]:
path = os.path.join(self.registry_data_dir, "repositories", repo,
"_manifests/tags", other_tag, "current/link")
other_manifest = get_digest_from_blob(path)
if other_manifest == manifest:
return True
return False
def delete_entire_repository(self, repo):
"""delete all blobs for given repository repo"""
logger.debug("Deleting entire repository '%s'", repo)
repo_dir = os.path.join(self.registry_data_dir, "repositories", repo)
if not os.path.isdir(repo_dir):
raise RegistryCleanerError("No repository '{0}' found in repositories "
"directory {1}/repositories".
format(repo, self.registry_data_dir))
links = set(get_links(repo_dir))
all_links_but_current = set(self._get_all_links(except_repo=repo))
for layer in links:
if layer in all_links_but_current:
logger.debug("Blob found in another repository. Not deleting: %s", layer)
else:
self._delete_blob(layer)
self._delete_dir(repo_dir)
def delete_repository_tag(self, repo, tag):
"""delete all blobs only for given tag of repository"""
logger.debug("Deleting repository '%s' with tag '%s'", repo, tag)
tag_dir = os.path.join(self.registry_data_dir, "repositories", repo, "_manifests/tags", tag)
if not os.path.isdir(tag_dir):
raise RegistryCleanerError("No repository '{0}' tag '{1}' found in repositories "
"directory {2}/repositories".
format(repo, tag, self.registry_data_dir))
manifests_for_tag = set(get_links(tag_dir))
revisions_to_delete = []
blobs_to_keep = []
layers = []
all_links_not_in_current_repo = set(self._get_all_links(except_repo=repo))
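        # first pass: decide which manifest revisions to drop and which blobs
        # other repositories still need; the second pass below removes layers
        # not shared by another tag or repository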
for manifest in manifests_for_tag:
logger.debug("Looking up filesystem layers for manifest digest %s", manifest)
if self._manifest_in_same_repo(repo, tag, manifest):
logger.debug("Not deleting since we found another tag using manifest: %s", manifest)
continue
else:
revisions_to_delete.append(
os.path.join(self.registry_data_dir, "repositories", repo,
"_manifests/revisions/sha256", manifest)
)
if manifest in all_links_not_in_current_repo:
logger.debug("Not deleting the blob data since we found another repo using manifest: %s", manifest)
blobs_to_keep.append(manifest)
layers.extend(self._get_layers_from_blob(manifest))
layers_uniq = set(layers)
for layer in layers_uniq:
if self._layer_in_same_repo(repo, tag, layer):
logger.debug("Not deleting since we found another tag using digest: %s", layer)
continue
self._delete_layer(repo, layer)
if layer in all_links_not_in_current_repo:
logger.debug("Blob found in another repository. Not deleting: %s", layer)
else:
self._delete_blob(layer)
self._delete_revisions(repo, revisions_to_delete, blobs_to_keep)
self._delete_dir(tag_dir)

    def delete_untagged(self, repo):
        """delete all untagged data from repo"""
        logger.debug("Deleting untagged data from repository '%s'", repo)
        repositories_dir = os.path.join(self.registry_data_dir, "repositories")
        repo_dir = os.path.join(repositories_dir, repo)
        if not os.path.isdir(repo_dir):
            raise RegistryCleanerError("No repository '{0}' found in repositories "
                                       "directory {1}/repositories".
                                       format(repo, self.registry_data_dir))
        tagged_links = set(get_links(repositories_dir, _filter="current"))
        layers_to_protect = []
        for link in tagged_links:
            layers_to_protect.extend(self._get_layers_from_blob(link))
        unique_layers_to_protect = set(layers_to_protect)
        for layer in unique_layers_to_protect:
            logger.debug("layer_to_protect: %s", layer)
        tagged_revisions = set(get_links(repo_dir, _filter="current"))
        revisions_to_delete = []
        layers_to_delete = []
        dir_for_revisions = os.path.join(repo_dir, "_manifests/revisions/sha256")
        for rev in os.listdir(dir_for_revisions):
            if rev not in tagged_revisions:
                revisions_to_delete.append(os.path.join(dir_for_revisions, rev))
                for layer in self._get_layers_from_blob(rev):
                    if layer not in unique_layers_to_protect:
                        layers_to_delete.append(layer)
        unique_layers_to_delete = set(layers_to_delete)
        self._delete_revisions(repo, revisions_to_delete)
        for layer in unique_layers_to_delete:
            self._delete_blob(layer)
            self._delete_layer(repo, layer)

    def get_tag_count(self, repo):
        """return the number of tags in the given repository repo"""
        logger.debug("Get tag count of repository '%s'", repo)
        repo_dir = os.path.join(self.registry_data_dir, "repositories", repo)
        tags_dir = os.path.join(repo_dir, "_manifests/tags")
        if os.path.isdir(tags_dir):
            tags = os.listdir(tags_dir)
            return len(tags)
        else:
            logger.info("Tags directory does not exist: '%s'", tags_dir)
            return -1
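
# Example invocations (illustrative only; the script filename is an
# assumption, and REGISTRY_DATA_DIR overrides the default data directory):
#
#   REGISTRY_DATA_DIR=/opt/registry_data/docker/registry/v2 \
#       ./registry_cleaner.py -i myrepo/myimage:1.0 --dry-run --verbose
#   ./registry_cleaner.py -i myrepo/myimage --untagged --prune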


def main():
    """cli entrypoint"""
    parser = argparse.ArgumentParser(description="Cleanup docker registry")
    parser.add_argument("-i", "--image",
                        dest="image",
                        required=True,
                        help="Docker image to cleanup")
    parser.add_argument("-v", "--verbose",
                        dest="verbose",
                        action="store_true",
                        help="verbose")
    parser.add_argument("-n", "--dry-run",
                        dest="dry_run",
                        action="store_true",
                        help="Dry run")
    parser.add_argument("-f", "--force",
                        dest="force",
                        action="store_true",
                        help="Force delete (deprecated)")
    parser.add_argument("-p", "--prune",
                        dest="prune",
                        action="store_true",
                        help="Prune")
    parser.add_argument("-u", "--untagged",
                        dest="untagged",
                        action="store_true",
                        help="Delete all untagged blobs for image")
    args = parser.parse_args()

    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(u'%(levelname)-8s [%(asctime)s] %(message)s'))
    logger.addHandler(handler)
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    # make sure not to log before logging is set up; that would hose the logging config
    if args.force:
        logger.info(
            "You supplied the force switch, which is deprecated. It has no "
            "effect now; the script defaults to the behavior that previously "
            "happened only when force was given.")

    splitted = args.image.split(":")
    if len(splitted) == 2:
        image = splitted[0]
        tag = splitted[1]
    else:
        image = args.image
        tag = None

    if 'REGISTRY_DATA_DIR' in os.environ:
        registry_data_dir = os.environ['REGISTRY_DATA_DIR']
    else:
        registry_data_dir = "/opt/registry_data/docker/registry/v2"

    try:
        cleaner = RegistryCleaner(registry_data_dir, dry_run=args.dry_run)
        if args.untagged:
            cleaner.delete_untagged(image)
        else:
            if tag:
                tag_count = cleaner.get_tag_count(image)
                if tag_count == 1:
                    cleaner.delete_entire_repository(image)
                else:
                    cleaner.delete_repository_tag(image, tag)
            else:
                cleaner.delete_entire_repository(image)
        if args.prune:
            cleaner.prune()
    except RegistryCleanerError as error:
        logger.fatal(error)
        sys.exit(1)


if __name__ == "__main__":
    main()
| mit | 8,046,304,554,548,775,000 | 38.485646 | 169 | 0.560679 | false | 4.136591 | false | false | false |
tatsuhirosatou/JMdictDB | python/lib/jelparse.py | 1 | 39671 |
#######################################################################
# This file is part of JMdictDB.
# Copyright (c) 2008-2010 Stuart McGraw
#
# JMdictDB is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License,
# or (at your option) any later version.
#
# JMdictDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with JMdictDB; if not, write to the Free Software Foundation,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#######################################################################
import sys, ply.yacc, re, unicodedata, pdb
from collections import defaultdict
import jellex, jdb
from objects import *
class ParseError (ValueError):
def __init__ (self, msg, loc=None, token=None):
self.args = (msg,)
self.loc = loc
self.token = token
precedence = []
# -------------- RULES ----------------
def p_entr_1(p):
'''entr : preentr'''
p.lexer.begin('INITIAL')
e = p[1]
        # The Freq objects on the readings are independent of
# those on the kanjis. The following function merges
# common values.
merge_freqs (e)
        # Set the foreign key ids since they will be
        # needed by mk_restrs() below.
jdb.setkeys (e, None)
# The reading and sense restrictions here are simple
# lists of text strings that give the allowed readings
# or kanji. mk_restrs() converts those to the canonical
# format which uses the index number of the disallowed
# readings or kanji.
if hasattr (e, '_rdng') and hasattr (e, '_kanj'):
err = mk_restrs ("_RESTR", e._rdng, e._kanj)
if err: perror (p, err, loc=False)
if hasattr (e, '_sens') and hasattr (e, '_kanj'):
err = mk_restrs ("_STAGK", e._sens, e._kanj)
if err: perror (p, err, loc=False)
if hasattr (e, '_sens') and hasattr (e, '_rdng'):
err = mk_restrs ("_STAGR", e._sens, e._rdng)
if err: perror (p, err, loc=False)
# Note that the entry object returned may have an _XREF list
# on its senses but the supplied xref records are not
# complete. We do not assume database access is available
        # when parsing so we cannot look up the xrefs to find
        # the target entry id numbers, validate that the kanji
# reading (if given) are unique, or the target senses exist,
# etc. It is expected that the caller will do this resolution
# on the xrefs using something like jdb.resolv_xref() prior
# to using the object.
p[0] = e
def p_preentr_1(p):
'''preentr : kanjsect FF rdngsect FF senses'''
p[0] = jdb.Entr(_kanj=p[1], _rdng=p[3], _sens=p[5])
def p_preentr_2(p):
'''preentr : FF rdngsect FF senses'''
p[0] = jdb.Entr(_rdng=p[2], _sens=p[4])
def p_preentr_3(p):
'''preentr : kanjsect FF FF senses'''
p[0] = jdb.Entr(_kanj=p[1], _sens=p[4])
def p_kanjsect_1(p):
'''kanjsect : kanjitem'''
p[0] = [p[1]]
def p_kanjsect_2(p):
'''kanjsect : kanjsect SEMI kanjitem'''
p[0] = p[1]; p[0].append (p[3])
def p_kanjitem_1(p):
'''kanjitem : krtext'''
p[0] = jdb.Kanj(txt=p[1])
def p_kanjitem_2(p):
'''kanjitem : krtext taglists'''
kanj = jdb.Kanj(txt=p[1])
err = bld_kanj (kanj, p[2])
if err: perror (p, err)
p[0] = kanj
def p_rdngsect_1(p):
'''rdngsect : rdngitem'''
p[0] = [p[1]]
def p_rdngsect_2(p):
'''rdngsect : rdngsect SEMI rdngitem'''
p[0] = p[1]; p[0].append (p[3])
def p_rdngitem_1(p):
'''rdngitem : krtext'''
p[0] = jdb.Rdng(txt=p[1])
def p_rdngitem_2(p):
'''rdngitem : krtext taglists'''
rdng = jdb.Rdng(txt=p[1])
err = bld_rdng (rdng, p[2])
if err: perror (p, err)
p[0] = rdng
def p_krtext_1(p):
'''krtext : KTEXT'''
p[0] = p[1]
def p_krtext_2(p):
'''krtext : RTEXT'''
p[0] = p[1]
def p_senses_1(p):
'''senses : sense'''
p[0] = [p[1]]
def p_senses_2(p):
'''senses : senses sense'''
p[0] = p[1]; p[0].append(p[2])
def p_sense_1(p):
'''sense : SNUM glosses'''
sens = jdb.Sens()
err = bld_sens (sens, p[2])
if err: perror (p, "Unable to build sense %s\n%s" % (p[1], err))
p[0] = sens
def p_glosses_1(p):
'''glosses : gloss'''
p[0] = [p[1]]
def p_glosses_2(p):
'''glosses : glosses SEMI gloss'''
p[0] = p[1]; p[0].append (p[3])
def p_gloss_1(p):
'''gloss : GTEXT'''
p[0] = [p[1], []]
def p_gloss_2(p):
'''gloss : GTEXT taglists'''
p[0] = [p[1], p[2]]
def p_gloss_3(p):
'''gloss : taglists GTEXT'''
p[0] = [p[2], p[1]]
def p_gloss_4(p):
'''gloss : taglists GTEXT taglists'''
p[0] = [p[2], p[1] + p[3]]
def p_taglists_1(p):
'''taglists : taglist'''
p[0] = p[1]
def p_taglists_2(p):
'''taglists : taglists taglist'''
p[0] = p[1]
p[0].extend(p[2])
def p_taglist_1(p):
'''taglist : BRKTL tags BRKTR'''
p[0] = p[2]
def p_tags_1(p):
'''tags : tagitem'''
p[0] = p[1]
def p_tags_2(p):
'''tags : tags COMMA tagitem'''
p[0] = p[1]
p[0].extend (p[3])
def p_tagitem_1(p):
'''tagitem : KTEXT'''
p[0] = [['RESTR', None, p[1]]]
def p_tagitem_2(p):
'''tagitem : RTEXT'''
p[0] = [['RESTR', p[1], None]]
def p_tagitem_3(p):
'''tagitem : TEXT'''
if p[1] == 'nokanji':
p[0] = [['RESTR', 'nokanji', None]]
else:
x = lookup_tag (p[1])
if not x: perror (p, "Unknown keyword: '%s'" % p[1])
else: p[0] = [[None, p[1]]]
def p_tagitem_4(p):
'''tagitem : QTEXT'''
          # FIXME: why isn't a QTEXT already cleaned up by jellex?
txt = jellex.qcleanup (p[1][1:-1])
# FIXME: we should check for ascii text here and treat
# that as TEXT above.
if jdb.jstr_keb (txt): p[0] = [['RESTR', None, txt]]
else: p[0] = [['RESTR', txt, None]]
def p_tagitem_5(p):
'''tagitem : TEXT EQL TEXT'''
p[0] = [tag_eql_text (p, p[1], p[3])]
def p_tagitem_6(p):
'''tagitem : TEXT EQL TEXT COLON'''
KW = jdb.KW
if p[1] != "lsrc": perror (p, "Keyword must be \"lsrc\"")
la = KW.LANG.get(p[3])
if not la: perror (p, "Unrecognised language '%s'" % p[3])
p[0] = [["lsrc", None, la.id, None]]
def p_tagitem_7(p):
'''tagitem : TEXT EQL TEXT COLON atext'''
KW = jdb.KW
lsrc_flags = None; lang = None
if p[1] in ["lsrc"]:
la = KW.LANG.get(p[3])
if not la:
if p[3] not in ('w','p','wp','pw'):
perror (p, "Unrecognised language '%s'" % p[3])
else: lsrc_flags = p[3]
else: lang = la.id
else: perror (p, "Keyword not \"lsrc\", \"lit\", or \"expl\"")
p[0] = [["lsrc", p[5], lang, lsrc_flags]]
def p_tagitem_8(p):
'''tagitem : TEXT EQL TEXT SLASH TEXT COLON'''
KW = jdb.KW
if p[1] != "lsrc": perror (p, "Keyword not \"lsrc\"")
la = KW.LANG.get(p[3])
if not la: perror (p, "Unrecognised language '%s'" % p[3])
if p[5] not in ('w','p','wp','pw'):
perror (p, "Bad lsrc flags '%s', must be 'w' (wasei), "
"'p' (partial),or both" % p[5])
p[0] = [["lsrc", '', la.id, p[5]]]
def p_tagitem_9(p):
'''tagitem : TEXT EQL TEXT SLASH TEXT COLON atext'''
KW = jdb.KW
if p[1] != "lsrc": perror (p, "Keyword not \"lsrc\"")
la = KW.LANG.get(p[3])
if not la: perror (p, "Unrecognised language '%s'" % p[3])
if p[5] not in ('w','p','wp','pw'):
perror (p, "Bad lsrc flags '%s', must be 'w' (wasei), "
"'p' (partial),or both" % p[5])
p[0] = [["lsrc", p[7], la.id, p[5]]]
def p_tagitem_10(p):
'''tagitem : TEXT EQL jrefs'''
tag = p[1]; taglist = []; tagtype = 'XREF'; KW = jdb.KW
for jref in p[3]:
dotlist, slist, seq, corpus = jref
if tag in [x.kw for x in KW.recs('XREF')]:
            # FIXME: instead of using XREF kw's directly, do we want to
# change to an lsrc syntax like, "xref=cf:..."
# (possibly keeping "see" and "ant" as direct keywords)?
if len (dotlist) == 1:
if jdb.jstr_keb (dotlist[0]):
taglist.append (['XREF', tag, None, dotlist[0], slist, seq, corpus])
else:
taglist.append (['XREF', tag, dotlist[0], None, slist, seq, corpus])
elif len (dotlist) == 2:
taglist.append (['XREF', tag, dotlist[1], dotlist[0], slist, seq, corpus])
elif len(dotlist) == 0:
taglist.append (['XREF', tag, None, None, slist, seq, corpus])
else: perror ("No more than on kanji and one reading string can be given in an xref.")
continue
# The full 'jref' syntax is only used by xrefs (above)
# so if we get here, complain if the 'jref' item has
# any xref-specific elements.
if seq or corpus or slist:
perror ("Seq number, corpus, or a sense list can only be given with xref tags")
            # Xrefs are also the only construct that uses the middot character
            # syntactically. Since we don't have an xref, the middots are
            # just characters in the text, so put the original text string back
            # together.
txt = u'\u30FB'.join (dotlist)
if tag == 'restr':
if jdb.jstr_keb (txt):
taglist.append (['RESTR', None, txt])
else:
taglist.append (['RESTR', txt, None])
else:
            # This must be a tag=QTEXT construct.
taglist.append (tag_eql_text (p, tag, txt))
p[0] = taglist
def p_atext_1(p):
'''atext : TEXT'''
p[0] = p[1]
def p_atext_2(p):
'''atext : QTEXT'''
p[0] = jellex.qcleanup (p[1][1:-1])
def p_jrefs_1(p):
'''jrefs : jref'''
p[0] = [p[1]]
def p_jrefs_2(p):
'''jrefs : jrefs SEMI jref'''
p[0] = p[1]; p[0].append (p[3])
def p_jref_1(p):
'''jref : xrefnum'''
p[0] = [[],[]] + p[1]
def p_jref_2(p):
'''jref : xrefnum slist'''
p[0] = [[],p[2]] + p[1]
def p_jref_3(p):
'''jref : xrefnum DOT jitem'''
p[0] = p[3] + p[1]
def p_jref_4(p):
'''jref : jitem'''
p[0] = p[1] + [None,'']
def p_jitem_1(p):
'''jitem : dotlist'''
p[0] = [p[1], None]
def p_jitem_2(p):
'''jitem : dotlist slist'''
p[0] = [p[1], p[2]]
def p_dotlist_1(p):
'''dotlist : jtext'''
p[0] = [p[1]]
def p_dotlist_2(p):
'''dotlist : dotlist DOT jtext'''
p[0] = p[1]; p[0].append (p[3])
def p_jtext_1(p):
'''jtext : KTEXT'''
p[0] = p[1]
def p_jtext_2(p):
'''jtext : RTEXT'''
p[0] = p[1]
def p_jtext_3(p):
'''jtext : QTEXT'''
p[0] = jellex.qcleanup (p[1][1:-1])
def p_xrefnum_1(p):
'''xrefnum : NUMBER'''
p[0] = [toint(p[1]), '']
def p_xrefnum_2(p):
'''xrefnum : NUMBER HASH'''
p[0] = [toint(p[1]), None]
def p_xrefnum_3(p):
'''xrefnum : NUMBER TEXT'''
p[0] = [toint(p[1]), p[2]]
def p_slist_1(p):
'''slist : BRKTL snums BRKTR'''
p[0] = p[2]
def p_snums_1(p):
'''snums : NUMBER'''
n = int(p[1])
if n<1 or n>99:
perror (p, "Invalid sense number: '%s' % n")
p[0] = [n]
def p_snums_2(p):
'''snums : snums COMMA NUMBER'''
n = int(p[3])
if n<1 or n>99:
perror (p, "Invalid sense number: '%s' % n")
p[0] = p[1] + [n]
# -------------- RULES END ----------------
def p_error (token):
# Ply insists on having a p_error function that takes
# exactly one argument so provide a wrapper around perror.
perror (token)
def perror (t_or_p, msg="Syntax Error", loc=True):
# 't_or_p' is either a YaccProduction (if called from
# jelparse code), a LexToken (if called by Ply), or None
# (if called by Ply at end-of-text).
if loc:
errpos = -1
if t_or_p is None: errpos = None
elif hasattr (t_or_p, 'stack'):
# 't_or_p' is a production. Replace with a real token or
# grammar symbol from the parser stack.
t_or_p = t_or_p.stack[-1]
# Grammar symbols will have a "endlexpos" attribute (presuming
# that the parse() function was called with argument: tracking=True).
if hasattr (t_or_p, 'endlexpos'):
errpos = t_or_p.endlexpos
# LexTokens will have a "lexpos" attribute.
elif hasattr (t_or_p, 'lexpos'):
errpos = t_or_p.lexpos
if errpos == -1:
raise ValueError ("Unable to get lexer error position. "
"Was parser called with tracking=True?")
t = errloc (errpos)
loc_text = '\n'.join (t)
else:
loc_text = None
raise ParseError (msg, loc_text)
def errloc (errpos):
    # Return a list of text lines that constitute the parser
# input text (or more accurately the input text to the
# lexer used by the parser) with an inserted line containing
# a caret character that points to the lexer position when
# the error was detected. 'errpos' is the character offset
# in the input text of the error, or None if the error was
# at the end of input.
# Note: Function create_parser() makes the parser it creates
    # global (in JelParser) and also makes the lexer available as
    # attribute '.lexer' of the parser, both of which we rely on
# here.
global JelParser
input = JelParser.lexer.lexdata
if errpos is None: errpos = len (input)
lines = input.splitlines (True)
eol = 0; out = []
for line in lines:
out.append (line.rstrip('\n\r'))
eol += len (line)
if eol >= errpos and errpos >= 0:
# Calculate 'errcol', the error position relative
# to the start of the current line.
errcol = len(line) + errpos - eol
# The line may contain double-width characters. Count
# (in 'adj') the number of them that occur up to (but
# not past) 'errcol'.
adj = 0
for chr in line[:errcol]:
w = unicodedata.east_asian_width (chr)
if w == "W" or w == "F": adj += 1
            # This assumes that the width of a space is the same as
# regular characters, and exactly half of a double-width
# character, but that is the best we can do here.
out.append ((' ' * (errcol+adj)) + '^')
errpos = -1 # Ignore errpos on subsequent loops.
return out
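
# Illustration (not part of the original module): for lexer input
# "foo [bar" with an error detected at character offset 5, errloc(5)
# returns:
#   ['foo [bar',
#    '     ^']
# i.e. the caret is aligned under the offending character; any double-width
# (East Asian "W"/"F") character earlier on the line shifts the caret one
# extra column to the right.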
def tag_eql_text (p, tag, text):
    # Process a tag=text syntax construct as it is parsed.
# We extract this activity into a function since, in the
# "tagitem" section, we do it both for the TEXT=TEXT rule,
# and TEXT=QTEXT (which is a possible condition in the
# TEXT=jrefs rule.)
if tag in ["note","lsrc","restr"]:
if tag == "restr":
if text != "nokanji":
perror (p, "Bad restr value (expected \"nokanji\"): '%s'" % p[3])
r = ["RESTR", "nokanji", None]
else: r = [tag, text, 1, None]
else:
x = lookup_tag (text, tag)
if x and len(x) > 1:
raise ValueError ("Unexpected return value from lookup_tag()")
if x is None: perror (p, "Unknown keyword type '%s'" % tag)
elif not x: perror (p, "Unknown %s keyword '%s'" % (tag,text))
else: r = x[0]
return r
def lookup_tag (tag, typs=None):
# Lookup 'tag' (given as a string) in the keyword tables
# and return the kw id number. If 'typs' is given it
# should be a string or list of strings and gives the
# specific KW domain(s) (e.g. FREQ, KINF, etc) that 'tag'
# should be looked for in.
# The return value is:
    #   None -- A non-existent KW domain was given in 'typs'.
# [] -- (Empty list) The 'tag' was not found in any of
    #     the domains given in 'typs'.
# [[typ1,id1],[typ2,id2],...] -- A list of lists. Each
# item represents a domain in which 'tag' was found.
# The first item of each item is a string giving
# the domain name. The second item gives the id
# number of that tag in the domain. In the case of
# the FREQ keyword, the item will be a 3-list
# consisting of "FREQ", the freq kw id, and the
# a number for the freq value. E.g. lookup_tag('nf23')
# will return [["FREQ",5,23]] (assuming that the "nf"
# kw has the id value of 5 in the kwfreq table.)
KW = jdb.KW
matched = []
if not typs:
typs = [x for x in KW.attrs()]
if isinstance (typs, str): typs = [typs]
for typ in typs:
typ = typ.upper(); val = None
if typ == "FREQ":
mo = re.search (r'^([^0-9]+)(\d+)$', tag)
if mo:
tagbase = mo.group(1)
val = int (mo.group(2))
else: tagbase = tag
try:
x = (getattr (KW, typ))[tagbase]
except AttributeError:
return None
except KeyError: pass
else:
if not val: matched.append ([typ, x.id])
else: matched.append ([typ, x.id, val])
return matched
def bld_sens (sens, glosses):
# Build a sense record. 'glosses' is a list of gloss items.
# Each gloss item is a 2-tuple: the first item is the gloss
# record and the second, a list of sense tags.
# Each of the sense tag items is an n-tuple. The first item
# in an n-tuple is either a string giving the type of the tag
    # ('KINF', 'POS', 'lsrc', etc) or None indicating the type was
# not specified (for example, the input text contained a single
# keyword like "vi" rather than "pos=vi"). The second and any
    # further items are dependent on the tag type.
    # Our job is to iterate through this list, and put each item
    # on the appropriate sense list: e.g. all the "gloss" items go
    # into the list sens._gloss, all the "POS" keyword items
    # go on sens._pos, etc.
KW = jdb.KW
errs = []; sens._gloss = []
for gtxt, tags in glosses:
gloss = jdb.Gloss (txt=jellex.gcleanup(gtxt))
sens._gloss.append (gloss)
if tags: errs.extend (sens_tags (sens, gloss, tags))
if gloss.ginf is None: gloss.ginf = KW.GINF['equ'].id
if gloss.lang is None: gloss.lang = KW.LANG['eng'].id
return "\n".join (errs)
def sens_tags (sens, gloss, tags):
# See the comments in the "taglist" production for a description
# of the format of 'taglist'.
KW = jdb.KW
errs = []
for t in tags:
# Each tag, t, is a list where t[0] is the tag type (aka
# domain) as a string, or None if it is unknown. There
# will be one or more additional items in the list, the
        # number depending on what type of tag it is.
vals = None
typ = t.pop(0) # Get the item type.
if typ is None:
# Unknown domain (that is, user gave a simple unadorned
            # tag like [n] rather than [pos=n]) so figure out what
# domain it belongs to...
# First, if we can interpret the tag as a sense tag, do so.
candidates = lookup_tag (t[0], ('POS','MISC','FLD','DIAL'))
if candidates and len(candidates) > 1:
errs.append (
"Sense tag '%s' is ambiguous, may be either any of %s."
" Please specify tag explicity, using, for instance,"
" \"%s=%s\"" % (t[0], ','.join([x[0] for x in candidates]),
candidates[0][0], t[0]))
continue
if candidates:
typ, t = candidates[0][0], [candidates[0][1]]
if typ is None:
candidates = lookup_tag (t[0], ('GINF','LANG'))
if candidates:
# There is currently only one ambiguity: "lit" may
# be either GINF "literal" or LANG "Lithuanian".
# We unilaterally choose the former interpretation
# as it is much more common than the latter, and
# the latter when needed can be specified as
# [lang=lit].
candidate = candidates[0]
typ = candidate[0]; t = [candidate[1]]
if typ is None:
errs.append ("Unknown tag '%s'" % t)
continue
if typ in ('POS','MISC','FLD','DIAL'):
assert len(t)==1, "invalid length"
assert type(t[0])==int, "Unresolved kw"
if typ == 'POS': o = Pos(kw=t[0])
elif typ == 'MISC': o = Misc(kw=t[0])
elif typ == 'FLD': o = Fld(kw=t[0])
elif typ == 'DIAL': o = Dial(kw=t[0])
append (sens, "_"+typ.lower(), o)
elif typ == 'RESTR':
# We can't create real _stagk or _stagr lists here
# because the readings and kanji we are given by the user
# are allowed ones, but we need to store disallowed ones.
# To get the disallowed ones, we need access to all the
# readings/kanji for this entry and we don't have that
# info at this point. So we do what checking we can. and
# save the texts as given, and will fix later after the
# full entry is built and we have access to the entry's
# readings and kanji.
rtxt,ktxt = t
#if num or corp:
if ((rtxt and ktxt) or (not rtxt and not ktxt)):
errs.append ("Sense restrictions must have a "
"reading or kanji (but not both): "
+ fmt_xitem (t))
if ktxt: append (sens, '_STAGK', ktxt)
if rtxt: append (sens, '_STAGR', rtxt)
elif typ == 'lsrc':
wasei = t[2] and 'w' in t[2]
partial = t[2] and 'p' in t[2]
append (sens, '_lsrc',
jdb.Lsrc(txt=t[0] or '', lang=(t[1] or lang_en),
part=partial, wasei=wasei))
elif typ == 'note':
if getattr (sens, 'notes', None):
errs.append ("Only one sense note allowed")
sens.notes = t[0]
elif typ == 'XREF':
kw = KW.XREF[t[0]].id
t[0] = kw
append (sens, '_XREF', t)
elif typ == 'GINF':
t = t[0] # GINF tags have only one value, the ginf code.
if getattr (gloss, 'ginf', None):
errs.append (
"Warning, duplicate GINF tag '%s' ignored\n" % KW.GINF[t].kw)
else: gloss.ginf = t
elif typ == 'LANG':
t = t[0] # LANG tags have only one value, the lang code.
assert isinstance(t,int)
if getattr (gloss, 'lang', None):
errs.append (
"Warning, duplicate LANG tag '%s' ignored\n" % KW.LANG[t].kw)
else: gloss.lang = t
elif typ:
errs.append ("Cannot use '%s' tag in a sense" % typ)
return errs
def bld_rdng (r, taglist=[]):
errs = []; nokanj = False
for t in taglist:
typ = t.pop(0)
if typ is None:
v = lookup_tag (t[0], ('RINF','FREQ'))
if not v:
typ = None
errs.append ("Unknown reading tag '%s'" % t[0])
else:
typ, t = v[0][0], v[0][1:]
if typ == 'RINF': append (r, '_inf', jdb.Rinf(kw=t[0]))
elif typ == 'FREQ':
# _freq objects are referenced by both the reading and
# kanji _freq lists. Since we don't have access to
# the kanj here, temporarily save the freq (kw, value)
# tuple in attribute "._FREQ". When the full entry is
# processed, the info in here will be removed, merged
# with parallel info from the kanj objects, and proper
# ._freq objects created.
append (r, '_FREQ', (t[0], t[1]))
elif typ == 'RESTR':
# We can't generate real restr records here because the real
# records are the disallowed kanji. We have the allowed
# kanji here and need the set of all kanji in order to get
# the disallowed set, and we don't have that now. So we
# just save the allowed kanji as given, and will convert it
# after the full entry is built and we have all the info we
# need.
#for xitem in t[0]:
# An xitem represents a reference to another entry
# or other info within an entry, in textual form. It
# is used for xrefs and restr info. It is a 5-seq
# with the following values:
# [0] -- Reading text
# [1] -- Kanji text
# For a reading restr, it is expected to contain only
# a kanji text.
rtxt,ktxt = t
if rtxt == "nokanji":
nokanj = True
r._NOKANJI = 1
continue
if rtxt:
errs.append ("Reading restrictions must be kanji only: " + rtxt)
append (r, "_RESTR", ktxt)
if hasattr (r,'_RESTR') and nokanj:
errs.append ("Can't use both kanji and \"nokanji\" in 'restr' tags")
elif typ:
errs.append ("Cannot use '%s' tag in a reading" % typ)
return "\n".join (errs)
def bld_kanj (k, taglist=[]):
errs = []
for t in taglist:
typ = t.pop(0)
if typ is None:
v = lookup_tag (t[0], ('KINF','FREQ'))
if not v: perror ("Unknown kanji tag '%s'" % t[0])
# Warning: The following simply uses the first resolved tag in
# the candidates list returned by lookup_tag(). This assumes
# there are no possible tags that are ambiguous in the KINF and
# FREQ which could cause lookup_tag() to return more than one
# candidate tags.
typ, t = v[0][0], v[0][1:]
if typ == "KINF": append (k, "_inf", jdb.Kinf(kw=t[0]))
elif typ == "FREQ":
# _freq objects are referenced by both the reading and
# kanji _freq lists. Since we don't have access to
# the rdng here, temporarily save the freq (kw, value)
# tuple in attribute "._FREQ". When the full entry is
# processed, the info in here will be removed, merged
# with parallel info from the rdng objects, and proper
# ._freq objects created.
append (k, "_FREQ", (t[0], t[1]))
else:
errs.append ("Cannot use '%s' tag in kanji section" % typ);
return "\n".join (errs)
def mk_restrs (listkey, rdngs, kanjs):
# Note: mk_restrs() are used for all three
# types of restriction info: restr, stagr, stagk. However to
# simplify things, the comments and variable names assume use
# with reading restrictions (restr).
#
# What we do is take a list of restr text items received from
# a user which list the kanji (a subset of all the kanji for
# the entry) that are valid with this reading, and turn it
# into a list of restr records that identify the kanji that
# are *invalid* with this reading. The restr records identify
# kanji by id number rather than text.
#
# listkey -- Name of the key used to get the list of text
# restr items from 'rdngs'. These are the text strings
# provided by the user. Should be "_RESTR", "_STAGR",
# or "_STAGK".
# rdngs -- List of rdng or sens records depending on whether
# we're doing restr or stagr/stagk restrictions.
# kanjs -- List of the entry's kanji or reading records
# depending on whether we are doing restr/stagk or stagr
# restrictions.
errs = []
ktxts = [x.txt for x in kanjs]
for n,r in enumerate (rdngs):
# Get the list of restr text strings and nokanji flag and
# delete them from the rdng object since they aren't part
# of the standard api.
restrtxt = getattr (r, listkey, None)
if restrtxt: delattr (r, listkey)
nokanj = getattr (r, '_NOKANJI', None)
if nokanj: delattr (r, '_NOKANJI')
# Continue with next reading if nothing to be done
# with this one.
if not nokanj and not restrtxt: continue
          # bld_rdng() guarantees that _NOKANJI and _RESTR
          # won't both be present on the same rdng.
        if nokanj and not kanjs:
              # Only rdng-kanj restrictions should have a "nokanji" tag, so
              # the message can hardwire "reading" and "kanji" text even though
              # this function is also used for sens-rdng and sens-kanj
# restrictions.
errs.append ("Reading %d has 'nokanji' tag but entry has no kanji" % (n+1))
continue
if nokanj: restrtxt = None
z = jdb.txt2restr (restrtxt, r, kanjs, listkey.lower())
# Check for kanji erroneously in the 'restrtxt' but not in
# 'kanjs'. As an optimization, we only do this check if the
# number of Restr objects created (len(z)) plus the number of
# 'restrtxt's are not equal to the number of 'kanjs's. (This
        # criterion may not be valid in some corner cases.)
if restrtxt is not None and len (z) + len (restrtxt) != len (kanjs):
nomatch = [x for x in restrtxt if x not in ktxts]
if nomatch:
if listkey == "_RESTR": not_found_in = "kanji"
elif listkey == "_STAGR": not_found_in = "readings"
elif listkey == "_STAGK": not_found_in = "kanji"
errs.append ("restr value(s) '" +
"','".join (nomatch) +
"' not in the entry's %s" % not_found_in)
return "\n".join (errs)
def resolv_xrefs (
cur, # An open DBAPI cursor to the current JMdictDB database.
entr # An entry with ._XREF tuples.
):
"""\
Convert any jelparser generated _XREF lists that are attached
to any of the senses in 'entr' to a normal augmented xref list.
An _XREF list is a list of 6-tuples:
[0] -- The type of xref per id number in table kwxref.
[1] -- Reading text of the xref target entry or None.
[2] -- Kanji text of the target xref or None.
[3] -- A list of ints specifying the target senses in
in the target entry.
[4] -- None or a number, either seq or entry id.
[5] -- None, '', or a corpus name. None means 'number'
          is an entry id, '' means it is a seq number in the
corpus 'entr.src', otherwise it is the name or id
number of a corpus in which to try resolving the
xref.
At least one of [1], [2], or [4] must be non-None.\
"""
errs = []
for s in getattr (entr, '_sens', []):
if not hasattr (s, '_XREF'): continue
xrefs = []; xunrs = []
for typ, rtxt, ktxt, slist, seq, corp in s._XREF:
if corp == '': corp = entr.src
xrf, xunr = find_xref (cur, typ, rtxt, ktxt, slist, seq, corp)
if xrf: xrefs.extend (xrf)
else:
xunrs.append (xunr)
errs.append (xunr.msg)
if xrefs: s._xref = xrefs
if xunrs: s._xrslv = xunrs
del s._XREF
return errs
def find_xref (cur, typ, rtxt, ktxt, slist, seq, corp,
corpcache={}, clearcache=False):
xrfs = []; xunrs = None; msg = ''
if clearcache: corpcache.clear()
if isinstance (corp, str):
if corpcache.get (corp, None): corpid = corpcache[corp]
else:
rs = jdb.dbread (cur, "SELECT id FROM kwsrc WHERE kw=%s", [corp])
if len(rs) != 1: raise ValueError ("Invalid corpus name: '%s'" % corp)
corpid = corpcache[corp] = rs[0][0]
else: corpid = corp
try:
xrfs = jdb.resolv_xref (cur, typ, rtxt, ktxt, slist, seq, corpid)
except ValueError as e:
msg = e.args[0]
xunrs = jdb.Xrslv (typ=typ, ktxt=ktxt, rtxt=rtxt,tsens=None)
xunrs.msg = msg
return xrfs, xunrs
def merge_freqs (entr):
    # This function is used by code that constructs Entr objects
# by parsing a textual entry description. Generally such code
# will parse freq (a.k.a. prio) tags for readings and kanji
# individually. Before the entry is used, these independent
    # tags must be combined so that rdng/kanj pairs with the
# same freq tag point to a single Freq object. This function
# does that merging.
# It expects the entry's Rdng and Kanj objects to have a temp
# attribute named "_FREQ" that contains a list of 2-tuples.
# Each 2-tuple contains the freq table kw id number, and the
    # freq value. After merge_freqs() runs, all those ._FREQ
    # attributes will have been deleted, and ._freq attributes
# created with equivalent, properly linked Freq objects.
fmap = defaultdict (lambda:([list(),list()]))
# Collect the info in .FREQ attributes from all the readings.
for r in getattr (entr, '_rdng', []):
for kw_val in getattr (r, '_FREQ', []):
# 'kw_val' is a 2-tuple denoting the freq as a freq table
# keyword id and freq value pair.
rlist = fmap[(kw_val)][0]
# Add 'r' to rlist if it is not there already.
# Use first() as a "in" operator that uses "is" rather
# than "==" as compare function.
if not jdb.isin (r, rlist): rlist.append (r)
if hasattr (r, '_FREQ'): del r._FREQ
# Collect the info in .FREQ attributes from all the kanji.
# This works on kanj's the same as above section works on
# rdng's and comments above apply here too.
for k in getattr (entr, '_kanj', []):
for kw_val in getattr (k, '_FREQ', []):
klist = fmap[(kw_val)][1]
if not jdb.isin (k, klist): klist.append (k)
if hasattr (k, '_FREQ'): del k._FREQ
# 'fmap' now has one entry for every unique freq (kw,value) tuple
# which is a pair of sets. The first set consists of all Rdng
# objects that (kw,value) freq spec applies to. The second is
# the set of all kanji it applies to. We take all combinations
# of readings with kanji, and create a Freq object for each.
errs = jdb.make_freq_objs (fmap, entr)
return errs
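
# Illustrative sketch (hypothetical kw ids): suppose reading r1 and kanji k1
# were both parsed with the tag "nf12", so each carries ._FREQ == [(5, 12)]
# (assuming "nf" has id 5 in kwfreq). merge_freqs() collapses this into
# fmap == {(5, 12): ([r1], [k1])}, from which jdb.make_freq_objs() creates a
# single Freq object referenced by both r1._freq and k1._freq.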
def append (sens, key, item):
    # Append 'item' to the list attribute 'key' of 'sens',
    # creating the latter if needed.
v = []
try: v = getattr (sens, key)
except AttributeError: setattr (sens, key, v)
v.append (item)
_uni_numeric = {
'\uFF10':'0','\uFF11':'1','\uFF12':'2','\uFF13':'3',
'\uFF14':'4','\uFF15':'5','\uFF16':'6','\uFF17':'7',
'\uFF18':'8','\uFF19':'9',}
def toint (s):
n = int (s.translate (_uni_numeric))
return n
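
# Hedged self-test for toint() (illustrative only; the expected values follow
# directly from the _uni_numeric mapping above):
def _toint_examples():
    assert toint ('12') == 12             # plain ASCII digits
    assert toint ('\uFF11\uFF12') == 12   # full-width digits U+FF11, U+FF12
    assert toint ('4\uFF12') == 42        # mixed ASCII and full-width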
def fmt_xitem (xitem):
typ = None
if len (xitem) == 6: typ = xitem.pop (0)
if len (xitem) == 5: rtxt, ktxt, slist, num, corp = xitem
else: rtxt, ktxt, slist, num, corp = xitem + [[], None, None]
k = ktxt or ''; r = rtxt or ''; n = num or ''
if num:
if corp: c = ' ' + corp
else: c = '#' if corp is None else ''
n = n + c
else: c = ''
kr = k + (u'\u30FB' if k and r else '') + r
t = n + (u'\u30FB' if n and kr else '') + kr
s = ('[%s]' % ','.join(slist)) if slist else ''
return t + s
def parse_grp (grpstr):
rv = []; KWGRP = jdb.KW.GRP
if not grpstr.strip(): return rv
# FIXME: Handle grp.notes which is currently ignored.
for g in grpstr.split (';'):
grp, x, ord = g.strip().partition ('.')
if grp.isdigit(): grp = int(grp)
grp = KWGRP[grp].id
ord = int(ord)
rv.append (Grp (kw=grp, ord=ord))
return rv
def create_parser (lexer, toks, **args):
# Set global JelParser since we need access to it
# from error handling function p_error() and I don't
# know any other way to make it available there.
global tokens, JelParser
# The tokens also have to be global because Ply
# doesn't believe in user function parameters for
# argument passing.
tokens = toks
# The following sets default keyword arguments to
# to Ply's parser factory function. These are
# intended to cause it to use the "jelparse_tab.py"
# file that should be in sys.path somewhere (either
# in the development dir's python/lib, or in the
# web lib dir.) so as to prevent Ply from trying
# to rebuild it, and worse, writing it like bird
# droppings wherever we happen to be running.
if 'module' not in args: args['module'] = sys.modules['jelparse']
if 'tabmodule' not in args: args['tabmodule'] = 'jelparse_tab'
if 'write_tables' not in args: args['write_tables'] = 1
if 'optimize' not in args: args['optimize'] = 1
if 'debug' not in args: args['debug'] = 0
JelParser = ply.yacc.yacc (**args)
JelParser.lexer = lexer # Access to lexer needed in error handler.
return JelParser
| gpl-2.0 | 3,156,281,540,188,936,700 | 39.031282 | 98 | 0.517582 | false | 3.421093 | false | false | false |
cloudify-cosmo/softlayer-python | SoftLayer/CLI/ticket/__init__.py | 1 | 1445 | """Support tickets."""
from SoftLayer.CLI import formatting
from SoftLayer import utils
import click
TEMPLATE_MSG = "***** SoftLayer Ticket Content ******"
def get_ticket_results(mgr, ticket_id, update_count=1):
"""Get output about a ticket.
:param integer id: the ticket ID
:param integer update_count: number of entries to retrieve from ticket
:returns: a KeyValue table containing the details of the ticket
"""
result = mgr.get_ticket(ticket_id)
result = utils.NestedDict(result)
table = formatting.KeyValueTable(['Name', 'Value'])
table.align['Name'] = 'r'
table.align['Value'] = 'l'
table.add_row(['id', result['id']])
table.add_row(['title', result['title']])
if result['assignedUser']:
table.add_row(['assignedUser',
"%s %s" % (result['assignedUser']['firstName'],
result['assignedUser']['lastName'])])
table.add_row(['createDate', result['createDate']])
table.add_row(['lastEditDate', result['lastEditDate']])
total_update_count = result['updateCount']
count = min(total_update_count, update_count)
for i, update in enumerate(result['updates'][:count]):
# NOTE(kmcdonald): Windows new-line characters need to be stripped out
wrapped_entry = click.wrap_text(update['entry'].replace('\r', ''))
table.add_row(['Update %s' % (i + 1,), wrapped_entry])
return table
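
# Hedged usage sketch (illustrative; the client and manager construction
# below is an assumption, not part of this module):
#
#   import SoftLayer
#   mgr = SoftLayer.TicketManager(SoftLayer.create_client_from_env())
#   print(get_ticket_results(mgr, 12345678, update_count=3))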
| mit | -2,703,160,309,967,666,000 | 32.604651 | 78 | 0.627682 | false | 3.873995 | false | false | false |
bruteforce1/cryptopals | set2/ch12/decrypt_ecb_simple.py | 1 | 4778 | #!/usr/bin/python3
"""
Copy your oracle function to a new function that encrypts buffers under
ECB mode using a consistent but unknown key (for instance, assign a
single random key, once, to a global variable).
Now take that same function and have it append to the plaintext,
BEFORE ENCRYPTING, the following string:
Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg
aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq
dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg
YnkK
|-------------------------------------------|
|Spoiler alert. |
| |
|Do not decode this string now. Don't do it.|
|-------------------------------------------|
Base64 decode the string before appending it. Do not base64 decode the
string by hand; make your code do it. The point is that you don't know
its contents.
What you have now is a function that produces:
AES-128-ECB(your-string || unknown-string, random-key)
It turns out: you can decrypt "unknown-string" with repeated calls to
the oracle function!
Here's roughly how:
Feed identical bytes of your-string to the function 1 at a time ---
start with 1 byte ("A"), then "AA", then "AAA" and so on. Discover the
block size of the cipher. You know it, but do this step anyway.
Detect that the function is using ECB. You already know, but do
this step anyways.
Knowing the block size, craft an input block that is exactly 1 byte
short (for instance, if the block size is 8 bytes, make "AAAAAAA").
Think about what the oracle function is going to put in that last byte
position.
Make a dictionary of every possible last byte by feeding different
strings to the oracle; for instance, "AAAAAAAA", "AAAAAAAB",
"AAAAAAAC", remembering the first block of each invocation.
Match the output of the one-byte-short input to one of the entries
in your dictionary. You've now discovered the first byte of
unknown-string.
Repeat for the next byte.
"""
import argparse
import base64
import random
import sys
from utils.cpset2 import aes_ecb, gen_random_bytes, test_aes_ecb
random.seed(1)
GLOBAL_KEY = gen_random_bytes(16)
def is_oracle_ecb(block):
if test_aes_ecb('A' * block * 10):
return True
return False
def convert_to_bytes(text):
if type(text).__name__ == 'str':
t = text.encode('utf-8')
elif type(text).__name__ == 'bytes':
t = text
else:
raise TypeError('Bad type passed to encryption_oracle')
return t
def decrypt_ecb(block):
    """Recover the oracle's secret suffix one byte at a time by crafting
    inputs one byte short of a block boundary and brute-forcing the last
    byte against the oracle output."""
    ans = b''
mult = 0
ctlen = len(base64.b64decode(encryption_oracle('')))
while len(ans) < ctlen:
if len(ans) % block == 0:
mult += 1
pad = b'A' * (block - (len(ans)%block + 1))
oracle = encryption_oracle(pad)
found = 0
for test in range(0,255):
te = pad + ans + bytes([test])
enc = encryption_oracle(te)
if base64.b64decode(enc)[:block*mult] == base64.b64decode(oracle)[:block*mult]:
ans += bytes([test])
found = 1
break
if not found:
break
pad = int(ans[-1])
if ans[-pad:] != bytes((pad,))*pad:
print('Issue removing final pad.')
print('Decrypted text: ')
print(ans)
return ''
return ans[:-pad].decode('utf-8')
def get_oracle_block_size():
    """Detect the cipher block size: feed 'A' * i for growing i and, after
    the ciphertext length first jumps, count how many consecutive input
    sizes share the same ciphertext length -- that count is the block
    size."""
    l = 0
resize = 0
cnt = 0
for i in range(1,100):
test = b'A' * i
tl = len(encryption_oracle(test))
if l == 0:
l = tl
elif resize == 0:
if tl != l:
cnt = 1
l = tl
resize = 1
elif l == tl:
cnt += 1
else:
return cnt
return -1
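
# What the loop above observes for a 16-byte block cipher (lengths are
# illustrative, and the base64 encoding scales them but not the count): the
# ciphertext length stays constant while padding absorbs the growing input,
# jumps by one block, and then exactly 16 consecutive input sizes share the
# new length -- so cnt reaches 16 and is returned.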
def manage_decrypt_aes_ecb():
bs = get_oracle_block_size()
if bs:
ecb = is_oracle_ecb(bs)
if ecb:
return decrypt_ecb(bs)
return ''
def encryption_oracle(text):
crypt = 'Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg'
crypt += 'aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq'
crypt += 'dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg'
crypt += 'YnkK'
return aes_ecb(convert_to_bytes(text) + base64.b64decode(crypt),
convert_to_bytes(GLOBAL_KEY),1)
def main():
ans = manage_decrypt_aes_ecb()
if ans:
print(ans)
return 0
print('Fail.')
return -1
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Uses an oracle to decrypt AES in ECB mode, one byte at \
a time. This is the simple approach.'
)
args = parser.parse_args()
sys.exit(main())
| mit | -857,997,610,606,686,100 | 26.94152 | 91 | 0.611762 | false | 3.343597 | true | false | false |
sgallagher/anaconda | pyanaconda/ui/gui/spokes/welcome.py | 2 | 15444 | # Welcome spoke classes
#
# Copyright (C) 2011-2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import sys
import re
import os
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from pyanaconda.ui.gui.hubs.summary import SummaryHub
from pyanaconda.ui.gui.spokes import StandaloneSpoke
from pyanaconda.ui.gui.utils import setup_gtk_direction, escape_markup
from pyanaconda.core.async_utils import async_action_wait
from pyanaconda.ui.gui.spokes.lib.lang_locale_handler import LangLocaleHandler
from pyanaconda.ui.gui.spokes.lib.unsupported_hardware import UnsupportedHardwareDialog
from pyanaconda import localization
from pyanaconda.product import distributionText, isFinal, productName, productVersion
from pyanaconda import flags
from pyanaconda import geoloc
from pyanaconda.core.i18n import _, C_
from pyanaconda.core.util import ipmi_abort
from pyanaconda.core.constants import DEFAULT_LANG, WINDOW_TITLE_TEXT
from pyanaconda.modules.common.constants.services import TIMEZONE, LOCALIZATION
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
__all__ = ["WelcomeLanguageSpoke"]
class WelcomeLanguageSpoke(StandaloneSpoke, LangLocaleHandler):
"""
.. inheritance-diagram:: WelcomeLanguageSpoke
:parts: 3
"""
mainWidgetName = "welcomeWindow"
focusWidgetName = "languageEntry"
uiFile = "spokes/welcome.glade"
helpFile = "WelcomeSpoke.xml"
builderObjects = ["languageStore", "languageStoreFilter", "localeStore",
"welcomeWindow", "betaWarnDialog"]
preForHub = SummaryHub
priority = 0
def __init__(self, *args, **kwargs):
StandaloneSpoke.__init__(self, *args, **kwargs)
LangLocaleHandler.__init__(self)
self._origStrings = {}
self._l12_module = LOCALIZATION.get_proxy()
def apply(self):
(store, itr) = self._localeSelection.get_selected()
if not itr:
log.warning("No locale is selected. Skip.")
return
locale = store[itr][1]
locale = localization.setup_locale(locale, self._l12_module, text_mode=False)
self._set_lang(locale)
# Skip timezone and keyboard default setting for kickstart installs.
# The user may have provided these values via kickstart and if not, we
# need to prompt for them. But do continue if geolocation-with-kickstart
# is enabled.
if flags.flags.automatedInstall and not geoloc.geoloc.enabled:
return
timezone_proxy = TIMEZONE.get_proxy()
loc_timezones = localization.get_locale_timezones(self._l12_module.Language)
if geoloc.geoloc.result.timezone:
# (the geolocation module makes sure that the returned timezone is
# either a valid timezone or None)
log.info("using timezone determined by geolocation")
timezone_proxy.SetTimezone(geoloc.geoloc.result.timezone)
# Either this is an interactive install and timezone.seen propagates
# from the interactive default kickstart, or this is a kickstart
# install where the user explicitly requested geolocation to be used.
# So set timezone.seen to True, so that the user isn't forced to
# enter the Date & Time spoke to acknowledge the timezone detected
# by geolocation before continuing the installation.
timezone_proxy.SetKickstarted(True)
elif loc_timezones and not timezone_proxy.Timezone:
# no data is provided by Geolocation, try to get timezone from the
# current language
log.info("geolocation not finished in time, using default timezone")
timezone_proxy.SetTimezone(loc_timezones[0])
@property
def completed(self):
# Skip the welcome screen if we are in single language mode
# If language has not been set the default language (en_US)
# will be used for the installation and for the installed system.
if flags.flags.singlelang:
return True
if flags.flags.automatedInstall and self._l12_module.LanguageKickstarted:
return bool(self._l12_module.Language)
def _row_is_separator(self, model, itr, *args):
return model[itr][3]
def initialize(self):
self.initialize_start()
self._languageStore = self.builder.get_object("languageStore")
self._languageStoreFilter = self.builder.get_object("languageStoreFilter")
self._languageEntry = self.builder.get_object("languageEntry")
self._langSelection = self.builder.get_object("languageViewSelection")
self._langSelectedRenderer = self.builder.get_object("langSelectedRenderer")
self._langSelectedColumn = self.builder.get_object("langSelectedColumn")
self._langView = self.builder.get_object("languageView")
self._localeView = self.builder.get_object("localeView")
self._localeStore = self.builder.get_object("localeStore")
self._localeSelection = self.builder.get_object("localeViewSelection")
LangLocaleHandler.initialize(self)
# We need to tell the view whether something is a separator or not.
self._langView.set_row_separator_func(self._row_is_separator, None)
# We can use the territory from geolocation here
# to preselect the translation, when it's available.
#
# But as the lookup might still be in progress we need to make sure
# to wait for it to finish. If the lookup has already finished
# the wait function is basically a noop.
geoloc.geoloc.wait_for_refresh_to_finish()
        # the lookup should be done now, get the territory
territory = geoloc.geoloc.result.territory_code
# bootopts and kickstart have priority over geoip
language = self._l12_module.Language
if language and self._l12_module.LanguageKickstarted:
locales = [language]
else:
locales = localization.get_territory_locales(territory) or [DEFAULT_LANG]
# get the data models
filter_store = self._languageStoreFilter
store = filter_store.get_model()
# get language codes for the locales
langs = [localization.get_language_id(locale) for locale in locales]
# check which of the geolocated languages have translations
# and store the iterators for those languages in a dictionary
langs_with_translations = {}
itr = store.get_iter_first()
while itr:
row_lang = store[itr][2]
if row_lang in langs:
langs_with_translations[row_lang] = itr
itr = store.iter_next(itr)
# if there are no translations for the given locales,
# use default
if not langs_with_translations:
self._set_lang(DEFAULT_LANG)
localization.setup_locale(DEFAULT_LANG, self._l12_module, text_mode=False)
lang_itr, _locale_itr = self._select_locale(self._l12_module.Language)
langs_with_translations[DEFAULT_LANG] = lang_itr
locales = [DEFAULT_LANG]
# go over all geolocated languages in reverse order
# and move those we have translation for to the top of the
# list, above the separator
for lang in reversed(langs):
itr = langs_with_translations.get(lang)
if itr:
store.move_after(itr, None)
else:
# we don't have translation for this language,
# so dump all locales for it
locales = [l for l in locales if localization.get_language_id(l) != lang]
# And then we add a separator after the selected best language
# and any additional languages (that have translations) from geoip
newItr = store.insert(len(langs_with_translations))
store.set(newItr, 0, "", 1, "", 2, "", 3, True)
# setup the "best" locale
locale = localization.setup_locale(locales[0], self._l12_module)
self._set_lang(locale)
self._select_locale(self._l12_module.Language)
# report that we are done
self.initialize_done()
def _retranslate_one(self, widgetName, context=None):
widget = self.builder.get_object(widgetName)
if not widget:
return
if widget not in self._origStrings:
self._origStrings[widget] = widget.get_label()
before = self._origStrings[widget]
if context is not None:
widget.set_label(C_(context, before))
else:
widget.set_label(_(before))
def retranslate(self):
# Change the translations on labels and buttons that do not have
# substitution text.
for name in ["pickLanguageLabel", "betaWarnTitle", "betaWarnDesc"]:
self._retranslate_one(name)
# It would be nice to be able to read the translation context from the
# widget, but we live in an imperfect world.
# See also: https://bugzilla.gnome.org/show_bug.cgi?id=729066
for name in ["quitButton", "continueButton"]:
self._retranslate_one(name, "GUI|Welcome|Beta Warn Dialog")
# The welcome label is special - it has text that needs to be
# substituted.
welcomeLabel = self.builder.get_object("welcomeLabel")
welcomeLabel.set_text(_("WELCOME TO %(name)s %(version)s.") %
{"name" : productName.upper(), "version" : productVersion}) # pylint: disable=no-member
# Retranslate the language (filtering) entry's placeholder text
languageEntry = self.builder.get_object("languageEntry")
if languageEntry not in self._origStrings:
self._origStrings[languageEntry] = languageEntry.get_placeholder_text()
languageEntry.set_placeholder_text(_(self._origStrings[languageEntry]))
# And of course, don't forget the underlying window.
self.window.set_property("distribution", distributionText().upper())
self.window.retranslate()
# Retranslate the window title text
# - it looks like that the main window object is not yet
# properly initialized during the first run of the
# retranslate method (it is always triggered at startup)
# so make sure the object is actually what we think it is
# - ignoring this run is OK as the initial title is
# already translated to the initial language
if isinstance(self.main_window, Gtk.Window):
self.main_window.set_title(_(WINDOW_TITLE_TEXT))
# Correct the language attributes for labels
self.main_window.reapply_language()
def refresh(self):
self._select_locale(self._l12_module.Language)
self._languageEntry.set_text("")
self._languageStoreFilter.refilter()
def _add_language(self, store, native, english, lang):
native_span = '<span lang="%s">%s</span>' % \
(escape_markup(lang),
escape_markup(native))
store.append([native_span, english, lang, False])
def _add_locale(self, store, native, locale):
native_span = '<span lang="%s">%s</span>' % \
(escape_markup(re.sub(r'\..*', '', locale)),
escape_markup(native))
store.append([native_span, locale])
# Signal handlers.
def on_lang_selection_changed(self, selection):
(_store, selected) = selection.get_selected_rows()
LangLocaleHandler.on_lang_selection_changed(self, selection)
if not selected and hasattr(self.window, "set_may_continue"):
self.window.set_may_continue(False)
def on_locale_selection_changed(self, selection):
(store, selected) = selection.get_selected_rows()
if hasattr(self.window, "set_may_continue"):
self.window.set_may_continue(len(selected) > 0)
if selected:
lang = store[selected[0]][1]
lang = localization.setup_locale(lang)
self._set_lang(lang)
self.retranslate()
# Reset the text direction
setup_gtk_direction()
# Redraw the window to reset the sidebar to where it needs to be
self.window.queue_draw()
# Override the default in StandaloneSpoke so we can display the beta
# warning dialog first.
def _on_continue_clicked(self, window, user_data=None):
# Don't display the betanag dialog if this is the final release or
# when autostep has been requested as betanag breaks the autostep logic.
if not isFinal and not self.data.autostep.seen:
dlg = self.builder.get_object("betaWarnDialog")
with self.main_window.enlightbox(dlg):
rc = dlg.run()
dlg.hide()
if rc != 1:
ipmi_abort(scripts=self.data.scripts)
sys.exit(0)
dialog = UnsupportedHardwareDialog(self.data)
if not dialog.supported:
with self.main_window.enlightbox(dialog.window):
dialog.refresh()
rc = dialog.run()
if rc != 1:
ipmi_abort(scripts=self.data.scripts)
sys.exit(0)
StandaloneSpoke._on_continue_clicked(self, window, user_data)
@async_action_wait
def _set_lang(self, lang):
# This is *hopefully* safe. The only threads that might be running
# outside of the GIL are those doing file operations, the Gio dbus
# proxy thread, and calls from the Gtk main loop. The file operations
# won't be doing things that may access the environment, fingers
# crossed, the GDbus thread shouldn't be doing anything weird since all
# of our dbus calls are from python and synchronous. Using
# gtk_action_wait ensures that this is Gtk main loop thread, and it's
# holding the GIL.
#
        # There is a lot of uncertainty and weaseliness in those statements.
# This is not good code.
#
# We cannot get around setting $LANG. Python's gettext implementation
# differs from C in that consults only the environment for the current
# language and not the data set via setlocale. If we want translations
# from python modules to work, something needs to be set in the
# environment when the language changes.
# pylint: disable=environment-modify
os.environ["LANG"] = lang
| gpl-2.0 | 4,544,419,149,237,900,300 | 41.9 | 111 | 0.655206 | false | 4.176312 | false | false | false |
apache/incubator-airflow | airflow/utils/callback_requests.py | 7 | 3600 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Optional
from airflow.models.taskinstance import SimpleTaskInstance
class CallbackRequest:
"""
Base Class with information about the callback to be executed.
:param full_filepath: File Path to use to run the callback
:param msg: Additional Message that can be used for logging
"""
def __init__(self, full_filepath: str, msg: Optional[str] = None):
self.full_filepath = full_filepath
self.msg = msg
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return str(self.__dict__)
class TaskCallbackRequest(CallbackRequest):
"""
A Class with information about the success/failure TI callback to be executed. Currently, only failure
callbacks (when tasks are externally killed) and Zombies are run via DagFileProcessorProcess.
:param full_filepath: File Path to use to run the callback
:param simple_task_instance: Simplified Task Instance representation
:param is_failure_callback: Flag to determine whether it is a Failure Callback or Success Callback
:param msg: Additional Message that can be used for logging to determine failure/zombie
"""
def __init__(
self,
full_filepath: str,
simple_task_instance: SimpleTaskInstance,
is_failure_callback: Optional[bool] = True,
msg: Optional[str] = None,
):
super().__init__(full_filepath=full_filepath, msg=msg)
self.simple_task_instance = simple_task_instance
self.is_failure_callback = is_failure_callback
class DagCallbackRequest(CallbackRequest):
"""
A Class with information about the success/failure DAG callback to be executed.
:param full_filepath: File Path to use to run the callback
:param dag_id: DAG ID
:param execution_date: Execution Date for the DagRun
:param is_failure_callback: Flag to determine whether it is a Failure Callback or Success Callback
:param msg: Additional Message that can be used for logging
"""
def __init__(
self,
full_filepath: str,
dag_id: str,
execution_date: datetime,
is_failure_callback: Optional[bool] = True,
msg: Optional[str] = None,
):
super().__init__(full_filepath=full_filepath, msg=msg)
self.dag_id = dag_id
self.execution_date = execution_date
self.is_failure_callback = is_failure_callback
class SlaCallbackRequest(CallbackRequest):
"""
A class with information about the SLA callback to be executed.
:param full_filepath: File Path to use to run the callback
:param dag_id: DAG ID
"""
def __init__(self, full_filepath: str, dag_id: str):
super().__init__(full_filepath)
self.dag_id = dag_id
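

# Hedged usage sketch (hypothetical values; a SimpleTaskInstance is normally
# derived from a real TaskInstance):
#
#   request = DagCallbackRequest(
#       full_filepath='/files/dags/example_dag.py',
#       dag_id='example_dag',
#       execution_date=datetime(2021, 1, 1),
#       is_failure_callback=True,
#       msg='DagRun timed out',
#   )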
| apache-2.0 | 4,597,060,604,835,068,400 | 34.643564 | 106 | 0.693889 | false | 4.235294 | false | false | false |
hasgeek/funnel | tests/integration/views/test_project_views.py | 1 | 2302 | from werkzeug.datastructures import MultiDict
from funnel.forms import LabelForm
from funnel.models import Label
def test_new_label_get(client, new_user, new_project):
with client.session_transaction() as session:
session['userid'] = new_user.userid
resp = client.get(new_project.url_for('new_label'))
label_form = LabelForm(parent=new_project, model=Label)
for field in label_form:
if field not in ('csrf_token', 'form_nonce'):
assert field.name in resp.data.decode('utf-8')
def test_new_label_without_option(client, new_user, new_project):
with client.session_transaction() as session:
session['userid'] = new_user.userid
resp_post = client.post(
new_project.url_for('new_label'),
data=MultiDict(
{
'title': "Label V1",
'icon_emoji': "👍",
'required': False,
'restricted': False,
}
),
follow_redirects=True,
)
assert "Manage labels" in resp_post.data.decode('utf-8')
label_v1 = Label.query.filter_by(
title="Label V1", icon_emoji="👍", project=new_project
).first()
assert label_v1 is not None
def test_new_label_with_option(client, new_user, new_project):
with client.session_transaction() as session:
session['userid'] = new_user.userid
resp_post = client.post(
new_project.url_for('new_label'),
data=MultiDict(
{
'title': ["Label V2", "Option V21", "Option V22"],
'icon_emoji': ["👍", "", ""],
'required': False,
'restricted': False,
}
),
follow_redirects=True,
)
assert "Manage labels" in resp_post.data.decode('utf-8')
label_v2 = Label.query.filter_by(
title="Label V2", icon_emoji="👍", project=new_project
).first()
assert label_v2 is not None
assert label_v2.has_options
assert len(label_v2.options) == 2
assert label_v2.options[0].title == "Option V21"
assert label_v2.options[0].icon_emoji == ""
assert label_v2.options[0].icon == "OV"
assert label_v2.options[1].title == "Option V22"
assert label_v2.options[1].icon_emoji == ""
assert label_v2.options[1].icon == "OV"
| agpl-3.0 | 3,498,146,482,524,586,000 | 32.676471 | 66 | 0.589956 | false | 3.501529 | false | false | false |
francesconistri/p2ptv-pi | acestream/ACEStream/Core/DecentralizedTracking/pymdht/core/routing_table.py | 4 | 5502 | #Embedded file name: ACEStream\Core\DecentralizedTracking\pymdht\core\routing_table.pyo
import ptime as time
import logging
logger = logging.getLogger('dht')
class PopError(Exception):
pass
class PutError(Exception):
pass
class SuperBucket(object):
def __init__(self, index, max_nodes):
self.index = index
self.main = Bucket(max_nodes)
self.replacement = Bucket(max_nodes)
class Bucket(object):
def __init__(self, max_rnodes):
self.max_rnodes = max_rnodes
self.rnodes = []
self.last_maintenance_ts = time.time()
self.last_changed_ts = 0
def get_rnode(self, node_):
i = self._find(node_)
if i >= 0:
return self.rnodes[i]
def add(self, rnode):
rnode.bucket_insertion_ts = time.time()
self.rnodes.append(rnode)
def remove(self, node_):
del self.rnodes[self._find(node_)]
def __repr__(self):
return '\n'.join(['b>'] + [ repr(rnode) for rnode in self.rnodes ])
def __len__(self):
return len(self.rnodes)
def __eq__(self, other):
if self.max_rnodes != other.max_rnodes or len(self) != len(other):
return False
for self_rnode, other_rnode in zip(self.rnodes, other.rnodes):
if self_rnode != other_rnode:
return False
return True
def __ne__(self, other):
return not self == other
def there_is_room(self, min_places = 1):
return len(self.rnodes) + min_places <= self.max_rnodes
def get_freshest_rnode(self):
freshest_ts = 0
freshest_rnode = None
for rnode in self.rnodes:
if rnode.last_seen > freshest_ts:
freshest_ts = rnode.last_seen
freshest_rnode = rnode
return freshest_rnode
def get_stalest_rnode(self):
oldest_ts = time.time()
stalest_rnode = None
for rnode in self.rnodes:
if rnode.last_seen < oldest_ts:
oldest_ts = rnode.last_seen
stalest_rnode = rnode
return stalest_rnode
def sorted_by_rtt(self):
return sorted(self.rnodes, key=lambda x: x.rtt)
def _find(self, node_):
for i, rnode in enumerate(self.rnodes):
if rnode == node_:
return i
return -1
NUM_SBUCKETS = 160
NUM_NODES = 8
class RoutingTable(object):
def __init__(self, my_node, nodes_per_bucket):
self.my_node = my_node
self.nodes_per_bucket = nodes_per_bucket
self.sbuckets = [None] * NUM_SBUCKETS
self.num_rnodes = 0
self.lowest_index = NUM_SBUCKETS
def get_sbucket(self, log_distance):
index = log_distance
if index < 0:
raise IndexError, 'index (%d) must be >= 0' % index
sbucket = self.sbuckets[index]
if not sbucket:
sbucket = SuperBucket(index, self.nodes_per_bucket[index])
self.sbuckets[index] = sbucket
return sbucket
def update_lowest_index(self, index):
if index < self.lowest_index:
sbucket = self.sbuckets[index]
if sbucket and sbucket.main:
self.lowest_index = sbucket.index
return
if index == self.lowest_index:
for i in range(index, NUM_SBUCKETS):
sbucket = self.sbuckets[i]
if sbucket and sbucket.main:
self.lowest_index = i
return
self.lowest_index = NUM_SBUCKETS
def get_closest_rnodes(self, log_distance, max_rnodes, exclude_myself):
result = []
index = log_distance
for i in range(index, self.lowest_index - 1, -1):
sbucket = self.sbuckets[i]
if not sbucket:
continue
result.extend(sbucket.main.rnodes[:max_rnodes - len(result)])
if len(result) == max_rnodes:
return result
if not exclude_myself:
result.append(self.my_node)
for i in range(index + 1, NUM_SBUCKETS):
sbucket = self.sbuckets[i]
if not sbucket:
continue
result.extend(sbucket.main.rnodes[:max_rnodes - len(result)])
if len(result) == max_rnodes:
break
return result
def find_next_bucket_with_room_index(self, node_ = None, log_distance = None):
index = log_distance or node_.log_distance(self.my_node)
for i in range(index + 1, NUM_SBUCKETS):
sbucket = self.sbuckets[i]
if sbucket is None or self.sbuckets[i].main.there_is_room():
return i
def get_main_rnodes(self):
rnodes = []
for i in range(self.lowest_index, NUM_SBUCKETS):
sbucket = self.sbuckets[i]
if sbucket:
rnodes.extend(sbucket.main.rnodes)
return rnodes
def print_stats(self):
num_nodes = 0
for i in range(self.lowest_index, NUM_SBUCKETS):
sbucket = self.sbuckets[i]
if sbucket and len(sbucket.main):
print i, len(sbucket.main), len(sbucket.replacement)
print 'Total:', self.num_rnodes
def __repr__(self):
begin = ['==============RoutingTable============= BEGIN']
data = [ '%d %r' % (i, sbucket) for i, sbucket in enumerate(self.sbuckets) ]
end = ['==============RoutingTable============= END']
return '\n'.join(begin + data + end)
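# Illustrative usage, not in the original module. Node objects and the
# log_distance metric come from pymdht's node module; the names below are
# assumptions:
#   table = RoutingTable(my_node, [8] * NUM_SBUCKETS)
#   index = my_node.log_distance(other_node)
#   sbucket = table.get_sbucket(index)
#   if sbucket.main.there_is_room():
#       sbucket.main.add(other_node)
#       table.num_rnodes += 1
#       table.update_lowest_index(index)
#   closest = table.get_closest_rnodes(index, 8, exclude_myself=True)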
| mit | -345,034,039,726,740,860 | 29.065574 | 87 | 0.556707 | false | 3.600785 | false | false | false |
fatiherikli/dbpatterns | web/dbpatterns/documents/parsers/django_orm.py | 1 | 5300 | import ast
from _ast import Call, Attribute, Name
from documents.constants import *
from documents.parsers import BaseParser, ParseError
MODEL_BASE_CLASS = "Model"
MANY_TO_MANY_FIELD = "many-to-many"
FOREIGN_KEY_FIELD = "foreign-key"
FIELD_TYPE_MAP = {
"PositiveIntegerField": TYPES_INTEGER,
"IntegerField": TYPES_INTEGER,
"CharField": TYPES_STRING,
"EmailField": TYPES_STRING,
"BooleanField": TYPES_BOOLEAN,
"DateTimeField": TYPES_DATETIME,
"DateField": TYPES_DATE,
"TimeField": TYPES_TIME,
"FileField": TYPES_STRING,
"ForeignKey": FOREIGN_KEY_FIELD,
"ManyToManyField": MANY_TO_MANY_FIELD,
"OneToOneField": FOREIGN_KEY_FIELD,
"FloatField": TYPES_DOUBLE
}
DEFAULT_FIELD_TYPE = "string"
class FieldVisitor(ast.NodeVisitor):
"""
A visitor that inspects model fields.
"""
def __init__(self):
self.fields = []
def add_field(self, field_name, field_type, relationship):
field = {
"name": field_name,
"type": field_type
}
if relationship is not None:
field["relationship"] = relationship
self.fields.append(field)
def visit_Assign(self, node):
field_name = None
field_type = None
relationship = None
if not isinstance(node.value, Call):
return
try:
field_name = node.targets[0].id
except AttributeError:
return
if isinstance(node.value.func, Attribute):
field_type = FIELD_TYPE_MAP.get(node.value.func.attr,
DEFAULT_FIELD_TYPE)
if field_type in [MANY_TO_MANY_FIELD, FOREIGN_KEY_FIELD]:
relationship = node.value.args[0].id
if field_type is not None:
self.add_field(field_name, field_type, relationship=relationship)
class ModelVisitor(ast.NodeVisitor):
"""
A visitor that detects django models.
"""
def __init__(self):
self.models = {}
def visit_ClassDef(self, node):
base_class = None
for base in node.bases:
if isinstance(base, Attribute):
base_class = base.attr
if isinstance(base, Name):
base_class = base.id
if base_class == MODEL_BASE_CLASS:
visitor = FieldVisitor()
visitor.visit(node)
self.models[node.name] = visitor.fields
class DjangoORMParser(BaseParser):
def parse(self, text):
try:
node = ast.parse(text)
except SyntaxError:
raise ParseError
else:
visitor = ModelVisitor()
visitor.visit(node)
return self.normalize_models(visitor.models)
def normalize_models(self, models):
"""
The normalization process for django models.
- Adds `id` field
- Separates many-to-many fields
- Converts foreign-key-fields to integer
"""
position_top = 0
position_left = 0
for model, fields in models.items():
attributes = [{
"name": "id",
"type": TYPES_INTEGER,
"is_primary_key": True
}]
for field in fields:
if field.get("type") == MANY_TO_MANY_FIELD:
position_left += ENTITY_POSITION_LEFT_INCREASE
position_top += ENTITY_POSITION_TOP_INCREASE
yield self.m2m_to_entity(model, field, position_top, position_left)
continue # skip the field addition
elif field.get("type") == FOREIGN_KEY_FIELD:
field["name"] += "_id"
field["type"] = TYPES_INTEGER
attributes.append(field)
position_left += ENTITY_POSITION_LEFT_INCREASE
position_top += ENTITY_POSITION_TOP_INCREASE
yield {
"name": model.lower(),
"attributes": attributes,
"position": {
"top": position_top,
"left": position_left
}
}
def m2m_to_entity(self, model, field, position_top, position_left):
"""
Returns an entity that consist of provided m2m field.
"""
return {
"name": model.lower() + "_" + field.get("name"),
"position": {
"top": position_top,
"left": position_left
},
"attributes": [
{
"name": "id",
"type": TYPES_INTEGER,
},
{
"name": model.lower() + "_id",
"type": TYPES_INTEGER,
"is_foreign_key": True,
"foreign_key_entity": model.lower(),
"foreign_key_attribute": "id"
},
{
"name": field.get("relationship").lower() + "_id",
"type": TYPES_INTEGER,
"is_foreign_key": True,
"foreign_key_entity": field.get("relationship").lower(),
"foreign_key_attribute": "id"
}
]
}
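# Example run (hypothetical model source). parse() yields one entity dict per
# model plus one per many-to-many field, per the normalization above:
#   source = '''
#   from django.db import models
#   class Author(models.Model):
#       name = models.CharField(max_length=100)
#   class Book(models.Model):
#       title = models.CharField(max_length=100)
#       authors = models.ManyToManyField(Author)
#   '''
#   entities = list(DjangoORMParser().parse(source))
#   # -> entities named 'author', 'book' and a join entity 'book_authors'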
| mit | -2,033,049,868,149,611,300 | 27.961749 | 87 | 0.510943 | false | 4.383788 | false | false | false |
abanaiyan/sniper | scripts/scheduler-locality.py | 2 | 5346 | import sim
def getScoreMetricTime(thread_id):
return long(sim.stats.get('thread', thread_id, 'nonidle_elapsed_time'))
def getScoreMetricInstructions(thread_id):
return long(sim.stats.get('thread', thread_id, 'instruction_count'))
class Thread:
def __init__(self, thread_id, getScoreMetric):
self.thread_id = thread_id
self.getScoreMetric = lambda: getScoreMetric(thread_id)
self.core = None
self.runnable = False
self.unscheduled = False
self.score = 0 # Accumulated score
self.metric_last = 0 # State at start of last interval
sim.thread.set_thread_affinity(self.thread_id, ())
def updateScore(self):
metric_now = self.getScoreMetric()
self.score += metric_now - self.metric_last
self.metric_last = metric_now
def setScore(self, score):
self.score = score
self.metric_last = self.getScoreMetric()
def setCore(self, core_id, time = -1):
self.core = core_id
if core_id is None:
self.updateScore()
self.last_scheduled_out = time
sim.thread.set_thread_affinity(self.thread_id, ())
else:
self.last_scheduled_in = time
sim.thread.set_thread_affinity(self.thread_id, [ c == core_id for c in range(sim.config.ncores) ])
def __repr__(self):
return 'Thread(%d, %s, score = %d)' % (self.thread_id, 'core = %d' % self.core if self.core is not None else 'no core', self.score)
class SchedulerLocality:
def setup(self, args):
args = dict(enumerate((args or '').split(':')))
interval_ns = long(args.get(0, None) or 10000000)
scheduler_type = args.get(1, 'equal_time')
core_mask = args.get(2, '')
if scheduler_type == 'equal_time':
self.getScoreMetric = getScoreMetricTime
elif scheduler_type == 'equal_instructions':
self.getScoreMetric = getScoreMetricInstructions
else:
raise ValueError('Invalid scheduler type %s' % scheduler_type)
if core_mask:
core_mask = map(int, core_mask.split(',')) + [0]*sim.config.ncores
self.cores = [ core for core in range(sim.config.ncores) if core_mask[core] ]
else:
self.cores = range(sim.config.ncores)
sim.util.Every(interval_ns * sim.util.Time.NS, self.periodic)
self.threads = {}
self.last_core = 0
def hook_thread_start(self, thread_id, time):
self.threads[thread_id] = Thread(thread_id, self.getScoreMetric)
self.threads[thread_id].runnable = True
# Initial assignment: one thread per core until cores are exhausted
if self.last_core < len(self.cores):
self.threads[thread_id].setCore(self.cores[self.last_core], sim.stats.time())
self.last_core += 1
else:
self.threads[thread_id].setCore(None, sim.stats.time())
def hook_thread_exit(self, thread_id, time):
self.hook_thread_stall(thread_id, 'exit', time)
def hook_thread_stall(self, thread_id, reason, time):
if reason == 'unscheduled':
# Ignore calls due to the thread being scheduled out
self.threads[thread_id].unscheduled = True
else:
core = self.threads[thread_id].core
self.threads[thread_id].setCore(None, time)
self.threads[thread_id].runnable = False
# Schedule a new thread (runnable, but not running) on this free core
threads = [ thread for thread in self.threads.values() if thread.runnable and thread.core is None ]
if threads:
# Order by score
threads.sort(key = lambda thread: thread.score)
threads[0].setCore(core, time)
def hook_thread_resume(self, thread_id, woken_by, time):
if self.threads[thread_id].unscheduled:
# Ignore calls due to the thread being scheduled back in
self.threads[thread_id].unscheduled = False
else:
self.threads[thread_id].setScore(min([ thread.score for thread in self.threads.values() ]))
self.threads[thread_id].runnable = True
# If there is a free core, move us there now
used_cores = set([ thread.core for thread in self.threads.values() if thread.core is not None ])
free_cores = set(self.cores) - used_cores
if len(free_cores):
self.threads[thread_id].setCore(list(free_cores)[0], time)
def periodic(self, time, time_delta):
# Update thread scores
[ thread.updateScore() for thread in self.threads.values() if thread.core is not None ]
# Get a list of all runnable threads
threads = [ thread for thread in self.threads.values() if thread.runnable ]
# Order by score
threads.sort(key = lambda thread: thread.score)
# Select threads to run now, one per core
threads = threads[:len(self.cores)]
#print ', '.join(map(repr, threads))
# Filter out threads that are already running, and keep them on their current core
keep_threads = [ thread for thread in threads if thread.core is not None ]
used_cores = set([ thread.core for thread in keep_threads ])
# Move new threads to free cores
free_cores = set(self.cores) - used_cores
threads = [ thread for thread in threads if thread.core is None ]
assert(len(free_cores) >= len(threads))
for thread, core in zip(threads, sorted(free_cores)):
current_thread = [ t for t in self.threads.values() if t.core == core ]
if current_thread:
current_thread[0].setCore(None)
thread.setCore(core, time)
assert thread.runnable
sim.util.register(SchedulerLocality())
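# Typical invocation (illustrative; the exact flag spelling depends on the
# Sniper version). The colon-separated argument string maps onto setup()'s
# interval_ns:scheduler_type:core_mask fields:
#   ./run-sniper -s scheduler-locality:10000000:equal_instructions:1,1,0,0 -- ./app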
| mit | 1,987,437,797,327,282,400 | 37.73913 | 135 | 0.670034 | false | 3.510177 | false | false | false |
hoburg/gpkit | gpkit/tools/autosweep.py | 1 | 11266 | "Tools for optimal fits to GP sweeps"
from time import time
import numpy as np
from ..small_classes import Count
from ..small_scripts import mag
from ..solution_array import SolutionArray
from ..exceptions import InvalidGPConstraint
class BinarySweepTree: # pylint: disable=too-many-instance-attributes
"""Spans a line segment. May contain two subtrees that divide the segment.
Attributes
----------
bounds : two-element list
The left and right boundaries of the segment
sols : two-element list
The left and right solutions of the segment
costs : array
The left and right logcosts of the segment
splits : None or two-element list
If not None, contains the left and right subtrees
splitval : None or float
The worst-error point, where the split will be if tolerance is too low
splitlb : None or float
The cost lower bound at splitval
splitub : None or float
The cost upper bound at splitval
"""
def __init__(self, bounds, sols, sweptvar, costposy):
if len(bounds) != 2:
raise ValueError("bounds must be of length 2")
if bounds[1] <= bounds[0]:
raise ValueError("bounds[0] must be smaller than bounds[1].")
self.bounds = bounds
self.sols = sols
self.costs = np.log([mag(sol["cost"]) for sol in sols])
self.splits = None
self.splitval = None
self.splitlb = None
self.splitub = None
self.sweptvar = sweptvar
self.costposy = costposy
def add_split(self, splitval, splitsol):
"Creates subtrees from bounds[0] to splitval and splitval to bounds[1]"
if self.splitval:
raise ValueError("split already exists!")
if splitval <= self.bounds[0] or splitval >= self.bounds[1]:
raise ValueError("split value is at or outside bounds.")
self.splitval = splitval
self.splits = [BinarySweepTree([self.bounds[0], splitval],
[self.sols[0], splitsol],
self.sweptvar, self.costposy),
BinarySweepTree([splitval, self.bounds[1]],
[splitsol, self.sols[1]],
self.sweptvar, self.costposy)]
def add_splitcost(self, splitval, splitlb, splitub):
"Adds a splitval, lower bound, and upper bound"
if self.splitval:
raise ValueError("split already exists!")
if splitval <= self.bounds[0] or splitval >= self.bounds[1]:
raise ValueError("split value is at or outside bounds.")
self.splitval = splitval
self.splitlb, self.splitub = splitlb, splitub
def posy_at(self, posy, value):
"""Logspace interpolates between sols to get posynomial values.
No guarantees, just like a regular sweep.
"""
if value < self.bounds[0] or value > self.bounds[1]:
raise ValueError("query value is outside bounds.")
bst = self.min_bst(value)
lo, hi = bst.bounds
loval, hival = [sol(posy) for sol in bst.sols]
lo, hi, loval, hival = np.log(list(map(mag, [lo, hi, loval, hival])))
interp = (hi-np.log(value))/float(hi-lo)
return np.exp(interp*loval + (1-interp)*hival)
def cost_at(self, _, value, bound=None):
"Logspace interpolates between split and costs. Guaranteed bounded."
if value < self.bounds[0] or value > self.bounds[1]:
raise ValueError("query value is outside bounds.")
bst = self.min_bst(value)
if bst.splitlb:
if bound:
if bound == "lb":
splitcost = np.exp(bst.splitlb)
elif bound == "ub":
splitcost = np.exp(bst.splitub)
else:
splitcost = np.exp((bst.splitlb + bst.splitub)/2)
if value <= bst.splitval:
lo, hi = bst.bounds[0], bst.splitval
loval, hival = bst.sols[0]["cost"], splitcost
else:
lo, hi = bst.splitval, bst.bounds[1]
loval, hival = splitcost, bst.sols[1]["cost"]
else:
lo, hi = bst.bounds
loval, hival = [sol["cost"] for sol in bst.sols]
lo, hi, loval, hival = np.log(list(map(mag, [lo, hi, loval, hival])))
interp = (hi-np.log(value))/float(hi-lo)
return np.exp(interp*loval + (1-interp)*hival)
def min_bst(self, value):
"Returns smallest bst around value."
if not self.splits:
return self
choice = self.splits[0] if value <= self.splitval else self.splits[1]
return choice.min_bst(value)
def sample_at(self, values):
"Creates a SolutionOracle at a given range of values"
return SolutionOracle(self, values)
@property
def sollist(self):
"Returns a list of all the solutions in an autosweep"
sollist = [self.sols[0]]
if self.splits:
sollist.extend(self.splits[0].sollist[1:])
sollist.extend(self.splits[1].sollist[1:-1])
sollist.append(self.sols[1])
return sollist
@property
def solarray(self):
"Returns a solution array of all the solutions in an autosweep"
solution = SolutionArray()
for sol in self.sollist:
solution.append(sol)
solution.to_arrays()
return solution
def save(self, filename="autosweep.p"):
"""Pickles the autosweep and saves it to a file.
The saved autosweep is identical except for two things:
- the cost is made unitless
- each solution's 'program' attribute is removed
Solution can then be loaded with e.g.:
>>> import cPickle as pickle
>>> pickle.load(open("autosweep.p"))
"""
import pickle
pickle.dump(self, open(filename, "wb"))
class SolutionOracle:
"Acts like a SolutionArray for autosweeps"
def __init__(self, bst, sampled_at):
self.sampled_at = sampled_at
self.bst = bst
def __call__(self, key):
return self.__getval(key)
def __getitem__(self, key):
return self.__getval(key)
def _is_cost(self, key):
if hasattr(key, "hmap") and key.hmap == self.bst.costposy.hmap:
return True
return key == "cost"
def __getval(self, key):
"Gets values from the BST and units them"
if self._is_cost(key):
key_at = self.bst.cost_at
v0 = self.bst.sols[0]["cost"]
else:
key_at = self.bst.posy_at
v0 = self.bst.sols[0](key)
units = getattr(v0, "units", None)
fit = [key_at(key, x) for x in self.sampled_at]
return fit*units if units else np.array(fit)
def cost_lb(self):
"Gets cost lower bounds from the BST and units them"
units = getattr(self.bst.sols[0]["cost"], "units", None)
fit = [self.bst.cost_at("cost", x, "lb") for x in self.sampled_at]
return fit*units if units else np.array(fit)
def cost_ub(self):
"Gets cost upper bounds from the BST and units them"
units = getattr(self.bst.sols[0]["cost"], "units", None)
fit = [self.bst.cost_at("cost", x, "ub") for x in self.sampled_at]
return fit*units if units else np.array(fit)
def plot(self, posys=None, axes=None):
"Plots the sweep for each posy"
import matplotlib.pyplot as plt
from ..interactive.plot_sweep import assign_axes
from .. import GPBLU
if not hasattr(posys, "__len__"):
posys = [posys]
for i, posy in enumerate(posys):
if posy in [None, "cost"]:
posys[i] = self.bst.costposy
posys, axes = assign_axes(self.bst.sweptvar, posys, axes)
for posy, ax in zip(posys, axes):
if self._is_cost(posy): # with small tol should look like a line
ax.fill_between(self.sampled_at,
self.cost_lb(), self.cost_ub(),
facecolor=GPBLU, edgecolor=GPBLU,
linewidth=0.75)
else:
ax.plot(self.sampled_at, self(posy), color=GPBLU)
if len(axes) == 1:
axes, = axes
return plt.gcf(), axes
def autosweep_1d(model, logtol, sweepvar, bounds, **solvekwargs):
"Autosweep a model over one sweepvar"
original_val = model.substitutions.get(sweepvar, None)
start_time = time()
solvekwargs.setdefault("verbosity", 1)
solvekwargs["verbosity"] -= 1
sols = Count().next
firstsols = []
for bound in bounds:
model.substitutions.update({sweepvar: bound})
try:
model.solve(**solvekwargs)
firstsols.append(model.program.result)
except InvalidGPConstraint:
raise InvalidGPConstraint("only GPs can be autoswept.")
sols()
bst = BinarySweepTree(bounds, firstsols, sweepvar, model.cost)
tol = recurse_splits(model, bst, sweepvar, logtol, solvekwargs, sols)
bst.nsols = sols() # pylint: disable=attribute-defined-outside-init
if solvekwargs["verbosity"] > -1:
print("Solved in %2i passes, cost logtol +/-%.3g" % (bst.nsols, tol))
print("Autosweeping took %.3g seconds." % (time() - start_time))
if original_val:
model.substitutions[sweepvar] = original_val
else:
del model.substitutions[sweepvar]
return bst
def recurse_splits(model, bst, variable, logtol, solvekwargs, sols):
"Recursively splits a BST until logtol is reached"
x, lb, ub = get_tol(bst.costs, bst.bounds, bst.sols, variable)
tol = (ub-lb)/2.0
if tol >= logtol:
model.substitutions.update({variable: x})
model.solve(**solvekwargs)
bst.add_split(x, model.program.result)
sols()
tols = [recurse_splits(model, split, variable, logtol, solvekwargs,
sols)
for split in bst.splits]
bst.tol = max(tols)
return bst.tol
bst.add_splitcost(x, lb, ub)
return tol
def get_tol(costs, bounds, sols, variable): # pylint: disable=too-many-locals
"Gets the intersection point and corresponding bounds from two solutions."
y0, y1 = costs
x0, x1 = np.log(bounds)
s0, s1 = [sol["sensitivities"]["variables"][variable] for sol in sols]
# y0 + s0*(x - x0) == y1 + s1*(x - x1)
num = y1-y0 + x0*s0-x1*s1
denom = s0-s1
# NOTE: several branches below deal with straight lines, where lower
# and upper bounds are identical and so x is undefined
if denom == 0:
# mosek runs into this on perfect straight lines, num also equal to 0
# mosek_cli also runs into this on near-straight lines, num ~= 0
interp = -1 # flag interp as out-of bounds
else:
x = num/denom
lb = y0 + s0*(x-x0)
interp = (x1-x)/(x1-x0)
ub = y0*interp + y1*(1-interp)
if interp < 1e-7 or interp > 1 - 1e-7: # cvxopt on straight lines
x = (x0 + x1)/2 # x is undefined? stick it in the middle!
lb = ub = (y0 + y1)/2
return np.exp(x), lb, ub
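# Minimal usage sketch (the model below is illustrative, not part of this
# module):
#   from gpkit import Model, Variable
#   x = Variable("x")
#   y = Variable("y")
#   m = Model(y, [y >= (x/3)**-2 + (x/3)**2])
#   bst = autosweep_1d(m, logtol=1e-3, sweepvar=x, bounds=[1, 10])
#   sol = bst.sample_at(np.linspace(1, 10, 100))
#   fig, ax = sol.plot("cost")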
| mit | 5,650,122,931,816,554,000 | 37.189831 | 79 | 0.583526 | false | 3.573105 | false | false | false |
nlangellier/gaussian-processes | solar_GP_RH.py | 1 | 3673 | import time
import numpy as np
import scipy.linalg as lin
import scipy.stats as stat
import matplotlib.pyplot as plt
import kernel_qp as qp
def log_likelihood(theta, t, Y):
tLen = len(t.reshape(-1))
Nw = theta.shape[0]
Y = np.ascontiguousarray(Y.reshape(-1))
K = qp.gamma_qp(t, theta)
Kinv_mu = np.empty((Nw, tLen))
for i in range(Nw):
K[i], lower = lin.cho_factor(K[i])
Kinv_mu[i] = lin.cho_solve((K[i], lower), Y)
muT_Kinv_mu = (Y*Kinv_mu).sum(axis=1)
HalfLogDetK = np.log(K.diagonal(axis1=1, axis2=2)).sum(axis=1)
return -0.5*tLen*np.log(2*np.pi) - HalfLogDetK - 0.5*muT_Kinv_mu
def log_prior(theta):
out = stat.norm.logpdf(theta[:,0], loc=0.0005, scale=0.00025) # Sc
out += stat.norm.logpdf(theta[:,1], loc=25.3, scale=2.0) # Pqp
out += stat.uniform.logpdf(theta[:,2], loc=0.1, scale=1.5) # lambda_P
out += stat.norm.logpdf(theta[:,3], loc=50.0, scale=25.0) # lambda_e
out += stat.norm.logpdf(theta[:,4], loc=0.0005, scale=0.00025) # sig
return out.squeeze()
def log_posterior(theta, t, Y):
return log_likelihood(theta, t, Y) + log_prior(theta)
def proposal(size=1, a=2.0):
U = stat.uniform.rvs(size=size)
return (U*(a - 1) + 1)**2/a
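# proposal() draws the Goodman & Weare (2010) stretch-move scale z from
# g(z) proportional to 1/sqrt(z) on [1/a, a]; ((u*(a - 1) + 1)**2)/a is the
# inverse-CDF form of that density. main() folds the z**(N-1) Jacobian factor
# required by the move into the acceptance threshold via logrZNp.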
def main():
Nparam = 5;
Ni = int(1e4)
Nw = int(1e2)
Nw2 = int(Nw/2)
Ns = int(Ni*Nw)
Z = proposal(size=(Ni, Nw2, 2))
r = stat.uniform.rvs(size=(Ni, Nw2, 2))
logrZNp = np.log(r*(Z**(1 - Nparam)))
rw = stat.randint.rvs(low=0, high=Nw2, size=(Ni, Nw2, 2))
x01 = stat.norm.rvs(loc=0.0005, scale=0.00025, size=(Nw2, 2)) # Sc
x02 = stat.norm.rvs(loc=25.3, scale=2.0, size=(Nw2, 2)) # Pqp
x03 = stat.uniform.rvs(loc=0.1, scale=1.5, size=(Nw2, 2)) # lambda_P
x04 = stat.norm.rvs(loc=50.0, scale=25.0, size=(Nw2, 2)) # lambda_e
x05 = stat.norm.rvs(loc=0.0005, scale=0.00025, size=(Nw2, 2)) # sig2
theta0 = np.array([x01, x02, x03, x04, x05])
theta = np.zeros((Nparam, Ni, Nw2, 2))
acpt = np.zeros(Z.shape)
fname = r'C:\Users\Nicholas\Documents\Walsworth_group\solar_RVs\solarSindexCutDaily.txt'
data = np.loadtxt(fname, skiprows=1)
t = data[:,0]
Y = data[:,2]
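    # Each iteration below updates the two half-ensembles in turn (the
    # parallel stretch-move scheme): walkers in one half move toward randomly
    # chosen walkers of the other, momentarily frozen, half.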
for l in range(0, Ni):
if l % 1 == 0: print(100*l/Ni, '% complete')
j = rw[l,:,0]
thetaP = theta0[:,j,1] + Z[l,:,0]*(theta0[:,:,0] - theta0[:,j,1])
logPthetaP = log_posterior(thetaP.T, t, Y)
logPtheta0 = log_posterior(theta0[:,:,0].T, t, Y)
idx = (logPthetaP - logPtheta0) >= logrZNp[l,:,0]
acpt[l,idx,0] += 1
theta[:,l, idx,0] = thetaP[:,idx]
theta[:,l,~idx,0] = theta0[:,~idx,0]
theta0[:,:,0] = theta[:,l,:,0]
j = rw[l,:,1]
thetaP = theta0[:,j,0] + Z[l,:,1]*(theta0[:,:,1] - theta0[:,j,0])
logPthetaP = log_posterior(thetaP.T, t, Y)
logPtheta0 = log_posterior(theta0[:,:,1].T, t, Y)
idx = (logPthetaP - logPtheta0) >= logrZNp[l,:,1]
acpt[l,idx,1] += 1
theta[:,l, idx,1] = thetaP[:,idx]
theta[:,l,~idx,1] = theta0[:,~idx,1]
theta0[:,:,1] = theta[:,l,:,1]
fname = r'C:\Users\Nicholas\Documents\Walsworth_group\solar_RVs\S_samples20'
np.save(fname, theta)
fname = r'C:\Users\Nicholas\Documents\Walsworth_group\solar_RVs\accept20'
np.save(fname, acpt)
if __name__ == '__main__': main() | unlicense | 8,800,759,658,656,268,000 | 36.684211 | 92 | 0.518377 | false | 2.575736 | false | false | false |
kurbatovaei/python_training | fixture/contact.py | 1 | 9249 | from model.contact import Contact
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def open_home_page(self):
wd = self.app.wd
if not ((wd.current_url.endswith("addressbook/") or wd.current_url.endswith("/index.php")) and (
len(wd.find_elements_by_link_text("Last name")) > 0)):
wd.find_element_by_link_text("home").click()
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def fill_contact_form(self, contact):
self.change_field_value("firstname", contact.firstname)
self.change_field_value("lastname", contact.lastname)
self.change_field_value("address", contact.address)
self.change_field_value("home", contact.home_phone)
self.change_field_value("mobile", contact.mobile_phone)
self.change_field_value("work", contact.work_phone)
self.change_field_value("fax", contact.fax)
self.change_field_value("email", contact.email1)
self.change_field_value("email2", contact.email2)
self.change_field_value("email3", contact.email3)
self.change_field_value("homepage", contact.homepage)
def create(self, contact):
wd = self.app.wd
self.open_home_page()
# init contact creation
wd.find_element_by_link_text("add new").click()
self.fill_contact_form(contact)
# submit contact creation
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
wd.find_element_by_link_text("home page").click()
self.contact_cache = None
def edit_first_contact(self, contact):
self.edit_contact_by_index(0, contact)
def edit_contact_by_index(self, index, contact):
wd = self.app.wd
self.open_home_page()
# init contact editing
# wd.find_element_by_xpath("//tbody/tr[" + str(index+1) + "]/td[8]/a").click()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
self.fill_contact_form(contact)
# submit update
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def edit_contact_by_id(self, id, contact):
wd = self.app.wd
self.open_home_page()
# init contact editing
checkbox = wd.find_element_by_id(id)
row = checkbox.find_element_by_xpath("./../..")
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
self.fill_contact_form(contact)
# submit update
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.open_home_page()
self.select_contact_by_index(index)
# submit deletion
# wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.find_element_by_xpath("//div/div[4]/form[2]/div[2]/input").click()
# confirm deletion alert
wd.switch_to_alert().accept()
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.open_home_page()
self.select_contact_by_id(id)
# submit deletion
# wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.find_element_by_xpath("//div/div[4]/form[2]/div[2]/input").click()
# confirm deletion alert
wd.switch_to_alert().accept()
self.contact_cache = None
def count(self):
wd = self.app.wd
self.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.open_home_page()
self.contact_cache = []
for element in wd.find_elements_by_name("entry"):
lastname = element.find_element_by_css_selector("td:nth-child(2)").text
firstname = element.find_element_by_css_selector("td:nth-child(3)").text
address = element.find_element_by_css_selector("td:nth-child(4)").text
all_emails = element.find_element_by_css_selector("td:nth-child(5)").text
all_phones = element.find_element_by_css_selector("td:nth-child(6)").text
homepage = element.find_element_by_css_selector("td:nth-child(10)").text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.contact_cache.append(Contact(firstname=firstname, lastname=lastname, address=address,
all_emails_from_home_page=all_emails,
all_phones_from_home_page=all_phones, homepage=homepage, id=id))
return self.contact_cache
def get_contact_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
home_phone = wd.find_element_by_name("home").get_attribute("value")
mobile_phone = wd.find_element_by_name("mobile").get_attribute("value")
work_phone = wd.find_element_by_name("work").get_attribute("value")
email1 = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
homepage = wd.find_element_by_name("homepage").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
return Contact(firstname=firstname, lastname=lastname, address=address, home_phone=home_phone,
mobile_phone=mobile_phone, work_phone=work_phone, email1=email1, email2=email2, email3=email3,
homepage=homepage, id=id)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
home_phone = re.search("H: (.*)", text).group(1)
mobile_phone = re.search("M: (.*)", text).group(1)
work_phone = re.search("W: (.*)", text).group(1)
return Contact(home_phone=home_phone, mobile_phone=mobile_phone, work_phone=work_phone)
def clear(self, s):
wd = self.app.wd
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(self, contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: self.clear(x),
filter(lambda x: x is not None,
[contact.home_phone, contact.mobile_phone, contact.work_phone,
contact.fax]))))
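    # Illustrative example (made-up numbers): home_phone = "(495) 111-22-33"
    # and work_phone = "44 55" merge to "4951112233\n4455", matching the
    # formatting-stripped rendering this helper emulates.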
def merge_emails_like_on_home_page(self, contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: self.clear(x),
filter(lambda x: x is not None,
[contact.email1, contact.email2, contact.email3]))))
def add_contact_to_group(self, app, contact, group):
wd = self.app.wd
self.open_home_page()
self.select_contact_by_id(contact.id)
app.group.add_to_group(group)
def remove_contact_from_group(self, app, contact, group):
wd = self.app.wd
self.open_home_page()
app.group.select_group_on_homepage_by_id(group.id)
self.select_contact_by_id(contact.id)
wd.find_element_by_name("remove").click()
assert ("group page \"" + group.name + "\"") == wd.find_element_by_xpath("//div/div[4]/div/i/a").text
wd.find_element_by_xpath("//div/div[4]/div/i/a").click()
| apache-2.0 | -4,742,112,173,271,384,000 | 44.117073 | 117 | 0.586982 | false | 3.471847 | false | false | false |
flschiavoni/harpia | harpia/extensions/c/opencv/liveDelay.py | 2 | 2875 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains the LiveDelay class.
"""
from harpia.GUI.fieldtypes import *
from harpia.model.plugin import Plugin
class LiveDelay(Plugin):
"""
    This class contains methods related to the LiveDelay class.
"""
# -------------------------------------------------------------------------
def __init__(self):
Plugin.__init__(self)
self.frameNumber = 5
# Appearance
self.help = "Inserts a delay inside a live stream."
self.label = "Live Delay"
self.color = "250:20:30:150"
self.in_types = ["harpia.extensions.c.ports.image"]
self.out_types = ["harpia.extensions.c.ports.image"]
self.group = "General"
self.properties = [{"name": "Time (in frames)",
"label": "frameNumber",
"type": HARPIA_INT,
"lower": 1,
"upper": 200,
"step": 1
}
]
# ------------------------------C/OpenCv code--------------------------
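        # The generated C below keeps a ring buffer of $frameNumber$ frames:
        # each capture overwrites the oldest slot and the output is read from
        # the slot written $frameNumber$ captures earlier, producing the delay.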
self.codes[2] = '''
if(block$id$_img_i0){
cvReleaseImage(&(block$id$_buffer[i_$id$]));
block$id$_buffer[i_$id$] = cvCloneImage(block$id$_img_i0);
i_$id$++;
i_$id$ %= $frameNumber$;
block$id$_img_o0 = block$id$_buffer[i_$id$];
}
'''
self.codes[3] = 'cvReleaseImage(&block$id$_img_i0);\n'
self.codes[4] = '''
for(i_$id$=0; i_$id$<$frameNumber$; i_$id$++)
if(block$id$_buffer[i_$id$] != NULL)
cvReleaseImage(&(block$id$_buffer[i_$id$]));
'''
# ----------------------------------------------------------------------
def generate_vars(self):
self.frameNumber = int(round(float(self.frameNumber)))
value = \
'IplImage * block$id$_img_i0 = NULL;\n' + \
'IplImage * block$id$_img_o0 = NULL;\n' + \
'int i_$id$ = 0;\n' + \
'IplImage * block$id$_buffer[$frameNumber$] = {'
for idx in range(self.frameNumber):
value += 'NULL'
if idx != self.frameNumber - 1:
value += ','
value += '};\n'
for idx in range(self.frameNumber):
value += 'block$id$_buffer[' + str(
idx) + '] = cvCreateImage( cvSize(640,480), 8, 3);\n'
value += 'cvSetZero(block$id$_buffer[' + str(idx) + ']);\n'
value += 'block$id$_img_o0 = block$id$_buffer[' + \
str(self.frameNumber - 1) + '];\n'
        return value
# -----------------------------------------------------------------------------
| gpl-3.0 | -3,415,179,329,173,071,000 | 34.493827 | 79 | 0.414957 | false | 3.753264 | false | false | false |
rajendrauppal/elasticsearch-reddit | elasticsearch_reddit.py | 1 | 1431 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python application to fetch, store, and search Reddit data offline.
Requirements:
1. Python 2.7+
2. requests (pip install requests)
3. elasticsearch (pip install elasticsearch)
4. ElasticSearch server running
(download from https://www.elastic.co/downloads/elasticsearch, install and start)
"""
__author__ = 'Rajendra Kumar Uppal'
__copyright__ = "Copyright 2015, Rajendra Kumar Uppal"
__credits__ = ["Fletcher Heisler", "Rajendra Kumar Uppal"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Rajendra Kumar Uppal"
__email__ = "[email protected]"
__status__ = "Production"
import requests
from elasticsearch import Elasticsearch
class Reddit():
pass
def main():
es = Elasticsearch()
    # get top IAMA posts of all time (limit is set to 1 here; raise it for more)
response = requests.get("http://api.reddit.com/r/iama/top/?t=all&limit=1",
headers={"User-Agent":"TrackMaven"})
fields = ['title', 'selftext', 'author', 'score', 'ups', 'downs',
'num_comments', 'url', 'created']
# loop through the response and add each data dictionary to reddit index
for i, iama in enumerate(response.json()['data']['children']):
content = iama['data']
doc = {}
for field in fields:
doc[field] = content[field]
print doc
es.index(index='reddit', doc_type='iama', body=doc)
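    # Hypothetical follow-up, not in the original script: demonstrate the
    # offline search the docstring promises. 'title' is a field indexed
    # above; the query text is made up.
    results = es.search(index='reddit', doc_type='iama',
                        body={'query': {'match': {'title': 'president'}}})
    print "Found %s matching posts" % results['hits']['total']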
if __name__ == '__main__':
main()
| mit | 5,101,915,752,633,036,000 | 25.5 | 81 | 0.638015 | false | 3.297235 | false | false | false |
certik/sympy-oldcore | sympy/functions/elementary/tests/test_hyperbolic.py | 1 | 6236 |
from sympy import *
def test_sinh():
x, y = symbols('xy')
k = Symbol('k', integer=True)
assert sinh(nan) == nan
assert sinh(oo) == oo
assert sinh(-oo) == -oo
assert sinh(0) == 0
assert sinh(1) == sinh(1)
assert sinh(-1) == -sinh(1)
assert sinh(x) == sinh(x)
assert sinh(-x) == -sinh(x)
assert sinh(pi) == sinh(pi)
assert sinh(-pi) == -sinh(pi)
assert sinh(2**1024 * E) == sinh(2**1024 * E)
assert sinh(-2**1024 * E) == -sinh(2**1024 * E)
assert sinh(pi*I) == 0
assert sinh(-pi*I) == 0
assert sinh(2*pi*I) == 0
assert sinh(-2*pi*I) == 0
assert sinh(-3*10**73*pi*I) == 0
assert sinh(7*10**103*pi*I) == 0
assert sinh(pi*I/2) == I
assert sinh(-pi*I/2) == -I
assert sinh(5*pi*I/2) == I
assert sinh(7*pi*I/2) == -I
assert sinh(pi*I/3) == Basic.Half()*sqrt(3)*I
assert sinh(-2*pi*I/3) == -Basic.Half()*sqrt(3)*I
assert sinh(pi*I/4) == Basic.Half()*sqrt(2)*I
assert sinh(-pi*I/4) == -Basic.Half()*sqrt(2)*I
assert sinh(17*pi*I/4) == Basic.Half()*sqrt(2)*I
assert sinh(-3*pi*I/4) == -Basic.Half()*sqrt(2)*I
assert sinh(pi*I/6) == Basic.Half()*I
assert sinh(-pi*I/6) == -Basic.Half()*I
assert sinh(7*pi*I/6) == -Basic.Half()*I
assert sinh(-5*pi*I/6) == -Basic.Half()*I
assert sinh(pi*I/105) == sin(pi/105)*I
assert sinh(-pi*I/105) == -sin(pi/105)*I
assert sinh(2 + 3*I) == sinh(2 + 3*I)
assert sinh(x*I) == sin(x)*I
assert sinh(k*pi*I) == 0
assert sinh(17*k*pi*I) == 0
assert sinh(k*pi*I/2) == sin(k*pi/2)*I
def test_cosh():
x, y = symbols('xy')
k = Symbol('k', integer=True)
assert cosh(nan) == nan
assert cosh(oo) == oo
assert cosh(-oo) == oo
assert cosh(0) == 1
assert cosh(1) == cosh(1)
assert cosh(-1) == cosh(1)
assert cosh(x) == cosh(x)
assert cosh(-x) == cosh(x)
assert cosh(pi*I) == cos(pi)
assert cosh(-pi*I) == cos(pi)
assert cosh(2**1024 * E) == cosh(2**1024 * E)
assert cosh(-2**1024 * E) == cosh(2**1024 * E)
assert cosh(pi*I/2) == 0
assert cosh(-pi*I/2) == 0
assert cosh(pi*I/2) == 0
assert cosh(-pi*I/2) == 0
assert cosh((-3*10**73+1)*pi*I/2) == 0
assert cosh((7*10**103+1)*pi*I/2) == 0
assert cosh(pi*I) == -1
assert cosh(-pi*I) == -1
assert cosh(5*pi*I) == -1
assert cosh(8*pi*I) == 1
assert cosh(pi*I/3) == Basic.Half()
assert cosh(-2*pi*I/3) == -Basic.Half()
assert cosh(pi*I/4) == Basic.Half()*sqrt(2)
assert cosh(-pi*I/4) == Basic.Half()*sqrt(2)
assert cosh(11*pi*I/4) == -Basic.Half()*sqrt(2)
assert cosh(-3*pi*I/4) == -Basic.Half()*sqrt(2)
assert cosh(pi*I/6) == Basic.Half()*sqrt(3)
assert cosh(-pi*I/6) == Basic.Half()*sqrt(3)
assert cosh(7*pi*I/6) == -Basic.Half()*sqrt(3)
assert cosh(-5*pi*I/6) == -Basic.Half()*sqrt(3)
assert cosh(pi*I/105) == cos(pi/105)
assert cosh(-pi*I/105) == cos(pi/105)
assert cosh(2 + 3*I) == cosh(2 + 3*I)
assert cosh(x*I) == cos(x)
assert cosh(k*pi*I) == cos(k*pi)
assert cosh(17*k*pi*I) == cos(17*k*pi)
assert cosh(k*pi) == cosh(k*pi)
def test_tanh():
x, y = symbols('xy')
k = Symbol('k', integer=True)
assert tanh(nan) == nan
assert tanh(oo) == 1
assert tanh(-oo) == -1
assert tanh(0) == 0
assert tanh(1) == tanh(1)
assert tanh(-1) == -tanh(1)
assert tanh(x) == tanh(x)
assert tanh(-x) == -tanh(x)
assert tanh(pi) == tanh(pi)
assert tanh(-pi) == -tanh(pi)
assert tanh(2**1024 * E) == tanh(2**1024 * E)
assert tanh(-2**1024 * E) == -tanh(2**1024 * E)
assert tanh(pi*I) == 0
assert tanh(-pi*I) == 0
assert tanh(2*pi*I) == 0
assert tanh(-2*pi*I) == 0
assert tanh(-3*10**73*pi*I) == 0
assert tanh(7*10**103*pi*I) == 0
assert tanh(pi*I/2) == tanh(pi*I/2)
assert tanh(-pi*I/2) == -tanh(pi*I/2)
assert tanh(5*pi*I/2) == tanh(5*pi*I/2)
assert tanh(7*pi*I/2) == tanh(7*pi*I/2)
assert tanh(pi*I/3) == sqrt(3)*I
assert tanh(-2*pi*I/3) == sqrt(3)*I
assert tanh(pi*I/4) == I
assert tanh(-pi*I/4) == -I
assert tanh(17*pi*I/4) == I
assert tanh(-3*pi*I/4) == I
assert tanh(pi*I/6) == I/sqrt(3)
assert tanh(-pi*I/6) == -I/sqrt(3)
assert tanh(7*pi*I/6) == I/sqrt(3)
assert tanh(-5*pi*I/6) == I/sqrt(3)
assert tanh(pi*I/105) == tan(pi/105)*I
assert tanh(-pi*I/105) == -tan(pi/105)*I
assert tanh(2 + 3*I) == tanh(2 + 3*I)
assert tanh(x*I) == tan(x)*I
assert tanh(k*pi*I) == 0
assert tanh(17*k*pi*I) == 0
assert tanh(k*pi*I/2) == tan(k*pi/2)*I
def test_coth():
x, y = symbols('xy')
k = Symbol('k', integer=True)
assert coth(nan) == nan
assert coth(oo) == 1
assert coth(-oo) == -1
assert coth(0) == coth(0)
assert coth(1) == coth(1)
assert coth(-1) == -coth(1)
assert coth(x) == coth(x)
assert coth(-x) == -coth(x)
assert coth(pi*I) == -cot(pi)*I
assert coth(-pi*I) == cot(pi)*I
assert coth(2**1024 * E) == coth(2**1024 * E)
assert coth(-2**1024 * E) == -coth(2**1024 * E)
assert coth(pi*I) == -cot(pi)*I
assert coth(-pi*I) == cot(pi)*I
assert coth(2*pi*I) == -cot(2*pi)*I
assert coth(-2*pi*I) == cot(2*pi)*I
assert coth(-3*10**73*pi*I) == cot(3*10**73*pi)*I
assert coth(7*10**103*pi*I) == -cot(7*10**103*pi)*I
assert coth(pi*I/2) == 0
assert coth(-pi*I/2) == 0
assert coth(5*pi*I/2) == 0
assert coth(7*pi*I/2) == 0
assert coth(pi*I/3) == -I/sqrt(3)
assert coth(-2*pi*I/3) == -I/sqrt(3)
assert coth(pi*I/4) == -I
assert coth(-pi*I/4) == I
assert coth(17*pi*I/4) == -I
assert coth(-3*pi*I/4) == -I
assert coth(pi*I/6) == -sqrt(3)*I
assert coth(-pi*I/6) == sqrt(3)*I
assert coth(7*pi*I/6) == -sqrt(3)*I
assert coth(-5*pi*I/6) == -sqrt(3)*I
assert coth(pi*I/105) == -cot(pi/105)*I
assert coth(-pi*I/105) == cot(pi/105)*I
assert coth(2 + 3*I) == coth(2 + 3*I)
assert coth(x*I) == -cot(x)*I
assert coth(k*pi*I) == -cot(k*pi)*I
assert coth(17*k*pi*I) == -cot(17*k*pi)*I
assert coth(k*pi*I) == -cot(k*pi)*I
#def test_asinh():
#def test_acosh():
#def test_atanh():
#def test_acoth():
| bsd-3-clause | -6,516,422,457,788,406,000 | 23.844622 | 55 | 0.526299 | false | 2.28928 | true | false | false |
stevecassidy/hcsvlab_robochef | hcsvlab_robochef/eopas_test/ingest.py | 1 | 5401 | from hcsvlab_robochef.annotations import *
from hcsvlab_robochef.ingest_base import IngestBase
from hcsvlab_robochef.rdf.map import *
from hcsvlab_robochef.utils.filehandler import *
from hcsvlab_robochef.utils.serialiser import *
from hcsvlab_robochef.utils.statistics import *
from rdf import paradisecMap
from xml.etree import ElementTree as ET
import codecs
import mimetypes
import urllib
import re
class EopasTestIngest(IngestBase):
olac_role_map = {'annotator' : OLAC.annotator, 'author' : OLAC.author, 'compiler' : OLAC.compiler,
'consultant' : OLAC.consultant, 'data_inputter' : OLAC.data_inputter,
'depositor' : OLAC.depositor, 'developer' : OLAC.developer, 'editor' : OLAC.editor,
'illustrator' : OLAC.illustrator, 'interpreter' : OLAC.interpreter,
'interviewer' : OLAC.interviewer, 'participant' : OLAC.participant,
'performer' : OLAC.performer, 'photographer' : OLAC.photographer,
'recorder' : OLAC.recorder, 'researcher' : OLAC.researcher,
'research_participant' : OLAC.research_participant, 'responder' : OLAC.responder,
'signer' : OLAC.signer, 'singer' : OLAC.singer, 'speaker' : OLAC.speaker,
'sponsor' : OLAC.sponsor, 'transcriber' : OLAC.transcriber, 'translator' : OLAC.translator }
def ingestCorpus(self, srcdir, outdir):
        ''' This function will initiate the ingest process for the EOPAS corpus '''
print " converting corpus in", srcdir, "into normalised data in", outdir
print " clearing and creating output location"
self.clear_output_dir(outdir)
print " processing files..."
files_to_process = self.__get_files(srcdir)
total = len(files_to_process)
sofar = 0
for f in files_to_process:
meta_dict = self.ingestDocument(srcdir, f)
f = f.replace(srcdir, outdir, 1)
try:
os.makedirs(os.path.dirname(f))
except:
pass
(sampleid, _) = os.path.splitext(f)
serialiser = MetaSerialiser()
serialiser.serialise(outdir, sampleid, paradisecMap, meta_dict, True)
sofar = sofar + 1
print "\033[2K ", sofar, "of", total, f, "\033[A"
print "\033[2K ", total, "files processed"
def setMetaData(self, rcdir):
''' Loads the meta data for use during ingest '''
pass
def ingestDocument(self, srcdir, sourcepath):
""" Read and process a corpus document """
xml_tree = self.__load_xml_tree(sourcepath)
meta_dict = metadata.xml2tuplelist(xml_tree, ['olac', 'metadata'])
self.__get_documents(meta_dict)
self.__get_people(meta_dict)
return meta_dict
def __get_documents(self, meta_dict):
for k, v in meta_dict:
if k == 'tableOfContents':
filetype = self.__get_type(v)
file_meta = {'id' : v, 'filename' : v, 'filetype' : filetype, 'documenttitle' : v}
meta_dict.append(('table_document_' + v, file_meta))
meta_dict[:] = [(k, v) for k, v in meta_dict if 'tableOfContents' not in k]
def __get_people(self, meta_dict):
# TODO: maybe this belongs elsewhere
roles = self.olac_role_map.keys()
for k, v in meta_dict:
if k in roles:
person = {'role' : self.olac_role_map[k], 'id' : re.sub(' ', '_', v), 'name' : v}
meta_dict.append(('table_person_' + k, person))
meta_dict[:] = [(k, v) for k, v in meta_dict if k.strip() not in roles]
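    # Illustrative effect (hypothetical values): ('speaker', 'Mary K') becomes
    # ('table_person_speaker', {'role': OLAC.speaker, 'id': 'Mary_K',
    #  'name': 'Mary K'}) and the original tuple is dropped.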
# TODO: this could be moved to somewhere like ../utils where other modules could use it
def __get_type(self, filepath):
url = urllib.pathname2url(filepath)
mime_type, _ = mimetypes.guess_type(url)
filetype = None
if mime_type:
filetype = mime_type.split('/')[0].title()
if not filetype or filetype == 'Application':
filetype = 'Other'
return filetype
def __get_files(self, srcdir):
''' This function retrieves a list of files that the HCSvLab ingest should actually process '''
filehandler = FileHandler()
files = filehandler.getFiles(srcdir, r'^.+?(?:pas|box).xml$')
return_files = [os.path.join(srcdir, f) for f in files]
return return_files
def __tuplelist2dict__(self, tuplelist):
result = dict()
for (k, v) in tuplelist:
if k and v:
result[k] = v
return result
def __load_xml_tree(self, sourcepath):
'''
        This function reads in an XML document as a text file and converts it
        into an XML tree for further processing.
'''
fhandle = codecs.open(sourcepath, "r", "utf-8")
text = fhandle.read()
fhandle.close()
text = text.replace('–', u"\u2013")
text = text.replace('—', u"\u2014")
text = text.replace('©', u"\u00A9")
text = text.replace('“', u"\u201C")
text = text.replace('”', u"\u201D")
text = text.replace(' ', u"\u2003")
text = text.replace('é', u"\u00E9")
text = text.replace('‘', u"\u2018")
text = text.replace('’', u"\u2019")
text = text.replace('ê', u"\u00EA")
text = text.replace('à', u"\u00E0")
text = text.replace('è', u"\u00E8")
text = text.replace('œ', u"\u0153")
text = text.replace('æ', u"\u00E6")
text = text.replace('…', u"\u2026")
return ET.fromstring(text.encode("utf-8"))
| gpl-3.0 | -4,263,839,446,201,702,000 | 35.006667 | 110 | 0.61933 | false | 3.301345 | false | false | false |
jgomezdans/sampyl | examples/slice_sample.py | 2 | 1708 | import sys
sys.path.append('.')
import sampyl as smp
from sampyl.state import State
from sampyl import np
from sampyl.diagnostics import diagnostics
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import seaborn as sns
# correlated gaussian log likelihood
def logp(x, y):
icov = np.linalg.inv(np.array([[1., .8], [.8, 1.]]))
d = np.array([x, y])
return -.5 * np.dot(np.dot(d, icov), d)
logp_xy = lambda(th): logp(th[0], th[1])
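# logp_xy re-exposes the same density through a single 2-vector argument,
# the form the joint (two-variable) Slice sampler below expects.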
# compare slice samplers, metropolis hastings, and the two variable
# slice sampler
ssamp = smp.Slice(logp, start={'x': 4., 'y': 4.} )
slice_trace = ssamp.sample(1000)
met = smp.Metropolis(logp, start={'x': 4., 'y': 4.})
met_trace = met.sample(1000)
bslice = smp.Slice(logp_xy, start={'th': np.array([4., 4.])})
btrace = bslice.sample(1000)
# compute effective sample size based on autocorrelation
slice_eff = diagnostics.compute_n_eff_acf(slice_trace.x)
met_eff = diagnostics.compute_n_eff_acf(met_trace.x)
b_eff = diagnostics.compute_n_eff_acf(btrace.th[:,0])
print "Slice effective sample size: %2.2f"%slice_eff
print "MH effective sample size: %2.2f"%met_eff
print "two var slice effective sample size: %2.2f"%b_eff
print " ----- "
print "Slice sampler evals per sample: ", ssamp.evals_per_sample
# graphically compare samples
fig, axarr = plt.subplots(1, 3, figsize=(12,4))
axarr[0].scatter(slice_trace.x, slice_trace.y)
axarr[0].set_title("Slice samples")
axarr[1].scatter(met_trace.x, met_trace.y)
axarr[1].set_title("MH samples")
axarr[2].scatter(btrace.th[:,0], btrace.th[:,1])
axarr[2].set_title("Two var Slice samples")
for ax in axarr:
ax.set_xlim((-4, 4))
ax.set_ylim((-4, 4))
plt.show()
| apache-2.0 | 8,433,334,324,752,320,000 | 31.226415 | 68 | 0.68267 | false | 2.706815 | false | false | false |
rmcgibbo/nebterpolator | setup.py | 1 | 2337 | """
setup.py: Install nebterpolator.
"""
VERSION="1.0"
__author__ = "Robert McGibbon and Lee-Ping Wang"
__version__ = VERSION
from distutils.sysconfig import get_config_var
from distutils.core import setup,Extension
import os
import shutil
import numpy
import glob
# Comment left here as an example
# Copied from MSMBuilder 'contact' library for rapidly computing interatomic distances.
# CONTACT = Extension('forcebalance/_contact_wrap',
# sources = ["ext/contact/contact.c",
# "ext/contact/contact_wrap.c"],
# extra_compile_args=["-std=c99","-O3","-shared",
# "-fopenmp", "-Wall"],
# extra_link_args=['-lgomp'],
# include_dirs = [numpy.get_include(), os.path.join(numpy.get_include(), 'numpy')])
def buildKeywordDictionary():
from distutils.core import Extension
setupKeywords = {}
setupKeywords["name"] = "nebterpolator"
setupKeywords["version"] = VERSION
setupKeywords["author"] = __author__
setupKeywords["author_email"] = "[email protected]"
setupKeywords["license"] = "GPL 3.0"
setupKeywords["url"] = "https://github.com/rmcgibbo/nebterpolator"
setupKeywords["download_url"] = "https://github.com/rmcgibbo/nebterpolator"
setupKeywords["scripts"] = glob.glob("bin/*.py") + glob.glob("bin/*.sh")
setupKeywords["packages"] = ["nebterpolator", "nebterpolator.io", "nebterpolator.core"]
# setupKeywords["package_data"] = {"nebterpolator" : ["data/*.sh","data/uffparms.in","data/oplsaa.ff/*"]}
setupKeywords["data_files"] = []
setupKeywords["ext_modules"] = []
setupKeywords["platforms"] = ["Linux"]
setupKeywords["description"] = "Internal coordinate smoothing."
outputString=""
firstTab = 40
secondTab = 60
for key in sorted( setupKeywords.iterkeys() ):
value = setupKeywords[key]
outputString += key.rjust(firstTab) + str( value ).rjust(secondTab) + "\n"
print "%s" % outputString
return setupKeywords
def main():
setupKeywords=buildKeywordDictionary()
setup(**setupKeywords)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,680,237,756,592,672,000 | 37.95 | 114 | 0.593068 | false | 3.7392 | false | false | false |
nextview/medicticket | txomon/ticket/tests/batch.py | 1 | 10825 | from trac.perm import PermissionCache
from trac.test import Mock, EnvironmentStub
from txomon.ticket import default_workflow, web_ui
from txomon.ticket.batch import BatchModifyModule
from txomon.ticket.model import Ticket
from trac.util.datefmt import utc
import unittest
class BatchModifyTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True,
enable=[default_workflow.ConfigurableTicketWorkflow,
web_ui.TicketModule])
self.req = Mock(href=self.env.href, authname='anonymous', tz=utc)
self.req.session = {}
self.req.perm = PermissionCache(self.env)
def assertCommentAdded(self, ticket_id, comment):
ticket = Ticket(self.env, int(ticket_id))
changes = ticket.get_changelog()
comment_change = [c for c in changes if c[2] == 'comment'][0]
self.assertEqual(comment_change[2], comment)
def assertFieldChanged(self, ticket_id, field, new_value):
ticket = Ticket(self.env, int(ticket_id))
changes = ticket.get_changelog()
field_change = [c for c in changes if c[2] == field][0]
self.assertEqual(field_change[4], new_value)
def _change_list_test_helper(self, original, new, new2, mode):
batch = BatchModifyModule(self.env)
return batch._change_list(original, new, new2, mode)
def _add_list_test_helper(self, original, to_add):
return self._change_list_test_helper(original, to_add, '', '+')
def _remove_list_test_helper(self, original, to_remove):
return self._change_list_test_helper(original, to_remove, '', '-')
def _add_remove_list_test_helper(self, original, to_add, to_remove):
return self._change_list_test_helper(original, to_add, to_remove,
'+-')
def _assign_list_test_helper(self, original, new):
return self._change_list_test_helper(original, new, '', '=')
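    # Mode cheat-sheet for _change_list (an illustrative summary of the cases
    # exercised below): '=' replaces the list, '+' appends items, '-' removes
    # items, and '+-' appends `new` while removing `new2` in one pass.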
def _insert_ticket(self, summary, **kw):
"""Helper for inserting a ticket into the database"""
ticket = Ticket(self.env)
for k, v in kw.items():
ticket[k] = v
return ticket.insert()
def test_ignore_summary_reporter_and_description(self):
"""These cannot be added through the UI, but if somebody tries
to build their own POST data they will be ignored."""
batch = BatchModifyModule(self.env)
self.req.args = {}
self.req.args['batchmod_value_summary'] = 'test ticket'
self.req.args['batchmod_value_reporter'] = 'anonymous'
self.req.args['batchmod_value_description'] = 'synergize the widgets'
values = batch._get_new_ticket_values(self.req)
self.assertEqual(len(values), 0)
def test_add_batchmod_value_data_from_request(self):
batch = BatchModifyModule(self.env)
self.req.args = {}
self.req.args['batchmod_value_milestone'] = 'milestone1'
values = batch._get_new_ticket_values(self.req)
self.assertEqual(values['milestone'], 'milestone1')
def test_selected_tickets(self):
self.req.args = { 'selected_tickets' : '1,2,3' }
batch = BatchModifyModule(self.env)
selected_tickets = batch._get_selected_tickets(self.req)
self.assertEqual(selected_tickets, ['1', '2', '3'])
def test_no_selected_tickets(self):
"""If nothing is selected, the return value is the empty list."""
self.req.args = { 'selected_tickets' : '' }
batch = BatchModifyModule(self.env)
selected_tickets = batch._get_selected_tickets(self.req)
self.assertEqual(selected_tickets, [])
# Assign list items
def test_change_list_replace_empty_with_single(self):
"""Replace emtpy field with single item."""
changed = self._assign_list_test_helper('', 'alice')
self.assertEqual(changed, 'alice')
def test_change_list_replace_empty_with_items(self):
"""Replace emtpy field with items."""
changed = self._assign_list_test_helper('', 'alice, bob')
self.assertEqual(changed, 'alice, bob')
def test_change_list_replace_item(self):
"""Replace item with a different item."""
changed = self._assign_list_test_helper('alice', 'bob')
self.assertEqual(changed, 'bob')
def test_change_list_replace_item_with_items(self):
"""Replace item with different items."""
changed = self._assign_list_test_helper('alice', 'bob, carol')
self.assertEqual(changed, 'bob, carol')
def test_change_list_replace_items_with_item(self):
"""Replace items with a different item."""
changed = self._assign_list_test_helper('alice, bob', 'carol')
self.assertEqual(changed, 'carol')
def test_change_list_replace_items(self):
"""Replace items with different items."""
changed = self._assign_list_test_helper('alice, bob', 'carol, dave')
self.assertEqual(changed, 'carol, dave')
def test_change_list_replace_items_partial(self):
"""Replace items with different (or not) items."""
changed = self._assign_list_test_helper('alice, bob', 'bob, dave')
self.assertEqual(changed, 'bob, dave')
def test_change_list_clear(self):
"""Clear field."""
changed = self._assign_list_test_helper('alice bob', '')
self.assertEqual(changed, '')
# Add / remove list items
def test_change_list_add_item(self):
"""Append additional item."""
changed = self._add_list_test_helper('alice', 'bob')
self.assertEqual(changed, 'alice, bob')
def test_change_list_add_items(self):
"""Append additional items."""
changed = self._add_list_test_helper('alice, bob', 'carol, dave')
self.assertEqual(changed, 'alice, bob, carol, dave')
def test_change_list_remove_item(self):
"""Remove existing item."""
changed = self._remove_list_test_helper('alice, bob', 'bob')
self.assertEqual(changed, 'alice')
def test_change_list_remove_items(self):
"""Remove existing items."""
changed = self._remove_list_test_helper('alice, bob, carol',
'alice, carol')
self.assertEqual(changed, 'bob')
def test_change_list_remove_idempotent(self):
"""Ignore missing item to be removed."""
changed = self._remove_list_test_helper('alice', 'bob')
self.assertEqual(changed, 'alice')
def test_change_list_remove_mixed(self):
"""Ignore only missing item to be removed."""
changed = self._remove_list_test_helper('alice, bob', 'bob, carol')
self.assertEqual(changed, 'alice')
def test_change_list_add_remove(self):
"""Remove existing item and append additional item."""
changed = self._add_remove_list_test_helper('alice, bob', 'carol',
'alice')
self.assertEqual(changed, 'bob, carol')
def test_change_list_add_no_duplicates(self):
"""Existing items are not duplicated."""
changed = self._add_list_test_helper('alice, bob', 'bob, carol')
self.assertEqual(changed, 'alice, bob, carol')
def test_change_list_remove_all_duplicates(self):
"""Remove all duplicates."""
changed = self._remove_list_test_helper('alice, bob, alice', 'alice')
self.assertEqual(changed, 'bob')
# Save
def test_save_comment(self):
"""Comments are saved to all selected tickets."""
first_ticket_id = self._insert_ticket('Test 1', reporter='joe')
second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
selected_tickets = [first_ticket_id, second_ticket_id]
batch = BatchModifyModule(self.env)
batch._save_ticket_changes(self.req, selected_tickets, {}, 'comment',
'leave')
self.assertCommentAdded(first_ticket_id, 'comment')
self.assertCommentAdded(second_ticket_id, 'comment')
def test_save_values(self):
"""Changed values are saved to all tickets."""
first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
component='foo')
second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
selected_tickets = [first_ticket_id, second_ticket_id]
new_values = { 'component' : 'bar' }
batch = BatchModifyModule(self.env)
batch._save_ticket_changes(self.req, selected_tickets, new_values, '',
'leave')
self.assertFieldChanged(first_ticket_id, 'component', 'bar')
self.assertFieldChanged(second_ticket_id, 'component', 'bar')
def test_action_with_state_change(self):
"""Actions can have change status."""
self.env.config.set('ticket-workflow', 'embiggen', '* -> big')
first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
status='small')
second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
selected_tickets = [first_ticket_id, second_ticket_id]
batch = BatchModifyModule(self.env)
batch._save_ticket_changes(self.req, selected_tickets, {}, '',
'embiggen')
ticket = Ticket(self.env, int(first_ticket_id))
changes = ticket.get_changelog()
self.assertFieldChanged(first_ticket_id, 'status', 'big')
self.assertFieldChanged(second_ticket_id, 'status', 'big')
def test_action_with_side_effects(self):
"""Actions can have operations with side effects."""
self.env.config.set('ticket-workflow', 'buckify', '* -> *')
self.env.config.set('ticket-workflow', 'buckify.operations',
'set_owner')
self.req.args = {}
self.req.args['action_buckify_reassign_owner'] = 'buck'
first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
owner='foo')
second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
selected_tickets = [first_ticket_id, second_ticket_id]
batch = BatchModifyModule(self.env)
batch._save_ticket_changes(self.req, selected_tickets, {}, '',
'buckify')
ticket = Ticket(self.env, int(first_ticket_id))
changes = ticket.get_changelog()
self.assertFieldChanged(first_ticket_id, 'owner', 'buck')
self.assertFieldChanged(second_ticket_id, 'owner', 'buck')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(BatchModifyTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| bsd-3-clause | 1,928,104,001,783,783,000 | 41.285156 | 78 | 0.610531 | false | 3.841377 | true | false | false |
Art-SoftWare/ircbot | extern/quizz/Quizz.py | 1 | 1269 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from random import randint
import re
import util.cfg
class Quizz:
MODE_ONESHOT = 1
MODE_REGULAR = 2
MODE_PARTY = 3
def __init__(self, botName, channel, defaultMode=MODE_ONESHOT):
util.cfg.default = {}
self.questions = util.cfg.load("extern/quizz/questions.json")
self.mode = defaultMode
self.channel = channel
self.activeQuestion = None
def randomQuestion(self):
"""Get a random question"""
self.activeQuestion = list(self.questions.keys())[randint(0,len(self.questions)-1)]
return self.activeQuestion
def checkAnswer(self, answer):
"""Verifies the answer to the active question"""
if self.activeQuestion != None and re.search(self.questions[self.activeQuestion], answer) != None:
self.activeQuestion = None
return True
return False
def questionPending(self):
"""Is there a active question ?"""
return self.activeQuestion != None
def setQuestion(self, question, answerPattern=" "):
"""Saves the answer to a given question"""
self.questions[question] = answerPattern
util.cfg.save(self.questions, "extern/quizz/questions.json")
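
# Hedged usage sketch (assumed driver code, not part of the module): the bot
# name and channel are placeholders, and at least one question is assumed to
# already exist in extern/quizz/questions.json.
if __name__ == '__main__':
    quizz = Quizz('mybot', '#mychannel')
    print(quizz.randomQuestion())
    if quizz.checkAnswer('some answer'):
        print('correct!')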
| gpl-3.0 | 1,677,504,767,373,008,600 | 29.95122 | 106 | 0.639086 | false | 4.003155 | false | false | false |
rstones/counting_statistics | counting_statistics/fcs_solver.py | 1 | 11597 | import numpy as np
import numpy.linalg as npla
import scipy.linalg as la
import scipy.special as sp
from counting_statistics.lindblad_system import LindbladSystem
#from counting_statistics import optimized_funcs
class FCSSolver(LindbladSystem):
'''
Users should be able to create a FCSSolver instance with the system Hamiltonian,
Lindblad operators, somehow define the counting operators and associated rates.
NEED TO MAKE COMPATIBLE WITH PYTHON 2 AND 3!
Also need to decide how to deal with numpy Matrix objects as well as ndarrays
For the zero-frequency cumulants, I can implement a recursive scheme to generate them to arbitrary
order following Flindt et al. 2010 (optimized using numba). Maybe worth still having up to skewness
hard coded for speed and ease of seeing the specific structure of those equations when reading the code.
finite_freq functions can almost certainly be optimized with numba or cython, or at least the functions
should be vectorized wrt the frequency values
This solver will be restricted to calculating full counting statistics for Markovian systems
    that can be expressed in Lindblad form, with counting transitions occurring to a single state
    (i.e. a single drain lead in a standard electron transport setup, infinite voltage bias/unidirectional transport)
Need to provide docs with references and examples in juypter notebook hosted on github.
Maybe implement non-Markovian counting stats at some point.
May want to include experimental support for sparse matrices, somehow minimizing need to convert to dense
for pinv operation. (Maybe in an FCSSolverHEOM class that extends HEOMSystem, eventually having dependence
on the heom_solver package I will write)
'''
# def __init__(self, H, D_ops, D_rates, jump_idx, reduce_dim=False):
# self.__watch_variables = ['H', 'D_ops', 'D_rates', 'jump_idx', 'reduce_dim'] # could get this with inspect
# self.__cache_is_stale = True
#
# LindbladSystem.__init__(self, H, D_ops, D_rates, reduce_dim=reduce_dim)
# self.jump_idx = jump_idx
def __init__(self, L, jump_op, pops, from_hilbert_space=False):
self.L = L
self.jump_op = jump_op
self.pops = pops
self.from_hilbert_space = from_hilbert_space
self.__watch_variables = ['H', 'D_ops', 'D_rates', 'jump_idx', 'reduce_dim'] \
if self.from_hilbert_space else ['L', 'jump_op', 'pops']
self.__cache_is_stale = True
@classmethod
def from_hilbert_space(cls, H, D_ops, D_rates, jump_idx, reduce_dim=False):
# create instance of subclass
instance = object.__new__(cls)
# initialize superclass first to allow construction of Liouvillian etc by LindbladSystem
super(FCSSolver, instance).__init__(H, D_ops, D_rates, reduce_dim=reduce_dim)
instance.jump_idx = jump_idx
L = instance.liouvillian()
# initialize subclass
instance.__init__(L, instance.construct_jump_operator(L), instance.pops, from_hilbert_space=True)
return instance
def __setattr__(self, name, value):
'''Overridden to watch selected variables to trigger cache refresh.'''
try:
if name in self.__watch_variables:
self.__cache_is_stale = True
except AttributeError:
# stop an Error being thrown when self.__watch_variables is first created on class instantiation
# maybe throw a warning here?
pass
object.__setattr__(self, name, value)
def refresh_cache(self):
'''Refresh necessary quantities for counting statistics calculations.'''
if self.from_hilbert_space:
self.pops = self.I.flatten()
self.L = self.liouvillian()
self.jump_op = self.construct_jump_operator(self.L)
self.ss = self.stationary_state(self.L, self.pops)
self.__cache_is_stale = False
def construct_jump_operator(self, L):
'''Sum kron(A,A) of all jump_ops.'''
jump_op = np.zeros((self.sys_dim**2, self.sys_dim**2))
for i in np.flatnonzero(self.jump_idx):
jump_op += self.D_rates[i] * np.kron(self.D_ops[i], self.D_ops[i])
if self.reduce_dim:
try:
jump_op = np.delete(jump_op, self.idx_to_remove, 0)
jump_op = np.delete(jump_op, self.idx_to_remove, 1)
except AttributeError:
self.idx_to_remove = self.indices_to_remove(L)
jump_op = np.delete(jump_op, self.idx_to_remove, 0)
jump_op = np.delete(jump_op, self.idx_to_remove, 1)
return jump_op
@staticmethod
def stationary_state(L, pops):
'''Should test for number of nullspaces found somewhere, possibly here, as the system is set up
under the assumption it is fully connected and has a single stationary state.
Send a warning if there are multiple nullspaces.'''
# calculate
u,s,v = la.svd(L)
# check for number of nullspaces
# normalize
ss = v[-1].conj() / np.dot(pops, v[-1])
return ss
def mean(self):
if self.__cache_is_stale:
self.refresh_cache()
return np.real(np.dot(self.pops, np.dot(self.jump_op, self.ss)))
@staticmethod
def pseudoinverse(L, freq, Q):
return np.dot(Q, np.dot(npla.pinv(1.j*freq*np.eye(L.shape[0]) - L), Q))
@staticmethod
def Q(L, steady_state, pops):
return np.eye(L.shape[0]) - np.outer(steady_state, pops)
def noise(self, freq):
if self.__cache_is_stale:
self.refresh_cache()
# handle either array or scalar freq values
scalar = False
if np.isscalar(freq):
scalar = True
freq = np.array([freq])
elif isinstance(freq, list):
freq = np.array(freq)
# do the calculation
Q = self.Q(self.L, self.ss, self.pops)
noise = np.zeros(freq.size, dtype='complex128')
for i in range(len(freq)):
R_plus = self.pseudoinverse(self.L, freq[i], Q)
R_minus = self.pseudoinverse(self.L, -freq[i], Q)
noise[i] = np.dot(self.pops, np.dot(self.jump_op \
+ np.dot(np.dot(self.jump_op, R_plus), self.jump_op) \
+ np.dot(np.dot(self.jump_op, R_minus), self.jump_op), self.ss))
return np.real(noise[0] if scalar else noise)
def skewness(self, freq1, freq2):
if self.__cache_is_stale:
self.refresh_cache()
Q = self.Q(self.L, self.ss, self.pops)
skewness = np.zeros((freq1.size, freq2.size), dtype='complex128')
for i in range(len(freq1)):
for j in range(len(freq2)):
                '''Currently ignoring the zero-frequency limit as it's a bit more
                complicated than for the noise. This should cause a test failure
                until it's fixed.'''
if freq1[i] == 0 or freq2[j] == 0 or freq1[i] == freq2[j]:
continue
R1 = self.pseudoinverse(self.L, -freq1[i], Q)
R2 = self.pseudoinverse(self.L, freq1[i]-freq2[j], Q)
R3 = self.pseudoinverse(self.L, freq2[j], Q)
R4 = self.pseudoinverse(self.L, -freq2[j], Q)
R5 = self.pseudoinverse(self.L, freq1[i], Q)
R6 = self.pseudoinverse(self.L, freq2[j]-freq1[i], Q)
jump_op_average = np.dot(self.pops, np.dot(self.jump_op, self.ss))
skewness[i,j] = np.dot(self.pops, np.dot(self.jump_op \
+ np.dot(self.jump_op, np.dot(R1+R2+R3, self.jump_op)) \
+ np.dot(self.jump_op, np.dot(R4+R5+R6, self.jump_op)) \
+ np.dot(np.dot(self.jump_op, R1), np.dot(self.jump_op, np.dot(R4+R6, self.jump_op))) \
+ np.dot(np.dot(self.jump_op, R2), np.dot(self.jump_op, np.dot(R4+R5, self.jump_op))) \
+ np.dot(np.dot(self.jump_op, R3), np.dot(self.jump_op, np.dot(R5+R6, self.jump_op))) \
+ (-jump_op_average/(1.j*freq1[i])) * np.dot(self.jump_op, np.dot(R4-R2+R6-R3, self.jump_op)) \
+ (jump_op_average/(1.j*freq1[i]-1.j*freq2[j])) * np.dot(self.jump_op, np.dot(R4-R1+R5-R3, self.jump_op)) \
+ (jump_op_average/(1.j*freq2[j])) * np.dot(self.jump_op, np.dot(R6-R1+R5-R2, self.jump_op)), self.ss))
return np.real(skewness)
def second_order_fano_factor(self, freq):
return self.noise(freq) / self.mean()
def third_order_fano_factor(self, freq1, freq2):
return self.skewness(freq1, freq2) / self.mean()
def binom_coeff_vector(self, n):
'''Generates vector of binomial coefficients from m=1 to n, reversed.'''
return sp.binom(n, range(n,0,-1))
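        # e.g. binom_coeff_vector(3) -> array([1., 3., 3.])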
def generate_cumulant(self, n):
'''Generates zero-frequency cumulant to arbitrary order using recursive scheme.
Also could use a function to generate next level of hierarchy from a previously
generated set of cumulants and states so don't need to start from the beginning
each time.
It would also be cool to dynamically generate a function for the requested cumulant
which a user can save. Currently every time a parameter changes the cumulant needs to be regenerated which is
probably going to be quite inefficient for large cumulants.'''
if self.__cache_is_stale:
self.refresh_cache()
R = self.pseudoinverse(self.L, 0, self.Q(self.L, self.ss, self.pops))
bc_vector = self.binom_coeff_vector(n)
cumulants = np.zeros(n)
states = np.zeros((n+1, self.L.shape[0]), dtype='complex128')
states[0] = self.ss
def recursion(m, cumulants, states):
# check n is an integer >= 1
if m > 1:
# get previous cumulants and states
cumulants, states = recursion(m-1, cumulants, states)
elif m == 1:
# lowest level cumulant
cumulants[0] = np.dot(self.pops, np.dot(self.jump_op, states[0]))
states[1] = np.dot(R, np.dot(cumulants[0]*np.eye(self.L.shape[0]) - self.jump_op, states[0]))
#print states[1] + np.dot(R, np.dot(self.jump_op, states[0]))
return cumulants, states
else:
raise ValueError("Cannot calculate cumulants for n < 1")
# calculate cumulant at current level
#cumulants[m-1] = np.dot(self.pops, np.dot(self.jump_op, np.dot(bc_vector, states[:m])))
for i in range(m):
cumulants[m-1] += bc_vector[i]*np.dot(self.pops, np.dot(self.jump_op, states[i]))
# construct 3D matrix
#W = np.vstack([bc_vector[i]*(cumulants[i]*np.eye(self.L.shape[0]) - self.jump_op)[np.newaxis,...] for i in range(m)])
W = np.sum([np.dot(bc_vector[i]*(cumulants[i]*np.eye(self.L.shape[0]) - self.jump_op), states[i]) for i in range(m)], axis=0)
states[m] = np.dot(R, W)
return cumulants, states
return recursion(n, cumulants, states)
def generate_fano_factor(self, n):
return self.generate_cumulant(n)[0][n-1] / self.mean()
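
# Hedged usage sketch (assumed driver code): a hand-built two-state rate
# equation in which state 0 fills at rate g_in and the counted jump empties
# state 1 at rate g_out. The toy matrices are illustrative assumptions, not
# library output.
if __name__ == '__main__':
    g_in, g_out = 1.0, 0.5
    L_toy = np.array([[-g_in, g_out],
                      [g_in, -g_out]])
    jump_toy = np.array([[0.0, g_out],   # only the 1 -> 0 transition is counted
                         [0.0, 0.0]])
    pops_toy = np.array([1.0, 1.0])      # trace over the two populations
    solver = FCSSolver(L_toy, jump_toy, pops_toy)
    print(solver.mean())                         # stationary current (1/3 here)
    print(solver.second_order_fano_factor(0.0))  # zero-frequency Fano factor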
| mit | 2,002,734,828,700,820,700 | 48.144068 | 137 | 0.590756 | false | 3.489919 | false | false | false |
matthazinski/tempnet | gateway/remoted.py | 1 | 1706 | #!/usr/bin/env python
import os
import time
import RPi.GPIO as GPIO
from flask import Flask
app = Flask(__name__)
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
tempsensor_sn = '28-000005abe684' # Varies depending on sensor
sensor = '/sys/bus/w1/devices/' + tempsensor_sn + '/w1_slave'
# Sets pins 19(r), 21(g), and 23(b) as output pins
GPIO.setmode(GPIO.BOARD)
GPIO.setup(19, GPIO.OUT)
GPIO.setup(21, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)
def raw_data():
"""Retrieves the raw data from the temperature sensor on the Raspberry Pi"""
x = open(sensor, 'r')
data = x.readlines()
x.close()
return data
@app.route('/temp')
def get_temp():
"""Retrieves current fahrenheit temperature value"""
data = raw_data()
while data[0].strip()[-3:] != 'YES':
time.sleep(0.2)
data = raw_data()
temp_val = data[1].find('t=')
    if temp_val == -1:
        return 'sensor error'
    temp_string = data[1].strip()[temp_val + 2:]
    temp_fahrenheit = 32.0 + ((float(temp_string) / 1000.0) * 1.8)
    # Flask views must return a string (or Response), not a float
    return str(temp_fahrenheit)
def set_led(r, g, b):
"""Set the color of the LED"""
GPIO.output(19, r)
GPIO.output(21, g)
GPIO.output(23, b)
def set_color(color):
"""Receives name of color and sets the LED"""
if color == 'red':
set_led(0, 1, 1)
elif color == 'green':
set_led(1, 0, 1)
elif color == 'blue':
set_led(1, 1, 0)
elif color == 'yellow':
set_led(0, 0, 1)
elif color == 'magenta':
set_led(0, 1, 0)
elif color == 'cyan':
set_led(1, 0, 0)
elif color == 'white':
set_led(0, 0, 0)
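
# Hedged sketch (assumed route, not in the original file): exposes set_color()
# over HTTP so the LED can be driven remotely.
@app.route('/led/<color>')
def led(color):
    set_color(color)  # unknown color names are silently ignored
    return color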
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80, debug=False)
| apache-2.0 | -3,130,640,940,812,404,000 | 23.724638 | 80 | 0.575029 | false | 2.857621 | false | false | false |
shenfei/oj_codes | leetcode/python/n146_LRU_Cache.py | 1 | 1711 | class ListNode:
def __init__(self, x):
self.val = x
self.pre = None
self.next = None
class LRUCache:
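    # Design note: a doubly linked list tracks usage order (head = most
    # recently used); self.data maps key -> value and self.lru maps
    # key -> list node, giving O(1) get and put.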
def __init__(self, capacity):
"""
:type capacity: int
"""
self.capacity = capacity
self.size = 0
self.head = ListNode(0)
self.tail = ListNode(0)
self.head.next = self.tail
self.tail.pre = self.head
self.data = dict()
self.lru = dict()
def insert_head(self, node):
node.pre = self.head
node.next = self.head.next
node.next.pre = node
self.head.next = node
def move_head(self, key):
node = self.lru[key]
node.pre.next = node.next
node.next.pre = node.pre
self.insert_head(node)
def delete(self):
node = self.tail.pre
node.pre.next = self.tail
self.tail.pre = node.pre
key = node.val
del self.data[key]
del self.lru[key]
del node
def get(self, key):
"""
:type key: int
:rtype: int
"""
if key in self.data:
self.move_head(key)
return self.data[key]
else:
return -1
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: void
"""
if key in self.data:
self.data[key] = value
self.move_head(key)
else:
self.data[key] = value
node = ListNode(key)
self.lru[key] = node
if self.size == self.capacity:
self.delete()
self.size -= 1
self.insert_head(node)
self.size += 1
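
# Hedged usage sketch (assumed driver code, not part of the solution):
if __name__ == '__main__':
    cache = LRUCache(2)
    cache.put(1, 1)
    cache.put(2, 2)
    assert cache.get(1) == 1
    cache.put(3, 3)            # evicts key 2, the least recently used
    assert cache.get(2) == -1
    assert cache.get(3) == 3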
| mit | -1,529,732,343,830,840,600 | 22.763889 | 42 | 0.474576 | false | 3.785398 | false | false | false |
hsiegel/postsai-commitstop | permissions/response.py | 1 | 1942 | # The MIT License (MIT)
# Copyright (c) 2016-2017 HIS e. G.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import json
import datetime
def ret200(cause):
retHttp(200,cause)
def ret403(cause):
retHttp(403,cause)
def ret400(cause):
retHttp(400,cause)
def retHttp(status, cause):
print("Status: " + str(status) + " Ok\r")
print("Content-Type: text/plain; charset='utf-8'\r")
print("\r")
print(cause)
def retJson(data):
""" send a JSON onject to the client """
print("Status: 200 Ok\r")
#print("Content-Type: text/plain; charset='utf-8'\r")
print("Content-Type: application/json; charset='utf-8'\r")
print("\r")
date_handler = lambda obj: (
obj.isoformat()
if isinstance(obj, datetime.datetime)
or isinstance(obj, datetime.date)
else None
)
jsonString = json.dumps(data, default=date_handler)
print(jsonString)
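
# Hedged usage sketch (assumed CGI driver code): date and datetime values in
# the payload are serialized through date_handler above.
if __name__ == '__main__':
    retJson({"status": "ok", "when": datetime.date(2017, 1, 1)})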
| mit | -522,409,232,732,326,500 | 31.366667 | 77 | 0.705458 | false | 3.907445 | false | false | false |
tntnatbry/tensorflow | tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py | 81 | 1492 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for creating Stochastic Tensors.
See the @{$python/contrib.bayesflow.stochastic_tensor} guide.
@@BaseStochasticTensor
@@StochasticTensor
@@MeanValue
@@SampleValue
@@value_type
@@get_current_value_type
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.bayesflow.python.ops.stochastic_tensor_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"BaseStochasticTensor",
"StochasticTensor",
"ObservedStochasticTensor",
"MeanValue",
"SampleValue",
"value_type",
"get_current_value_type",
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 | 8,521,956,410,367,352,000 | 30.083333 | 80 | 0.714477 | false | 3.968085 | false | false | false |
georgenicolaou/nfi | AndroidMisc/LockSettingsSaltOld.py | 2 | 3222 | '''
NFI -- Silensec's Nyuki Forensics Investigator
Copyright (C) 2014 George Nicolaou (george[at]silensec[dot]com)
Silensec Ltd.
This file is part of Nyuki Forensics Investigator (NFI).
NFI is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
NFI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with NFI. If not, see <http://www.gnu.org/licenses/>.
'''
from IMiscSource import IMiscSource, KnownFile, ParserType, KnownFieldSQL, FieldType
from IMiscSource import KnownField, KnownFieldBin, BinaryClass, BinaryRead
from IMiscSource import KnownFieldXML, ReadTypeXML, Label
from Catalog import Catalog
import ConvertUtils
PASSWORD_QUALITY = {
0: "PASSWORD_QUALITY_UNSPECIFIED",
0x8000: "PASSWORD_QUALITY_BIOMETRIC_WEAK",
0x10000: "PASSWORD_QUALITY_SOMETHING",
0x20000: "PASSWORD_QUALITY_NUMERIC",
0x40000: "PASSWORD_QUALITY_ALPHABETIC",
0x50000: "PASSWORD_QUALITY_ALPHANUMERIC",
0x60000: "PASSWORD_QUALITY_COMPLEX"
}
def password_type_tostr(val):
try:
val = int(val)
if val in PASSWORD_QUALITY:
return PASSWORD_QUALITY[val]
except:
pass
return "Unknown"
class LockSettings(IMiscSource):
version = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]
catalog_id = Catalog.CATALOG_DEVINFO
title = Label( "Screen Lock", "screen_lock" )
relative_directories = [ "data", "com.android.providers.settings",
"databases" ]
knownfiles = {
"settings.db": KnownFile(ParserType.TYPE_SQLITE3,
{
Label("Lock Settings", "lock_settings"): [
KnownFieldSQL( FieldType.TYPE_STR,
"""
SELECT value FROM
secure
WHERE name = 'lock_screen_owner_info'
""",
"Owner Info",
"lock_screen_owner_info",
),
KnownFieldSQL( FieldType.TYPE_STR,
"""
SELECT value FROM
secure
WHERE name = 'lockscreen.password_salt'
""",
"Password Salt",
"password_salt",
),
KnownFieldSQL( FieldType.TYPE_STR,
"""
SELECT value FROM
locksettings
WHERE name = 'lockscreen.password_type'
""",
"Password Quality",
"lockscreen.password_type",
converter=password_type_tostr
),
]
}
)
} | gpl-3.0 | 4,246,504,954,763,836,400 | 35.625 | 84 | 0.555556 | false | 4.27321 | false | false | false |
franek/weboob | modules/freemobile/pages/homepage.py | 3 | 2080 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.bill import Subscription
from weboob.tools.browser import BasePage
__all__ = ['HomePage']
class HomePage(BasePage):
def on_loaded(self):
pass
def get_list(self):
for divglobal in self.document.xpath('//div[@class="abonne"]'):
for link in divglobal.xpath('.//div[@class="acceuil_btn"]/a'):
login = link.attrib['href'].split('=').pop()
if login.isdigit():
break
divabo = divglobal.xpath('div[@class="idAbonne pointer"]')[0]
owner = unicode(divabo.xpath('p')[0].text.replace(' - ', ''))
phone = unicode(divabo.xpath('p/span')[0].text)
self.browser.logger.debug('Found ' + login + ' as subscription identifier')
self.browser.logger.debug('Found ' + owner + ' as subscriber')
self.browser.logger.debug('Found ' + phone + ' as phone number')
phoneplan = unicode(self.document.xpath('//div[@class="forfaitChoisi"]')[0].text.lstrip().rstrip())
self.browser.logger.debug('Found ' + phoneplan + ' as subscription type')
subscription = Subscription(phone)
subscription.label = phone + ' - ' + phoneplan
subscription.subscriber = owner
subscription._login = login
yield subscription
| agpl-3.0 | 7,789,149,495,777,014,000 | 39.784314 | 111 | 0.642308 | false | 4.086444 | false | false | false |
googleinterns/schemaorg-generator | protogenerator/main.py | 1 | 2484 | # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import core.schema_generator as schema_generator
import urllib.request
import time
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-s',
'--SRC',
type=str,
help='Path to source file')
group.add_argument('-v',
'--VER',
type=str,
help='Schema.org release number')
parser.add_argument('-o',
'--OUT',
type=str,
help='Path to output directory', required=True)
parser.add_argument('-p',
'--PKG',
type=str,
help='Proto package name', required=True)
def main():
args = parser.parse_args()
src = args.SRC
dest = args.OUT
pkg = args.PKG
ver = args.VER
if dest[-1] != '/':
dest = dest + '/'
if isinstance(src, str):
schema = schema_generator.SchemaGenerator(src)
schema.write_proto(dest, pkg)
else:
url = 'https://raw.githubusercontent.com/schemaorg/schemaorg/master/data/releases/' + \
ver + '/schema.nt'
name = './temp-' + str(int(time.time())) + '.nt'
try:
urllib.request.urlretrieve(url, name)
except urllib.error.HTTPError:
print('Invalid release number or check your internet connection.')
else:
schema = schema_generator.SchemaGenerator(name)
schema.write_proto(dest, pkg)
os.remove(name)
if __name__ == '__main__':
"""Generates protobuf code from a given schema.
Args:
-h, --help Show this help message and exit
-s, --SRC Path to source file
-v, --VER Schema.org release number
        -o, --OUT Path to output directory
-p, --PKG Proto package name
"""
main()
| apache-2.0 | 7,994,906,288,935,356,000 | 29.666667 | 95 | 0.588969 | false | 4.17479 | false | false | false |
sekikn/incubator-airflow | airflow/providers/google/cloud/example_dags/example_postgres_to_gcs.py | 7 | 1796 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example DAG using PostgresToGoogleCloudStorageOperator.
"""
import os
from airflow import models
from airflow.providers.google.cloud.transfers.postgres_to_gcs import PostgresToGCSOperator
from airflow.utils.dates import days_ago
PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCS_BUCKET = os.environ.get("GCP_GCS_BUCKET_NAME", "postgres_to_gcs_example")
FILENAME = "test_file"
SQL_QUERY = "select * from test_table;"
with models.DAG(
dag_id='example_postgres_to_gcs',
schedule_interval=None, # Override to match your needs
start_date=days_ago(1),
tags=['example'],
) as dag:
upload_data = PostgresToGCSOperator(
task_id="get_data", sql=SQL_QUERY, bucket=GCS_BUCKET, filename=FILENAME, gzip=False
)
upload_data_server_side_cursor = PostgresToGCSOperator(
task_id="get_data_with_server_side_cursor",
sql=SQL_QUERY,
bucket=GCS_BUCKET,
filename=FILENAME,
gzip=False,
use_server_side_cursor=True,
)
| apache-2.0 | -8,917,713,997,084,104,000 | 35.653061 | 91 | 0.731626 | false | 3.650407 | false | false | false |
MrSprigster/script.module.python.twitch | resources/lib/twitch/api/v5/videos.py | 1 | 3493 | # -*- encoding: utf-8 -*-
"""
Reference: https://dev.twitch.tv/docs/v5/reference/videos/
Copyright (C) 2016-2018 script.module.python.twitch
This file is part of script.module.python.twitch
SPDX-License-Identifier: GPL-3.0-only
See LICENSES/GPL-3.0-only for more information.
"""
from ... import keys, methods
from ...api.parameters import BroadcastType, Period, Language
from ...queries import V5Query as Qry
from ...queries import HiddenApiQuery as HQry
from ...queries import UploadsQuery as UQry
from ...queries import query
# required scope: none
@query
def by_id(video_id):
q = Qry('videos/{video_id}', use_token=False)
q.add_urlkw(keys.VIDEO_ID, video_id)
return q
# required scope: none
@query
def get_top(limit=10, offset=0, game=None, period=Period.WEEK, broadcast_type=BroadcastType.HIGHLIGHT):
q = Qry('videos/top', use_token=False)
q.add_param(keys.LIMIT, limit, 10)
q.add_param(keys.OFFSET, offset, 0)
q.add_param(keys.GAME, game)
q.add_param(keys.PERIOD, Period.validate(period), Period.WEEK)
q.add_param(keys.BROADCAST_TYPE, BroadcastType.validate(broadcast_type))
return q
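# Hedged usage note (assumed client wiring): get_top(limit=5,
# period=Period.MONTH) only builds and returns the V5 query object;
# executing it is left to the @query decorator / surrounding framework.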
# required scope: user_read
@query
def get_followed(limit=10, offset=0, broadcast_type=BroadcastType.HIGHLIGHT):
q = Qry('videos/followed')
q.add_param(keys.LIMIT, limit, 10)
q.add_param(keys.OFFSET, offset, 0)
q.add_param(keys.BROADCAST_TYPE, BroadcastType.validate(broadcast_type))
return q
# required scope: channel_editor
@query
def create(channel_id, title, description=None, game=None, language=None, tag_list=None):
q = Qry('videos/', method=methods.POST)
q.add_param(keys.CHANNEL_ID, channel_id)
q.add_param(keys.TITLE, title)
q.add_param(keys.DESCRIPTION, description)
q.add_param(keys.GAME, game)
if language is not None:
q.add_param(keys.LANGUAGE, Language.validate(language))
q.add_param(keys.TAG_LIST, tag_list)
return q
# required scope: channel_editor
@query
def update(video_id, title=None, description=None, game=None, language=None, tag_list=None):
q = Qry('videos/{video_id}', method=methods.PUT)
q.add_urlkw(keys.VIDEO_ID, video_id)
q.add_param(keys.TITLE, title)
q.add_param(keys.DESCRIPTION, description)
q.add_param(keys.GAME, game)
if language is not None:
q.add_param(keys.LANGUAGE, Language.validate(language))
q.add_param(keys.TAG_LIST, tag_list)
return q
# required scope: channel_editor
@query
def delete(video_id):
q = Qry('videos/{video_id}', method=methods.DELETE)
q.add_urlkw(keys.VIDEO_ID, video_id)
return q
# requires upload token
@query
def upload_part(video_id, part, upload_token, content_length, data):
q = UQry('upload/{video_id}', method=methods.PUT)
q.set_headers({'Content-Length': content_length, 'Content-Type': 'application/octet-stream'})
q.add_urlkw(keys.VIDEO_ID, video_id)
q.add_param(keys.PART, part)
q.add_param(keys.UPLOAD_TOKEN, upload_token)
q.add_bin(data)
return q
# requires upload token
@query
def complete_upload(video_id, upload_token):
q = UQry('upload/{video_id}/complete', method=methods.POST)
q.add_urlkw(keys.VIDEO_ID, video_id)
q.add_param(keys.UPLOAD_TOKEN, upload_token)
return q
# required scope: none
# undocumented / unsupported
@query
def _by_id(video_id, headers={}):
q = HQry('videos/{video_id}', headers=headers, use_token=False)
q.add_urlkw(keys.VIDEO_ID, video_id)
return q
| gpl-3.0 | -441,533,330,065,040,300 | 29.640351 | 103 | 0.696536 | false | 3.06942 | false | false | false |
tum-vision/autonavx_ardrone | ardrone_python/src/example3_trajectory.py | 1 | 1200 | #!/usr/bin/env python
import rospy
import roslib; roslib.load_manifest('ardrone_python')
from std_msgs.msg import Empty
from geometry_msgs.msg import Twist, Vector3
if __name__ == '__main__':
rospy.init_node('example_node', anonymous=True)
# publish commands (send to quadrotor)
pub_velocity = rospy.Publisher('/cmd_vel', Twist)
pub_takeoff = rospy.Publisher('/ardrone/takeoff', Empty)
pub_land = rospy.Publisher('/ardrone/land', Empty)
pub_reset = rospy.Publisher('/ardrone/reset', Empty)
print("ready!")
rospy.sleep(1.0)
print("takeoff..")
pub_takeoff.publish(Empty())
rospy.sleep(5.0)
print("flying forward..")
pub_velocity.publish(Twist(Vector3(0.05,0,0),Vector3(0,0,0)))
rospy.sleep(2.0)
print("turning around yaw axis..")
pub_velocity.publish(Twist(Vector3(0,0,0),Vector3(0,0,1)))
rospy.sleep(2.0)
print("flying forward..")
pub_velocity.publish(Twist(Vector3(0.05,0,0),Vector3(0,0,0)))
rospy.sleep(2.0)
print("stop..")
pub_velocity.publish(Twist(Vector3(0,0,0),Vector3(0,0,0)))
rospy.sleep(5.0)
print("land..")
pub_land.publish(Empty())
print("done!")
| mit | 5,187,995,165,854,732,000 | 27.571429 | 65 | 0.635 | false | 2.962963 | false | true | false |
abhattad4/Digi-Menu | digimenu/digimenu/settings.py | 3 | 2718 | """
Django settings for digimenu project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w%s(j9w3996gp$-djl#(@p@$^5++&)rnkr2n9&1mm_z#o-0t_v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'digimenu.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'digimenu.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'digimenu',
'USER': 'root',
'PASSWORD': 'a',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| bsd-3-clause | -2,644,085,489,206,268,400 | 24.641509 | 71 | 0.677704 | false | 3.449239 | false | false | false |
caioserra/apiAdwords | examples/adspygoogle/adwords/v201309/campaign_management/validate_text_ad.py | 2 | 2895 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example shows how to use validateOnly SOAP header.
Tags: CampaignService.mutate
Api: AdWordsOnly
"""
__author__ = '[email protected] (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
from adspygoogle.adwords.AdWordsErrors import AdWordsRequestError
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Initialize appropriate service with validate only flag enabled.
client.validate_only = True
ad_group_ad_service = client.GetAdGroupAdService(version='v201309')
# Construct operations to add a text ad.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'headline': 'Luxury Cruise to Mars'
}
}
}]
ad_group_ad_service.Mutate(operations)
# No error means the request is valid.
# Now let's check an invalid ad using a very long line to trigger an error.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for all astronauts in orbit',
'headline': 'Luxury Cruise to Mars'
}
}
}]
try:
ad_group_ad_service.Mutate(operations)
except AdWordsRequestError, e:
print 'Validation correctly failed with \'%s\'.' % str(e)
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, ad_group_id)
| apache-2.0 | 5,534,271,348,208,787,000 | 30.813187 | 77 | 0.617617 | false | 3.711538 | false | false | false |
janinko/pnc-cli | test/integration/test_environments_api.py | 3 | 2609 | import pytest
from pnc_cli.swagger_client.apis.environments_api import EnvironmentsApi
from test import testutils
import pnc_cli.user_config as uc
@pytest.fixture(scope='function', autouse=True)
def get_envs_api():
global envs_api
envs_api = EnvironmentsApi(uc.user.get_api_client())
def test_get_all_invalid_param():
testutils.assert_raises_typeerror(envs_api, 'get_all')
def test_get_all():
envs = envs_api.get_all(page_index=0, page_size=1000000, sort='', q='').content
assert envs is not None
# def test_create_invalid_param():
# testutils.assert_raises_typeerror(envs_api, 'create_new')
#def test_create_new(new_environment):
# env_ids = [env.id for env in envs_api.get_all(page_size=1000000).content]
# assert new_environment.id in env_ids
def test_get_specific_no_id():
testutils.assert_raises_valueerror(envs_api, 'get_specific', id=None)
def test_get_specific_invalid_param():
testutils.assert_raises_typeerror(envs_api, 'get_specific', id=1)
#def test_get_specific(new_environment):
def test_get_specific():
assert envs_api.get_specific(id=1).content is not None
# def test_update_no_id():
# testutils.assert_raises_valueerror(envs_api, 'update', id=None)
# def test_update_invalid_param():
# testutils.assert_raises_typeerror(envs_api, 'update', id=1)
# environment manipulation is currently disabled in pnc
# def test_update(new_environment):
# randname = testutils.gen_random_name()
# updated_env = environments.create_environment_object(name=randname, system_image_type='VIRTUAL_MACHINE_RAW', description='DOCKER',
# system_image_id=randname)
# envs_api.update(id=new_environment.id, body=updated_env)
# retrieved_env = envs_api.get_specific(new_environment.id).content
# assert (retrieved_env.description == 'DOCKER')
# assert (retrieved_env.name == randname)
# # the following fields are immutable, and should remain unchanged
# assert (retrieved_env.system_image_id == retrieved_env.system_image_id)
# assert (retrieved_env.system_image_type == 'DOCKER_IMAGE')
# def test_delete_no_id():
# testutils.assert_raises_valueerror(envs_api, 'delete', id=None)
# def test_delete_invalid_param():
# testutils.assert_raises_typeerror(envs_api, 'delete', id=1)
# environment manipulation is currently disabled in pnc
# def test_delete(new_environment):
# envs_api.delete(new_environment.id)
# env_ids = [env.id for env in envs_api.get_all(page_size=1000000).content]
# assert new_environment.id not in env_ids
| apache-2.0 | -5,567,619,514,136,862,000 | 32.883117 | 136 | 0.701801 | false | 3.132053 | true | false | false |
alx/torrentflux | TF_BitTornado/BitTornado/BTcrypto.py | 5 | 3202 | # Written by John Hoffman
# based on code by Uoti Urpala
# see LICENSE.txt for license information
from __future__ import generators # for python 2.2
from random import randrange,randint,seed
try:
from os import urandom
except:
seed()
urandom = lambda x: ''.join([chr(randint(0,255)) for i in xrange(x)])
from sha import sha
try:
True
except:
True = 1
False = 0
try:
from Crypto.Cipher import ARC4
CRYPTO_OK = True
except:
CRYPTO_OK = False
KEY_LENGTH = 160
DH_PRIME = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A63A36210000000000090563
PAD_MAX = 200 # less than protocol maximum, and later assumed to be < 256
DH_BYTES = 96
def bytetonum(x):
return long(x.encode('hex'), 16)
def numtobyte(x):
    x = '%x' % x  # hex digits without the '0x' prefix or a long's 'L' suffix
x = '0'*(192 - len(x)) + x
return x.decode('hex')
class Crypto:
def __init__(self, initiator, disable_crypto = False):
self.initiator = initiator
self.disable_crypto = disable_crypto
if not disable_crypto and not CRYPTO_OK:
raise NotImplementedError, "attempt to run encryption w/ none installed"
self.privkey = bytetonum(urandom(KEY_LENGTH/8))
self.pubkey = numtobyte(pow(2, self.privkey, DH_PRIME))
self.keylength = DH_BYTES
self._VC_pattern = None
def received_key(self, k):
self.S = numtobyte(pow(bytetonum(k), self.privkey, DH_PRIME))
self.block3a = sha('req1'+self.S).digest()
self.block3bkey = sha('req3'+self.S).digest()
self.block3b = None
def _gen_block3b(self, SKEY):
a = sha('req2'+SKEY).digest()
return ''.join([ chr(ord(a[i])^ord(self.block3bkey[i]))
for i in xrange(20) ])
def test_skey(self, s, SKEY):
block3b = self._gen_block3b(SKEY)
if block3b != s:
return False
self.block3b = block3b
if not self.disable_crypto:
self.set_skey(SKEY)
return True
def set_skey(self, SKEY):
if not self.block3b:
self.block3b = self._gen_block3b(SKEY)
crypta = ARC4.new(sha('keyA'+self.S+SKEY).digest())
cryptb = ARC4.new(sha('keyB'+self.S+SKEY).digest())
if self.initiator:
self.encrypt = crypta.encrypt
self.decrypt = cryptb.decrypt
else:
self.encrypt = cryptb.encrypt
self.decrypt = crypta.decrypt
self.encrypt('x'*1024) # discard first 1024 bytes
self.decrypt('x'*1024)
def VC_pattern(self):
if not self._VC_pattern:
self._VC_pattern = self.decrypt('\x00'*8)
return self._VC_pattern
def read(self, s):
self._read(self.decrypt(s))
def write(self, s):
self._write(self.encrypt(s))
def setrawaccess(self, _read, _write):
self._read = _read
self._write = _write
def padding(self):
return urandom(randrange(PAD_MAX-16)+16)
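
# Hedged sketch of the handshake maths (assumed driver code; requires
# pycrypto's ARC4, i.e. CRYPTO_OK == True, and Python 2 like the module):
if __name__ == '__main__':
    a, b = Crypto(True), Crypto(False)
    a.received_key(b.pubkey)   # both ends derive the same DH secret S
    b.received_key(a.pubkey)
    a.set_skey('infohash')     # placeholder shared secret (SKEY)
    b.set_skey('infohash')
    assert b.decrypt(a.encrypt('hello')) == 'hello'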
| gpl-2.0 | -6,187,985,701,630,689,000 | 29.087379 | 205 | 0.597751 | false | 3.202 | false | false | false |
gbenson/i8c | src/i8c/compiler/optimizer.py | 1 | 12705 | # -*- coding: utf-8 -*-
# Copyright (C) 2015-16 Red Hat, Inc.
# This file is part of the Infinity Note Compiler.
#
# The Infinity Note Compiler is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# The Infinity Note Compiler is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Infinity Note Compiler. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import logger
from . import operations
from .types import INTTYPE
import inspect
# The primary goal of these optimizations is to reduce the instruction
# count, to aid consumers using interpreters to execute notes. The
# secondary goal of these optimizations is to reduce the size of the
# bytecode in cases where this does not conflict with the primary goal
# of reducing instruction count.
class Optimizer(object):
"""Base class for all optimizers.
"""
def visit_toplevel(self, toplevel):
for node in toplevel.functions:
node.accept(self)
def debug_print_hit(self, location):
if self.debug_print.is_enabled:
optimization = inspect.stack()[1][0].f_code.co_name
for prefix in ("__", "try_"):
if optimization.startswith(prefix):
optimization = optimization[len(prefix):]
self.debug_print("%s: %s\n" % (location.fileline, optimization))
class BlockOptimizer(Optimizer):
"""Optimizations performed before serialization.
"""
debug_print = logger.debug_printer_for("blockopt")
def visit_function(self, function):
self.visited = {}
function.entry_block.accept(self)
self.try_combine_blocks(self.visited.keys())
def try_combine_blocks(self, blocks):
blocks = sorted((block.index, block) for block in blocks)
blocks = [block for index, block in blocks]
while self.__try_combine_blocks(blocks):
pass
def __try_combine_blocks(self, blocks):
for i in range(len(blocks)):
block_1 = blocks[i]
for j in range(i + 1, len(blocks)):
block_2 = blocks[j]
if block_2.is_equivalent_to(block_1):
self.debug_print(
("Blocks #%d and #%d are equivalent, "
+ "removing #%d\n") % (block_1.index,
block_2.index,
block_2.index))
for block in blocks:
block.replace_exit(block_2, block_1)
blocks.remove(block_2)
return True
return False
def visit_basicblock(self, block):
if self.visited.get(block, False):
return
self.visited[block] = True
self.try_all_optimizations(block)
if self.debug_print.is_enabled:
self.debug_print(str(block) + "\n\n")
for block in block.exits:
block.accept(self)
def try_all_optimizations(self, block):
self.try_eliminate_cmp_bra_const_const(block)
self.try_eliminate_lit0_cmp_before_bra(block)
self.try_reverse_branch_exits(block)
self.try_peephole(block, self.try_eliminate_identity_math, 2)
self.try_peephole(block, self.try_use_plus_uconst, 2)
def __tecbcc_helper(self, block):
"""Helper for try_eliminate_cmp_bra_const_const.
"""
if len(block.entries) != 1:
return
if len(block.ops) < 2:
return
if not block.ops[0].is_load_constant:
return
constant = block.ops[0]
if constant.type.basetype != INTTYPE:
return
return constant.value
def try_eliminate_cmp_bra_const_const(self, block):
"""Optimize cases where the blocks following a conditional
branch load the constants that the comparison pushed to the
stack.
This is relevant for libpthread notes. All the libthread_db
functions that the libpthread notes replace return a td_err_e
error code defined as:
typedef enum {
TD_OK, /* No error. */
TD_ERR, /* General error. */
... /* Specific errors. */
} td_err_e;
Some libthread_db functions call proc_service functions which
return a similar ps_err_e error code:
typedef enum {
PS_OK, /* No error. */
PS_ERR, /* General error. */
... /* Specific errors. */
} ps_err_e;
Note that TD_OK == PS_OK == 0 and TD_ERR == PS_ERR == 1.
This optimizer replaces code of the following pattern:
call /* Some proc_service function. */
load PS_OK /* == 0 */
bne fail
load TD_OK /* == 0 */
return
fail:
load TD_ERR /* == 1 */
return
With this:
call /* Some proc_service function. */
load PS_OK
ne
"""
# Does the block end with "comparison, branch"?
if len(block.ops) < 2:
return
if not block.ops[-1].is_branch:
return
if not block.ops[-2].is_comparison:
return
# Do the successors start with "const 0" and "const 1"?
constants = list(map(
self.__tecbcc_helper, (block.nobranch_exit,
block.branched_exit)))
if 0 not in constants or 1 not in constants:
return
# Are the successors otherwise the same?
s1, s2 = block.exits
if s1.exits != s2.exits:
return
if len(s1.ops) != len(s2.ops):
return
for op1, op2 in list(zip(s1.ops, s2.ops))[1:-1]:
if not op1.is_equivalent_to(op2):
return
self.debug_print_hit(block.ops[-1])
# Reverse the comparison if necessary
if constants == [1, 0]:
block.ops[-2].reverse()
# Lose one of the successor blocks (doesn't matter which)
dead_block = block.exits.pop()
dead_block.entries.remove(block)
assert not dead_block.entries
# Reduce the branch to a goto
block.ops[-1] = operations.SyntheticGoto(block.ops[-1])
        # Move to the remaining successor and drop the ConstOp.
# This messes with the types a bit (what was an INTTYPE
# is now a BOOLTYPE) but that doesn't matter once it's
# bytecode.
[block] = block.exits
removed_op = block.ops.pop(0)
assert removed_op.is_load_constant
def try_eliminate_lit0_cmp_before_bra(self, block):
# Does the block end with "load 0, {eq,ne}, branch"?
if len(block.ops) < 3:
return
if not block.ops[-1].is_branch:
return
if not block.ops[-2].is_comparison:
return
if block.ops[-2].dwarfname not in ("eq", "ne"):
return
if not block.ops[-3].is_load_constant:
return
if block.ops[-3].value != 0:
return
self.debug_print_hit(block.ops[-2])
# Reverse the branch if necessary
if block.ops[-2].dwarfname == "eq":
block.exits.reverse()
# Remove the load and the comparison
removed_op = block.ops.pop(-3)
assert removed_op.is_load_constant
removed_op = block.ops.pop(-2)
assert removed_op.is_comparison
def try_reverse_branch_exits(self, block):
# Does the block end with "compare, branch"?
if len(block.ops) < 2:
return
if not block.ops[-1].is_branch:
return
if not block.ops[-2].is_comparison:
return
# Does the nobranch case immediately jump somewhere?
tmp = block.nobranch_exit.first_op
if not (tmp.is_goto or tmp.is_return):
return
# Does the branch case NOT immediately jump somewhere?
tmp = block.branched_exit.first_op
if tmp.is_goto or tmp.is_return:
return
self.debug_print_hit(block.ops[-2])
# Reverse both the comparison and the branch
block.ops[-2].reverse()
block.exits.reverse()
def try_peephole(self, block, action, size):
start = 0
while True:
start = self.__try_peephole(block, action, size)
if start is None:
break
def __try_peephole(self, block, action, size):
"""Helper for try_peephole.
"""
for index in range(len(block.ops) - size):
if action(block, index):
return index
IDENTITIES = {
"plus": 0, "minus": 0, "mul": 1,
"div": 1, "shl": 0, "shr": 0,
"shra": 0, "or": 0, "xor": 0}
def try_eliminate_identity_math(self, block, index):
if not block.ops[index].is_load_constant:
return False
opname = getattr(block.ops[index + 1], "dwarfname", None)
if opname is None:
return False
identity = self.IDENTITIES.get(opname, None)
if identity is None:
return False
if block.ops[index].value != identity:
return False
self.debug_print_hit(block.ops[index + 1])
# Remove the operations
removed_op = block.ops.pop(index + 1)
assert removed_op.dwarfname == opname
removed_op = block.ops.pop(index)
assert removed_op.is_load_constant
return True
def try_use_plus_uconst(self, block, index):
if not block.ops[index].is_load_constant:
return False
if block.ops[index].value < 0:
return False
if not block.ops[index + 1].is_add:
return False
self.debug_print_hit(block.ops[index])
# Insert the plus_uconst
block.ops[index] = operations.PlusUConst(block.ops[index])
# Remove the add
removed_op = block.ops.pop(index + 1)
assert removed_op.is_add
return True
class StreamOptimizer(Optimizer):
"""Optimizations performed after serialization.
"""
debug_print = logger.debug_printer_for("streamopt")
def debug_print_stream(self, stream):
self.debug_print("%s\n" % stream)
def visit_function(self, function):
function.ops.accept(self)
def visit_operationstream(self, stream):
while True:
if self.try_remove_multijump(stream):
continue
if self.try_remove_goto_next(stream):
continue
if self.try_remove_unreachable_code(stream):
continue
break
def try_remove_multijump(self, stream):
for index, op in stream.items():
target = stream.jumps.get(op, None)
if target is None:
continue
if not target.is_goto:
continue
self.debug_print_hit(op)
stream.retarget_jump(op, stream.jumps[target])
self.debug_print_stream(stream)
return True
return False
def try_remove_goto_next(self, stream):
for index, op in stream.items():
if index + 1 == len(stream.ops):
continue
if not op.is_goto:
continue
if stream.labels.get(op, None) is not None:
continue
if stream.jumps[op] is not stream.ops[index + 1]:
continue
self.debug_print_hit(op)
stream.remove_by_index_op(index, op)
self.debug_print_stream(stream)
return True
return False
def try_remove_unreachable_code(self, stream):
last_was_goto = False
for index, op in stream.items():
if last_was_goto:
if stream.labels.get(op, None) is None:
self.debug_print_hit(op)
stream.remove_by_index_op(index, op)
self.debug_print_stream(stream)
return True
last_was_goto = op.is_goto
return False
| lgpl-2.1 | -8,282,089,333,127,545,000 | 32.346457 | 76 | 0.564502 | false | 4.000315 | false | false | false |
faunalia/rt_geosisma_offline | DlgSelectRequest.py | 1 | 5159 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : RT Geosisma Offline
Description : Geosisma Offline Plugin
Date : October 21, 2011
copyright : (C) 2013 by Luigi Pirelli (Faunalia)
email : [email protected]
***************************************************************************/
Works done from Faunalia (http://www.faunalia.it) with funding from Regione
Toscana - Servizio Sismico (http://www.rete.toscana.it/sett/pta/sismica/)
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from collections import OrderedDict
from dlgSelectRequest_ui import Ui_Dialog
from ArchiveManager import ArchiveManager
class DlgSelectRequest(QDialog, Ui_Dialog):
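    """Dialog that lists the stored requests and exposes the user's
    selection as currentRequest/currentRequestId."""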
# signals
loadRequestsDone = pyqtSignal()
loadTableDone = pyqtSignal()
def __init__(self, currentRequestId=None, parent=None):
QDialog.__init__(self, parent)
self.currentRequestId = currentRequestId
self.currentRequest = None
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.buttonBox.button(QDialogButtonBox.Close).setText(self.tr("Ignora"))
self.loadRequestsDone.connect(self.updateButtonsState)
self.loadRequestsDone.connect(self.loadTable)
self.requestsTableWidget.itemSelectionChanged.connect(self.updateButtonsState)
self.loadTableDone.connect(self.selectCurrentRequest)
self.buttonBox.button(QDialogButtonBox.Ok).clicked.connect(self.setCurrentRequest)
self.loadRequests()
self.loadTable()
def loadRequests(self):
self.records = ArchiveManager.instance().loadRequests()
self.loadRequestsDone.emit()
def loadTable(self):
self.requestsTableWidget.setSortingEnabled(True)
        # organize columns
Hide = True
Show = False
columns = OrderedDict()
columns['id'] = ( self.tr(u'id'), Show )
columns['event_id'] = ( self.tr(u'Evento'), Show )
columns['s1prov'] = ( self.tr(u'Provincia'), Show )
columns['s1com'] = ( self.tr(u'Comune'), Show )
columns['s1loc'] = ( self.tr(u'Localitá'), Show )
columns['s1via'] = ( self.tr(u'Via'), Show )
columns['s1civico'] = ( self.tr(u'Civico'), Show )
columns['s1catpart1'] = ( self.tr(u'Particella'), Show )
columns['s1catfoglio'] = ( self.tr(u'Foglio'), Show )
columns['created'] = ( self.tr(u'Data di creazione'), Show )
columns['number'] = ( self.tr(u'Squadra'), Show )
columns['team_id'] = ( self.tr(u'Id della Squadra'), Hide )
columns['s1name'] = ( self.tr(u'Richiesto da'), Show )
# set table size
self.requestsTableWidget.clear()
self.requestsTableWidget.setRowCount( len(self.records) )
self.requestsTableWidget.setColumnCount( len(columns) )
        # resizing mode of the columns
header = self.requestsTableWidget.horizontalHeader()
header.setResizeMode(QHeaderView.ResizeToContents)
        # fill the table
self.requestsTableWidget.setHorizontalHeaderLabels( [val[0] for val in columns.values()] )
for row, record in enumerate(self.records):
for column, columnKey in enumerate(columns.keys()):
item = QTableWidgetItem()
try:
value = int(record[columnKey])
except:
value = str(record[columnKey])
item.setData(Qt.DisplayRole, value)
# add record in the first "id" colum
if columnKey == "id":
item.setData(Qt.UserRole, record)
self.requestsTableWidget.setItem(row, column, item )
        # show or hide each column according to its flag
for index, key in enumerate(columns):
self.requestsTableWidget.setColumnHidden(index, columns[key][1])
self.loadTableDone.emit()
def selectCurrentRequest(self):
if self.currentRequestId is None:
return
for row in range( self.requestsTableWidget.rowCount() ):
item = self.requestsTableWidget.item(row, 0)
if str(self.currentRequestId) == item.text():
self.requestsTableWidget.selectRow(row)
break
def setCurrentRequest(self):
selectedItems = self.requestsTableWidget.selectedItems()
if len(selectedItems) == 0:
self.currentRequestId = None
self.currentRequest = None
return
# assume that only one row is selected => get row from an element
row = selectedItems[0].row()
item = self.requestsTableWidget.item(row, 0) # assume id is the first column
self.currentRequestId = item.text()
self.currentRequest = item.data(Qt.UserRole)
def updateButtonsState(self):
if len(self.records) > 0:
enabled = True
if len(self.requestsTableWidget.selectedItems()) == 0:
enabled = False
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enabled)
| gpl-3.0 | 1,876,209,328,122,207,200 | 34.572414 | 92 | 0.638232 | false | 3.511232 | false | false | false |
DEV3L/python-behave-sandbox | class_variable_test.py | 1 | 1168 | """
This file helped clarify for me how Python handles class and instance variables.
Instances of a class refer to the class value until it is modified on the instance.
This file can simply be run from the console:
python class_variable_test.py
"""
class SomeClass():
a_field = None
def main():
print("classfield: " + str(SomeClass.a_field)) # None
instanceOne = SomeClass()
print("instance_field_1: " + str(instanceOne.a_field)) # None
instanceOne.a_field = "One"
print("instance_field_1: " + instanceOne.a_field) # One
print("classfield: " + str(SomeClass.a_field)) # None
SomeClass.a_field = "Classfield"
print("classfield: " + SomeClass.a_field) # Classfield
print("instance_field_1: " + instanceOne.a_field) # One
instanceTwo = SomeClass()
print("instance_field_2: " + instanceTwo.a_field) # Classfield
SomeClass.a_field = "Classfield_Other"
print("instance_field_2: " + instanceTwo.a_field) # Classfield_Other
instanceTwo.a_field = "Two"
print("instance_field_2: " + instanceTwo.a_field) # Two
print("classfield: " + SomeClass.a_field) # Classfield
if __name__ == '__main__':
main()
| mit | 4,065,829,938,781,323,000 | 28.2 | 80 | 0.669521 | false | 3.337143 | false | false | false |
clubadm/clubadm | clubadm/migrations/0001_initial.py | 1 | 5914 | from django.db import migrations, models
from django.db.models import deletion
from django.utils import timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name="User",
fields=[
("id", models.AutoField(
auto_created=True, primary_key=True, serialize=False,
verbose_name="ID")),
("username", models.CharField(
max_length=25, unique=True,
verbose_name="имя пользователя")),
("access_token", models.CharField(
blank=True, max_length=40, verbose_name="токен доступа")),
("is_oldfag", models.BooleanField(
default=False, verbose_name="старый участник",
help_text="Отметьте, чтобы снять ограничение кармы.")),
("is_banned", models.BooleanField(
default=False, verbose_name="забанен")),
("first_login", models.DateTimeField(
default=timezone.now, verbose_name="первый вход")),
("last_login", models.DateTimeField(
blank=True, null=True, verbose_name="последний вход")),
],
options={
"verbose_name": "пользователь",
"verbose_name_plural": "пользователи",
"ordering": ["username"],
},
),
migrations.CreateModel(
name="Mail",
fields=[
("id", models.AutoField(
auto_created=True, primary_key=True, serialize=False,
verbose_name="ID")),
("body", models.TextField(max_length=400)),
("send_date", models.DateTimeField(
db_index=True, default=timezone.now)),
("read_date", models.DateTimeField(
blank=True, db_index=True, null=True)),
],
options={
"ordering": ["send_date"],
},
),
migrations.CreateModel(
name="Member",
fields=[
("id", models.AutoField(
auto_created=True, primary_key=True, serialize=False,
verbose_name="ID")),
("fullname", models.CharField(
max_length=80, verbose_name="полное имя")),
("postcode", models.CharField(
max_length=20, verbose_name="индекс")),
("address", models.TextField(
max_length=200, verbose_name="адрес")),
("gift_sent", models.DateTimeField(
blank=True, db_index=True, null=True,
verbose_name="подарок отправлен")),
("gift_received", models.DateTimeField(
blank=True, db_index=True, null=True,
verbose_name="подарок получен")),
("giftee", models.OneToOneField(
blank=True, null=True, on_delete=deletion.CASCADE,
related_name="santa", to="clubadm.Member",
verbose_name="получатель подарка")),
],
options={
"verbose_name": "участник",
"verbose_name_plural": "участники",
"ordering": ["season", "fullname"],
},
),
migrations.CreateModel(
name="Season",
fields=[
("year", models.IntegerField(
primary_key=True, serialize=False, verbose_name="год")),
("gallery", models.URLField(
blank=True, verbose_name="пост хвастовства подарками")),
("signups_start", models.DateField(
verbose_name="начало регистрации")),
("signups_end", models.DateField(
verbose_name="жеребьевка адресов")),
("ship_by", models.DateField(
help_text="После этой даты сезон закрывается и уходит вархив.",
verbose_name="последний срок отправки подарка")),
],
options={
"verbose_name": "сезон",
"verbose_name_plural": "сезоны",
"ordering": ["year"],
"get_latest_by": "year",
},
),
migrations.AddField(
model_name="member",
name="season",
field=models.ForeignKey(
on_delete=deletion.CASCADE, to="clubadm.Season",
verbose_name="сезон"),
),
migrations.AddField(
model_name="member",
name="user",
field=models.ForeignKey(
on_delete=deletion.CASCADE, to="clubadm.User",
verbose_name="пользователь"),
),
migrations.AddField(
model_name="mail",
name="recipient",
field=models.ForeignKey(
on_delete=deletion.CASCADE, related_name="+",
to="clubadm.Member"),
),
migrations.AddField(
model_name="mail",
name="sender",
field=models.ForeignKey(
on_delete=deletion.CASCADE, related_name="+",
to="clubadm.Member"),
),
migrations.AlterUniqueTogether(
name="member",
unique_together=set([("user", "season")]),
),
]
| mit | 5,740,575,148,904,608,000 | 38.870504 | 83 | 0.481776 | false | 4.069016 | false | false | false |
belemizz/mimic2_tools | clinical_db/patient_statistics.py | 1 | 5359 | '''Statistics of mimic2 database'''
from get_sample import Mimic2
from mutil import Graph
import numpy as np
from matplotlib.pyplot import waitforbuttonpress
mimic2 = Mimic2()
graph = Graph()
def readmission_statistic():
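    """Print summary statistics and plot histograms of admission,
    readmission and death durations for CHF subjects.
    """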
l_id = mimic2.subject_with_chf(max_seq=1)
total_admission = 0
alive_on_disch = 0
death_on_disch = 0
readm_and_death_within_th = 0
readm_and_no_death_within_th = 0
no_readm_death_within_th = 0
no_readm_no_death_within_th = 0
duration_th = 30
readm_after_th = 0
no_readm_after_th = 0
l_adm_duration = []
l_readm_duration = []
l_death_duration = []
l_n_admission = []
for id in l_id:
subject = mimic2.patient(id)
death_dt = subject[0][3]
admissions = mimic2.admission(id)
l_n_admission.append(len(admissions))
total_admission += len(admissions)
for idx, adm in enumerate(admissions):
admit_dt = admissions[idx][2]
disch_dt = admissions[idx][3]
            adm_duration = (disch_dt - admit_dt).days
            l_adm_duration.append(adm_duration)
if death_dt is not None:
death_duration = (death_dt - disch_dt).days
else:
death_duration = np.inf
l_death_duration.append(death_duration)
if idx < len(admissions) - 1:
next_adm_dt = admissions[idx + 1][2]
readm_duration = (next_adm_dt - disch_dt).days
else:
readm_duration = np.inf
l_readm_duration.append(readm_duration)
# counter
if death_duration < 1:
death_on_disch += 1
else:
alive_on_disch += 1
if death_duration <= duration_th and readm_duration <= duration_th:
readm_and_death_within_th += 1
elif death_duration > duration_th and readm_duration <= duration_th:
readm_and_no_death_within_th += 1
elif death_duration <= duration_th and readm_duration > duration_th:
no_readm_death_within_th += 1
else:
no_readm_no_death_within_th += 1
if readm_duration is np.inf:
no_readm_after_th += 1
else:
readm_after_th += 1
n_subject = len(l_n_admission)
l_death_or_readm_duration = []
for idx in range(len(l_readm_duration)):
l_death_or_readm_duration.append(min(l_death_duration[idx], l_readm_duration[idx]))
print "Total subject: %d" % n_subject
print "Total admission: %d" % total_admission
print "Mean Admission Length: %f" % np.mean(l_adm_duration)
print "Median Admission Length: %f" % np.median(l_adm_duration)
print "Death discharge: %d" % death_on_disch
print "Alive discharge: %d" % alive_on_disch
print "__Within %d days__" % duration_th
print "Readm / Death: %d" % readm_and_death_within_th
print "Readm / no Death: %d" % readm_and_no_death_within_th
print "no Readm / Death: %d" % no_readm_death_within_th
print "no Readm / no Death: %d" % no_readm_no_death_within_th
print "__After %d days__" % duration_th
print "Readm: %d" % readm_after_th
print "No Readm: %d" % no_readm_after_th
print "Histogram of #admissions per subject"
hist, bins = np.histogram(l_adm_duration, bins=range(0, 32))
graph.bar_histogram(hist, bins, "Number of Patients", "Admission Duration", True)
print "Histogram of #admissions per subject"
hist, bins = np.histogram(l_n_admission, bins=range(1, max(l_n_admission) + 1))
graph.bar_histogram(hist, bins, "Number of Patients", "Recorded admissions per patient", True)
print "Histogram of readmission duration"
hist, bins = np.histogram(l_readm_duration, bins=range(1, 602, 30))
graph.bar_histogram(hist, bins, "Number of readmissions",
"Duration between discharge and readmission", False)
hist, bins = np.histogram(l_readm_duration, bins=range(1, 32, 1))
graph.bar_histogram(hist, bins, "Number of readmissions",
"Duration between discharge and readmission", True)
print "Histogram of death duration"
hist, bins = np.histogram(l_death_duration, bins=range(1, 601, 30))
graph.bar_histogram(hist, bins, "Number of deaths",
"Duration between discharge and death", False)
hist, bins = np.histogram(l_death_duration, bins=range(1, 32, 1))
graph.bar_histogram(hist, bins, "Number of readmissions",
"Duration between discharge and death", True)
print "Histogram of death or readdm duration"
hist, bins = np.histogram(l_death_or_readm_duration, bins=range(1, 602, 30))
graph.bar_histogram(hist, bins, "Number of deaths",
"Duration between discharge and death or readmission", False,
filename="DorR_600")
hist, bins = np.histogram(l_death_or_readm_duration, bins=range(1, 32, 1))
graph.bar_histogram(hist, bins, "Number of readmissions",
"Duration between discharge and death or readmission", True,
filename="DorR_30")
if __name__ == '__main__':
readmission_statisic()
waitforbuttonpress()
| mit | 6,756,680,952,525,994,000 | 37.553957 | 98 | 0.594514 | false | 3.381073 | false | false | false |
ptitjes/quodlibet | tests/test_metadata.py | 2 | 4043 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from tests import mkstemp, TestCase, get_data_path
import os
from quodlibet import formats
from quodlibet import config
from shutil import copyfileobj
class TestMetaDataBase(TestCase):
base = get_data_path("silence-44-s")
def setUp(self):
"""Copy the base silent file to a temp name/location and load it"""
config.init()
fd, self.filename = mkstemp(suffix=self.ext, text=False)
dst = os.fdopen(fd, 'wb')
src = open(self.base + self.ext, 'rb')
copyfileobj(src, dst)
dst.close()
self.song = formats.MusicFile(self.filename)
def tearDown(self):
"""Delete the temp file"""
os.remove(self.filename)
del self.filename
del self.song
config.quit()
class _TestMetaDataMixin(object):
def test_base_data(self):
self.failUnlessEqual(self.song['artist'], 'piman\njzig')
self.failUnlessEqual(self.song['album'], 'Quod Libet Test Data')
self.failUnlessEqual(self.song['title'], 'Silence')
def test_mutability(self):
self.failIf(self.song.can_change('=foo'))
self.failIf(self.song.can_change('foo~bar'))
self.failUnless(self.song.can_change('artist'))
self.failUnless(self.song.can_change('title'))
self.failUnless(self.song.can_change('tracknumber'))
self.failUnless(self.song.can_change('somebadtag'))
self.failUnless(self.song.can_change('some%punctuated:tag.'))
def _test_tag(self, tag, values, remove=True):
self.failUnless(self.song.can_change(tag))
for value in values:
self.song[tag] = value
self.song.write()
written = formats.MusicFile(self.filename)
self.failUnlessEqual(written[tag], value)
if remove:
del self.song[tag]
self.song.write()
deleted = formats.MusicFile(self.filename)
self.failIf(tag in deleted)
def test_artist(self): # a normalish tag
self._test_tag('artist', [u'me', u'you\nme',
u'\u6d5c\u5d0e\u3042\u3086\u307f'])
def test_date(self): # unusual special handling for mp3s
self._test_tag('date', [u'2004', u'2005', u'2005-06-12'], False)
def test_genre(self): # unusual special handling for mp3s
self._test_tag('genre', [u'Pop', u'Rock\nClassical', u'Big Bird',
u'\u30a2\u30cb\u30e1\u30b5\u30f3\u30c8\u30e9'])
def test_odd_performer(self):
values = [u"A Person", u"Another"]
self._test_tag("performer:vocals", values)
self._test_tag("performer:guitar", values)
def test_wackjob(self): # undefined tag
self._test_tag('wackjob', [u'Jelly\nDanish', u'Muppet',
u'\u30cf\u30f3\u30d0\u30fc\u30ac\u30fc'])
tags = ['album', 'arranger', 'artist', 'author', 'comment', 'composer',
'conductor', 'copyright', 'discnumber', 'encodedby', 'genre', 'isrc',
'language', 'license', 'lyricist', 'organization', 'performer', 'title',
'tracknumber', 'version', 'xyzzy_undefined_tag', 'musicbrainz_trackid',
'releasecountry']
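# Build a TestCase subclass for every supported format that has test data.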
for ext in formats.loaders.keys():
if os.path.exists(TestMetaDataBase.base + ext):
extra_tests = {}
for tag in tags:
if tag in ['artist', 'date', 'genre']:
continue
def _test_tag(self, tag=tag):
self._test_tag(tag, [u'a'])
extra_tests['test_tag_' + tag] = _test_tag
def _test_tags(self, tag=tag):
self._test_tag(tag, [u'b\nc'])
extra_tests['test_tags_' + tag] = _test_tags
name = 'MetaData' + ext
testcase = type(
name, (TestMetaDataBase, _TestMetaDataMixin), extra_tests)
testcase.ext = ext
globals()[name] = testcase
| gpl-2.0 | -5,440,857,690,750,559,000 | 34.464912 | 75 | 0.61118 | false | 3.411814 | true | false | false |
familonet/nexmo-download-link | handler.py | 1 | 10979 | #!/usr/bin/env python
# coding=UTF-8
# Title: handler.py
# Description: This file contains all tornado.web.RequestHandler classes used in this application
# Author David Nellessen <[email protected]>
# Date: 12.01.15
# Note:
# ==============================================================================
# Import modules
from tornado import web, gen, escape
from tornado.escape import utf8
import logging
import phonenumbers
import pygeoip
from tornado.iostream import StreamClosedError
class BaseHandler(web.RequestHandler):
"""
A base handler providing localization features, phone number validation
and formation as well as use of service limitation based on IP addresses.
It also implements support for JSONP (for cross-domain requests).
"""
guess_country = True
default_country = 'DE'
def __init__(self, application, request, **kwargs):
super(BaseHandler, self).__init__(application, request, **kwargs)
self.counter = {}
def write(self, chunk):
"""
        Overrides the default write method to add JSONP support.
"""
if self._finished:
raise RuntimeError("Cannot write() after finish(). May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
callback = self.get_argument('callback', None)
if callback:
chunk = callback + '(' + chunk + ');'
chunk = utf8(chunk)
self._write_buffer.append(chunk)
def get_browser_locale_code(self):
"""
Determines the user's locale from ``Accept-Language`` header.
This is similar to tornado.web.get_browser_locale except it
returns the code and not a Locale instance. Also this will return
        a result whether a translation for this language was loaded or not.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
logging.debug(locales)
codes = [l[0] for l in locales]
return codes[0]
return self.__class__.default_country
def get_user_country_by_ip(self):
"""
Determines the user's country by his IP-address. This will return
the country code or None if not found.
"""
        country = None
        try:
            country = self.application.geo_ip.country_code_by_addr(
                self.request.remote_ip)
        except pygeoip.GeoIPError:
            try:
                country = self.application.geo_ipv6.country_code_by_addr(
                    self.request.remote_ip)
            except pygeoip.GeoIPError:
                pass
if not country:
logging.warning('Could not locate country for ' + self.request.remote_ip)
return None
else:
logging.debug('Determined country by IP address: ' + country)
return country
def parse_phonenumber(self, number):
"""
        Validates and parses a phone number. It will return a
        phone number object or False if parsing failed.
        If the phone number is not given in full international notation,
        the country will be guessed if the class attribute guess_country
        is True. Guessing will be done as follows:
1. If a query string parameter 'country' is given as a country code
(i.e. 'US', 'DE', ...) it will be used.
2. If no parameter country is given the country will be determined by
the remote IP address.
3. Otherwise the country determined by the request header
Accept-Language will be used.
        4. As a fall-back the class's default_country attribute will be used.
"""
try:
return phonenumbers.parse(number)
except:
# Get the country code to use for phone number parsing.
if self.__class__.guess_country:
country_code = self.get_argument('country', None)
if country_code == None:
country_code = self.get_user_country_by_ip()
if country_code == None:
code = self.get_browser_locale_code().replace('-', '_')
parts = code.split('_')
if len(parts) > 1: country_code = parts[1]
if country_code == None: country_code = self.__class__.default_country
country_code = country_code.upper()
logging.debug("Final country code: " + country_code)
else:
country_code = self.__class__.default_country
# Parse the phone number into international notion.
try:
number_parsed = phonenumbers.parse(number, country_code)
return number_parsed
except:
return False
@gen.coroutine
def limit_call(self, chash=None, amount=2, expire=10):
"""
Use this function to limit user requests. Returns True if this function
        was called fewer than 'amount' times in the last 'expire' seconds with
the same value 'chash' and the same remote IP address or False
otherwise.
"""
key = 'limit_call_' + chash + '_' + self.request.remote_ip
redis = self.application.redis
try:
current_value = yield gen.Task(redis.get, key)
except StreamClosedError:
yield gen.Task(self.application.redis_reconnect)
redis = self.application.redis
current_value = yield gen.Task(redis.get, key)
if current_value != None and int(current_value) >= amount:
            logging.info('Call limitation exceeded: ' + key)
raise gen.Return(False)
else:
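            # Count this call; start the expiry window only when the key
            # is first created.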
yield gen.Task(redis.incr, key)
if not current_value: yield gen.Task(redis.expire, key, expire)
raise gen.Return(True)
class DLRHandler(web.RequestHandler):
"""
Handles delivery receipts.
"""
def get(self):
"""
        All delivery receipts will be sent as HTTP-GET requests.
"""
# TODO: Parse request!
logging.info('Received DLR. Not yet parsed though.')
class NumberValidationHandler(BaseHandler):
"""
Validates a phone number.
"""
limit_amount = 10
limit_expires = 3600
@gen.coroutine
def get(self):
"""
Validates a phone number given as the query string parameter 'number'.
        If the phone number is not given in full international notation,
        the country will be guessed if the class attribute guess_country
        is True. Guessing will be done as follows:
1. If a query string parameter 'country' is given as a country code
(i.e. 'US', 'DE', ...) it will be used.
2. If no parameter country is given the country will be determined by
the remote IP address.
3. Otherwise the country determined by the request header
Accept-Language will be used.
        4. As a fall-back the class attribute default_country will be used.
"""
# Limit calls.
if self.limit_amount and not (yield self.limit_call('number_validation', self.limit_amount, self.limit_expires)):
            #raise web.HTTPError(403, 'Number Validation request limit exceeded')
self.finish({'status': 'error',
'error': 'limit_acceded'})
return
# Decode request's query string parameters.
number = self.get_argument('number', None)
if not number:
self.finish({'status': 'error',
'error': 'number_missing'})
return
logging.debug('Received number {} for validation'.format(number))
numberobj = self.parse_phonenumber(number)
if numberobj:
number = phonenumbers.format_number(numberobj,
phonenumbers.PhoneNumberFormat.INTERNATIONAL)
else: number = False
self.finish({'status': 'ok',
'number': number})
class SimpleMessageHandler(BaseHandler):
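    """
    Sends a fixed message via the Nexmo client to the phone number given
    as the 'receiver' query string parameter.
    """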
message = 'This is an Example Message'
sender = 'Put a sender title or number here'
limit_amount = 10
limit_expires = 3600
@gen.coroutine
def get(self):
# Limit calls.
if self.limit_amount and not (yield (self.limit_call('example_handler', self.limit_amount, self.limit_expires))):
self.finish({'status': 'error',
'error': 'limit_acceded'})
return
# Get receiver's phone number as 'receiver' parameter.
receiver = self.get_argument('receiver', None)
if not receiver:
self.finish({'status': 'error',
'error': 'receiver_missing'})
return
# Parse the given phone number.
receiverobj = self.parse_phonenumber(receiver)
if not receiverobj:
self.finish({'status': 'error',
'error': 'receiver_validation'})
return
# Format numbers for processing and displaying.
receiver_nice = phonenumbers.format_number(receiverobj,
phonenumbers.PhoneNumberFormat.INTERNATIONAL)
receiver = phonenumbers.format_number(receiverobj,
phonenumbers.PhoneNumberFormat.E164)
# Send message to receiver.
result = yield gen.Task(self.application.nexmo_client.send_message,
self.__class__.sender, receiver,
self.__class__.message)
# Process result.
if result: self.finish({'status': 'ok',
'message': 'Message sent',
'number': receiver_nice})
else: self.finish({'status': 'error',
'error': 'nexmo_error',
'message': 'Nexmo Service Error',
'number': receiver_nice})
| mit | -4,039,656,774,473,736,000 | 38.778986 | 121 | 0.566627 | false | 4.603354 | false | false | false |